diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 000000000..38ded2015 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,216 @@ +--- +kind: pipeline +name: arm64_gcc_make + +platform: + os: linux + arch: arm64 + +steps: +- name: Build and Test + image: ubuntu:18.04 + environment: + CC: gcc + COMMON_FLAGS: 'DYNAMIC_ARCH=1 TARGET=ARMV8 NUM_THREADS=32' + commands: + - echo "MAKE_FLAGS:= $COMMON_FLAGS" + - apt-get update -y + - apt-get install -y make $CC gfortran perl + - $CC --version + - make QUIET_MAKE=1 $COMMON_FLAGS + - make -C test $COMMON_FLAGS + - make -C ctest $COMMON_FLAGS + - make -C utest $COMMON_FLAGS + +--- +kind: pipeline +name: arm32_gcc_make + +platform: + os: linux + arch: arm + +steps: +- name: Build and Test + image: ubuntu:18.04 + environment: + CC: gcc + COMMON_FLAGS: 'DYNAMIC_ARCH=1 TARGET=ARMV6 NUM_THREADS=32' + commands: + - echo "MAKE_FLAGS:= $COMMON_FLAGS" + - apt-get update -y + - apt-get install -y make $CC gfortran perl + - $CC --version + - make QUIET_MAKE=1 $COMMON_FLAGS + - make -C test $COMMON_FLAGS + - make -C ctest $COMMON_FLAGS + - make -C utest $COMMON_FLAGS + +--- +kind: pipeline +name: arm64_clang_make + +platform: + os: linux + arch: arm64 + +steps: +- name: Build and Test + image: ubuntu:18.04 + environment: + CC: clang + COMMON_FLAGS: 'DYNAMIC_ARCH=1 TARGET=ARMV8 NUM_THREADS=32' + commands: + - echo "MAKE_FLAGS:= $COMMON_FLAGS" + - apt-get update -y + - apt-get install -y make $CC gfortran perl + - $CC --version + - make QUIET_MAKE=1 $COMMON_FLAGS + - make -C test $COMMON_FLAGS + - make -C ctest $COMMON_FLAGS + - make -C utest $COMMON_FLAGS + +--- +kind: pipeline +name: arm32_clang_cmake + +platform: + os: linux + arch: arm + +steps: +- name: Build and Test + image: ubuntu:18.04 + environment: + CC: clang + CMAKE_FLAGS: '-DDYNAMIC_ARCH=1 -DTARGET=ARMV6 -DNUM_THREADS=32 -DNOFORTRAN=ON -DBUILD_WITHOUT_LAPACK=ON' + commands: + - echo "CMAKE_FLAGS:= $CMAKE_FLAGS" + - apt-get update -y + - apt-get install -y make $CC g++ perl cmake + - $CC --version + - mkdir build && cd build + - cmake $CMAKE_FLAGS .. + - make -j + - ctest -V + +--- +kind: pipeline +name: arm64_gcc_cmake + +platform: + os: linux + arch: arm64 + +steps: +- name: Build and Test + image: ubuntu:18.04 + environment: + CC: gcc + CMAKE_FLAGS: '-DDYNAMIC_ARCH=1 -DTARGET=ARMV8 -DNUM_THREADS=32 -DNOFORTRAN=ON -DBUILD_WITHOUT_LAPACK=ON' + commands: + - echo "CMAKE_FLAGS:= $CMAKE_FLAGS" + - apt-get update -y + - apt-get install -y make $CC g++ perl cmake + - $CC --version + - mkdir build && cd build + - cmake $CMAKE_FLAGS .. + - make -j + - ctest -V + +--- +kind: pipeline +name: arm64_clang_cmake + +platform: + os: linux + arch: arm64 + +steps: +- name: Build and Test + image: ubuntu:18.04 + environment: + CC: clang + CMAKE_FLAGS: '-DDYNAMIC_ARCH=1 -DTARGET=ARMV8 -DNUM_THREADS=32 -DNOFORTRAN=ON -DBUILD_WITHOUT_LAPACK=ON' + commands: + - echo "CMAKE_FLAGS:= $CMAKE_FLAGS" + - apt-get update -y + - apt-get install -y make $CC g++ perl cmake + - $CC --version + - mkdir build && cd build + - cmake $CMAKE_FLAGS .. 
+ - make -j + - ctest -V + +--- +kind: pipeline +name: arm64_native_test + +platform: + os: linux + arch: arm64 + +steps: +- name: Build and Test + image: ubuntu:18.04 + environment: + CC: gcc + COMMON_FLAGS: 'USE_OPENMP=1' + commands: + - echo "MAKE_FLAGS:= $COMMON_FLAGS" + - apt-get update -y + - apt-get install -y make $CC gfortran perl python g++ + - $CC --version + - make QUIET_MAKE=1 $COMMON_FLAGS + - make -C test $COMMON_FLAGS + - make -C ctest $COMMON_FLAGS + - make -C utest $COMMON_FLAGS + - make -C cpp_thread_test dgemm_tester +--- +kind: pipeline +name: epyc_native_test + +platform: + os: linux + arch: amd64 + +steps: +- name: Build and Test + image: ubuntu:18.04 + environment: + CC: gcc + COMMON_FLAGS: 'USE_OPENMP=1' + commands: + - echo "MAKE_FLAGS:= $COMMON_FLAGS" + - apt-get update -y + - apt-get install -y make $CC gfortran perl python g++ + - $CC --version + - make QUIET_MAKE=1 $COMMON_FLAGS + - make -C test $COMMON_FLAGS + - make -C ctest $COMMON_FLAGS + - make -C utest $COMMON_FLAGS + - make -C cpp_thread_test dgemm_tester +--- +kind: pipeline +name: arm64_gcc10 + +platform: + os: linux + arch: arm64 + +steps: +- name: Build and Test + image: ubuntu:20.04 + environment: + CC: gcc-10 + FC: gfortran-10 + COMMON_FLAGS: 'TARGET=ARMV8 DYNAMIC_ARCH=1' + commands: + - echo "MAKE_FLAGS:= $COMMON_FLAGS" + - apt-get update -y + - apt-get install -y make $CC gfortran-10 perl python g++ + - $CC --version + - make QUIET_MAKE=1 $COMMON_FLAGS + - make -C utest $COMMON_FLAGS + - make -C test $COMMON_FLAGS + diff --git a/.github/workflows/dynamic_arch.yml b/.github/workflows/dynamic_arch.yml new file mode 100644 index 000000000..ca53e8857 --- /dev/null +++ b/.github/workflows/dynamic_arch.yml @@ -0,0 +1,103 @@ +name: continuous build + +on: [push, pull_request] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest] + fortran: [gfortran, flang] + build: [cmake, make] + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Compilation cache + uses: actions/cache@v2 + with: + path: ~/.ccache + # We include the commit sha in the cache key, as new cache entries are + # only created if there is no existing entry for the key yet. 
+ key: ${{ runner.os }}-ccache-${{ github.sha }} + # Restore any ccache cache entry, if none for + # ${{ runner.os }}-ccache-${{ github.sha }} exists + restore-keys: | + ${{ runner.os }}-ccache- + + - name: Print system information + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + cat /proc/cpuinfo + elif [ "$RUNNER_OS" == "macOS" ]; then + sysctl -a | grep machdep.cpu + else + echo "$RUNNER_OS not supported" + exit 1 + fi + + - name: Install Dependencies + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + sudo apt-get install -y gfortran cmake ccache + elif [ "$RUNNER_OS" == "macOS" ]; then + brew install coreutils cmake ccache + else + echo "$RUNNER_OS not supported" + exit 1 + fi + ccache -M 300M # Limit the ccache size; Github's overall cache limit is 5GB + + - name: gfortran build + if: matrix.build == 'make' && matrix.fortran == 'gfortran' + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + export PATH="/usr/lib/ccache:${PATH}" + elif [ "$RUNNER_OS" == "macOS" ]; then + export PATH="$(brew --prefix)/opt/ccache/libexec:${PATH}" + else + echo "$RUNNER_OS not supported" + exit 1 + fi + + make -j$(nproc) DYNAMIC_ARCH=1 USE_OPENMP=0 + + - name: flang build + if: matrix.build == 'make' && matrix.fortran == 'flang' + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + export PATH="/usr/lib/ccache:${PATH}" + elif [ "$RUNNER_OS" == "macOS" ]; then + exit 0 + else + echo "$RUNNER_OS not supported" + exit 1 + fi + + cd /usr/ + sudo wget -nv https://github.com/flang-compiler/flang/releases/download/flang_20190329/flang-20190329-x86-70.tgz + sudo tar xf flang-20190329-x86-70.tgz + sudo rm flang-20190329-x86-70.tgz + cd - + + make -j$(nproc) DYNAMIC_ARCH=1 USE_OPENMP=0 FC=flang + + + - name: CMake gfortran build + if: matrix.build == 'cmake' && matrix.fortran == 'gfortran' + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + export PATH="/usr/lib/ccache:${PATH}" + elif [ "$RUNNER_OS" == "macOS" ]; then + export PATH="$(brew --prefix)/opt/ccache/libexec:${PATH}" + else + echo "$RUNNER_OS not supported" + exit 1 + fi + + mkdir build + cd build + cmake -DDYNAMIC_ARCH=1 -DNOFORTRAN=0 -DBUILD_WITHOUT_LAPACK=0 -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Release .. 
+ make -j$(nproc) diff --git a/.github/workflows/nightly-Homebrew-build.yml b/.github/workflows/nightly-Homebrew-build.yml new file mode 100644 index 000000000..b025f8634 --- /dev/null +++ b/.github/workflows/nightly-Homebrew-build.yml @@ -0,0 +1,84 @@ +# Only the "head" branch of the OpenBLAS package is tested + +on: + push: + paths: + - '**/nightly-Homebrew-build.yml' + pull_request: + branches: + - develop + paths: + - '**/nightly-Homebrew-build.yml' + schedule: + - cron: 45 7 * * * +# This is 7:45 AM UTC daily, late at night in the USA + +# Since push and pull_request will still always be building and testing the `develop` branch, +# it only makes sense to test if this file has been changed + +name: Nightly-Homebrew-Build +jobs: + build-OpenBLAS-with-Homebrew: + runs-on: macos-latest + env: + DEVELOPER_DIR: /Applications/Xcode_11.4.1.app/Contents/Developer + HOMEBREW_DEVELOPER: "ON" + HOMEBREW_DISPLAY_INSTALL_TIMES: "ON" + HOMEBREW_NO_ANALYTICS: "ON" + HOMEBREW_NO_AUTO_UPDATE: "ON" + HOMEBREW_NO_BOTTLE_SOURCE_FALLBACK: "ON" + HOMEBREW_NO_INSTALL_CLEANUP: "ON" + + steps: + - name: Random delay for cron job + run: | + delay=$(( RANDOM % 600 )) + printf 'Delaying for %s seconds on event %s' ${delay} "${{ github.event_name }}" + sleep ${delay} + if: github.event_name == 'schedule' + + - uses: actions/checkout@v2 + # This isn't even needed, technically. Homebrew will get `develop` via git + + - name: Update Homebrew + if: github.event_name != 'pull_request' + run: brew update || true + + - name: unlink installed gcc to allow updating + run: | + brew unlink gcc@8 + brew unlink gcc@9 + + - name: Install prerequisites + run: brew install --fetch-HEAD --HEAD --only-dependencies --keep-tmp openblas + + - name: Install and bottle OpenBLAS + run: brew install --fetch-HEAD --HEAD --build-bottle --keep-tmp openblas + # the HEAD flags tell Homebrew to build the develop branch fetch via git + + - name: Create bottle + run: | + brew bottle -v openblas + mkdir bottles + mv *.bottle.tar.gz bottles + + - name: Upload bottle + uses: actions/upload-artifact@v1 + with: + name: openblas--HEAD.catalina.bottle.tar.gz + path: bottles + + - name: Show linkage + run: brew linkage -v openblas + + - name: Test openblas + run: brew test --HEAD --verbose openblas + + - name: Audit openblas formula + run: | + brew audit --strict openblas + brew cat openblas + + - name: Post logs on failure + if: failure() + run: brew gist-logs --with-hostname -v openblas diff --git a/.gitignore b/.gitignore index 79e116271..b2c857945 100644 --- a/.gitignore +++ b/.gitignore @@ -70,6 +70,7 @@ test/SBLAT2.SUMM test/SBLAT3.SUMM test/ZBLAT2.SUMM test/ZBLAT3.SUMM +test/SHBLAT3.SUMM test/cblat1 test/cblat2 test/cblat3 @@ -79,6 +80,7 @@ test/dblat3 test/sblat1 test/sblat2 test/sblat3 +test/test_shgemm test/zblat1 test/zblat2 test/zblat3 @@ -87,5 +89,6 @@ build.* *.swp benchmark/*.goto benchmark/smallscaling - -.vscode \ No newline at end of file +.vscode +CMakeCache.txt +CMakeFiles/* diff --git a/.travis.yml b/.travis.yml index eee7674fe..bde0e202d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,8 +16,7 @@ matrix: before_script: &common-before - COMMON_FLAGS="DYNAMIC_ARCH=1 TARGET=NEHALEM NUM_THREADS=32" script: - - set -e - - travis_wait 45 make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE + - make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE - make -C test $COMMON_FLAGS $BTYPE - make -C ctest $COMMON_FLAGS $BTYPE - make -C utest $COMMON_FLAGS $BTYPE @@ -25,6 +24,37 @@ matrix: - TARGET_BOX=LINUX64 - BTYPE="BINARY=64" + - <<: *test-ubuntu + os: linux-ppc64le + 
before_script: + - COMMON_FLAGS="DYNAMIC_ARCH=1 TARGET=POWER8 NUM_THREADS=32" + env: + # for matrix annotation only + - TARGET_BOX=PPC64LE_LINUX + - BTYPE="BINARY=64 USE_OPENMP=1" + + - <<: *test-ubuntu + os: linux + arch: s390x + before_script: + - COMMON_FLAGS="DYNAMIC_ARCH=1 TARGET=Z13 NUM_THREADS=32" + env: + # for matrix annotation only + - TARGET_BOX=IBMZ_LINUX + - BTYPE="BINARY=64 USE_OPENMP=1" + + - <<: *test-ubuntu + os: linux + dist: focal + arch: s390x + compiler: clang + before_script: + - COMMON_FLAGS="DYNAMIC_ARCH=1 TARGET=Z13 NUM_THREADS=32" + env: + # for matrix annotation only + - TARGET_BOX=IBMZ_LINUX + - BTYPE="BINARY=64 USE_OPENMP=0 CC=clang" + - <<: *test-ubuntu env: - TARGET_BOX=LINUX64 @@ -57,6 +87,40 @@ matrix: - TARGET_BOX=LINUX32 - BTYPE="BINARY=32" + - os: linux + arch: ppc64le + dist: bionic + compiler: gcc + before_script: + - sudo add-apt-repository 'ppa:ubuntu-toolchain-r/test' -y + - sudo apt-get update + - sudo apt-get install gcc-9 gfortran-9 -y + script: + - make QUIET_MAKE=1 BINARY=64 USE_OPENMP=1 CC=gcc-9 FC=gfortran-9 + - make -C test $COMMON_FLAGS $BTYPE + - make -C ctest $COMMON_FLAGS $BTYPE + - make -C utest $COMMON_FLAGS $BTYPE + env: + # for matrix annotation only + - TARGET_BOX=PPC64LE_LINUX_P9 + + - os: linux + arch: ppc64le + dist: bionic + compiler: gcc + before_script: + - sudo add-apt-repository 'ppa:ubuntu-toolchain-r/test' -y + - sudo apt-get update + - sudo apt-get install gcc-9 gfortran-9 -y + script: + - make QUIET_MAKE=1 BUILD_BFLOAT16=1 BINARY=64 USE_OPENMP=1 CC=gcc-9 FC=gfortran-9 + - make -C test $COMMON_FLAGS $BTYPE + - make -C ctest $COMMON_FLAGS $BTYPE + - make -C utest $COMMON_FLAGS $BTYPE + env: + # for matrix annotation only + - TARGET_BOX=PPC64LE_LINUX_P9 + - os: linux compiler: gcc addons: @@ -89,7 +153,6 @@ matrix: - sudo sh alpine-chroot-install -p 'build-base gfortran perl linux-headers' before_script: *common-before script: - - set -e # XXX: Disable some warnings for now to avoid exceeding Travis limit for log size. - alpine make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE CFLAGS="-Wno-misleading-indentation -Wno-sign-conversion -Wno-incompatible-pointer-types" @@ -132,7 +195,6 @@ matrix: before_script: - COMMON_ARGS="-DTARGET=NEHALEM -DNUM_THREADS=32" script: - - set -e - mkdir build - CONFIG=Release - cmake -Bbuild -H. 
$CMAKE_ARGS $COMMON_ARGS -DCMAKE_BUILD_TYPE=$CONFIG @@ -149,57 +211,64 @@ matrix: - &test-macos os: osx - osx_image: xcode10.1 + osx_image: xcode11.5 before_script: - - COMMON_FLAGS="DYNAMIC_ARCH=1 TARGET=NEHALEM NUM_THREADS=32" - - brew update - - brew install gcc # for gfortran + - COMMON_FLAGS="DYNAMIC_ARCH=1 NUM_THREADS=32" script: - travis_wait 45 make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE env: - - BTYPE="BINARY=64 INTERFACE64=1" + - BTYPE="TARGET=NEHALEM BINARY=64 INTERFACE64=1 FC=gfortran-9" - <<: *test-macos - osx_image: xcode8.3 + osx_image: xcode12 + before_script: + - COMMON_FLAGS="DYNAMIC_ARCH=1 NUM_THREADS=32" + - brew update + - brew install gcc@10 + script: + - travis_wait 45 make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE env: - - BTYPE="BINARY=32" + - BTYPE="TARGET=NEHALEM BINARY=64 INTERFACE64=1 FC=gfortran-10" + + # - <<: *test-macos + # osx_image: xcode10 + # env: + # - BTYPE="TARGET=NEHALEM BINARY=32 NOFORTRAN=1" - - &emulated-arm - dist: trusty - sudo: required - services: docker - env: IMAGE_ARCH=arm32 TARGET_ARCH=ARMV6 COMPILER=gcc - name: "Emulated Build for ARMV6 with gcc" - before_install: sudo docker run --rm --privileged multiarch/qemu-user-static:register --reset - script: | - echo "FROM openblas/alpine:${IMAGE_ARCH} - COPY . /tmp/openblas - RUN mkdir /tmp/openblas/build && \ - cd /tmp/openblas/build && \ - CC=${COMPILER} cmake -D DYNAMIC_ARCH=OFF \ - -D TARGET=${TARGET_ARCH} \ - -D BUILD_SHARED_LIBS=ON \ - -D BUILD_WITHOUT_LAPACK=ON \ - -D BUILD_WITHOUT_CBLAS=ON \ - -D CMAKE_BUILD_TYPE=Release ../ && \ - cmake --build ." > Dockerfile - docker build . - - <<: *emulated-arm - env: IMAGE_ARCH=arm32 TARGET_ARCH=ARMV6 COMPILER=clang - name: "Emulated Build for ARMV6 with clang" - - <<: *emulated-arm - env: IMAGE_ARCH=arm64 TARGET_ARCH=ARMV8 COMPILER=gcc - name: "Emulated Build for ARMV8 with gcc" - - <<: *emulated-arm - env: IMAGE_ARCH=arm64 TARGET_ARCH=ARMV8 COMPILER=clang - name: "Emulated Build for ARMV8 with clang" - - allow_failures: - - env: IMAGE_ARCH=arm32 TARGET_ARCH=ARMV6 COMPILER=gcc - - env: IMAGE_ARCH=arm32 TARGET_ARCH=ARMV6 COMPILER=clang - - env: IMAGE_ARCH=arm64 TARGET_ARCH=ARMV8 COMPILER=gcc - - env: IMAGE_ARCH=arm64 TARGET_ARCH=ARMV8 COMPILER=clang + - <<: *test-macos + osx_image: xcode11.5 + before_script: + - COMMON_FLAGS="DYNAMIC_ARCH=1 NUM_THREADS=32" + - brew update + env: +# - CC="/Applications/Xcode-10.1.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang" +# - CFLAGS="-O2 -Wno-macro-redefined -isysroot /Applications/Xcode-10.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS12.1.sdk -arch arm64 -miphoneos-version-min=10.0" + - CC="/Applications/Xcode-11.5.GM.Seed.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang" + - CFLAGS="-O2 -Wno-macro-redefined -isysroot /Applications/Xcode-11.5.GM.Seed.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS13.5.sdk -arch arm64 -miphoneos-version-min=10.0" + - BTYPE="TARGET=ARMV8 BINARY=64 HOSTCC=clang NOFORTRAN=1" + - <<: *test-macos + osx_image: xcode11.5 + env: +# - CC="/Applications/Xcode-10.1.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang" +# - CFLAGS="-O2 -mno-thumb -Wno-macro-redefined -isysroot /Applications/Xcode-10.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS12.1.sdk -arch armv7 -miphoneos-version-min=5.1" + - CC="/Applications/Xcode-11.5.GM.Seed.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang" + - CFLAGS="-O2 -mno-thumb 
-Wno-macro-redefined -isysroot /Applications/Xcode-11.5.GM.Seed.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS13.5.sdk -arch armv7 -miphoneos-version-min=5.1" + - BTYPE="TARGET=ARMV7 HOSTCC=clang NOFORTRAN=1" + - &test-graviton2 + os: linux + arch: arm64-graviton2 + dist: focal + group: edge + virt: lxd + compiler: gcc + addons: + apt: + packages: + - gfortran + script: + - travis_wait 45 make && make lapack-test + # whitelist branches: only: diff --git a/CMakeLists.txt b/CMakeLists.txt index 8900973a5..c5ba3ceed 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,7 +6,7 @@ cmake_minimum_required(VERSION 2.8.5) project(OpenBLAS C ASM) set(OpenBLAS_MAJOR_VERSION 0) set(OpenBLAS_MINOR_VERSION 3) -set(OpenBLAS_PATCH_VERSION 7.dev) +set(OpenBLAS_PATCH_VERSION 13.dev) set(OpenBLAS_VERSION "${OpenBLAS_MAJOR_VERSION}.${OpenBLAS_MINOR_VERSION}.${OpenBLAS_PATCH_VERSION}") # Adhere to GNU filesystem layout conventions @@ -20,9 +20,17 @@ if(MSVC) option(BUILD_WITHOUT_LAPACK "Do not build LAPACK and LAPACKE (Only BLAS or CBLAS)" ON) endif() option(BUILD_WITHOUT_CBLAS "Do not build the C interface (CBLAS) to the BLAS functions" OFF) -option(DYNAMIC_ARCH "Include support for multiple CPU targets, with automatic selection at runtime (x86/x86_64 only)" OFF) -option(DYNAMIC_OLDER "Include specific support for older cpu models (Penryn,Dunnington,Atom,Nano,Opteron) with DYNAMIC_ARCH" OFF) +option(DYNAMIC_ARCH "Include support for multiple CPU targets, with automatic selection at runtime (x86/x86_64, aarch64 or ppc only)" OFF) +option(DYNAMIC_OLDER "Include specific support for older x86 cpu models (Penryn,Dunnington,Atom,Nano,Opteron) with DYNAMIC_ARCH" OFF) option(BUILD_RELAPACK "Build with ReLAPACK (recursive implementation of several LAPACK functions on top of standard LAPACK)" OFF) +option(USE_LOCKING "Use locks even in single-threaded builds to make them callable from multiple threads" OFF) +if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") +option(NO_AFFINITY "Disable support for CPU affinity masks to avoid binding processes from e.g. R or numpy/scipy to a single core" ON) +else() +set(NO_AFFINITY 1) +endif() +option(CPP_THREAD_SAFETY_TEST "Run a massively parallel DGEMM test to confirm thread safety of the library (requires OpenMP and about 1.3GB of RAM)" OFF) +option(CPP_THREAD_SAFETY_GEMV "Run a massively parallel DGEMV test to confirm thread safety of the library (requires OpenMP)" OFF) # Add a prefix or suffix to all exported symbol names in the shared library. # Avoids conflicts with other BLAS libraries, especially when using @@ -81,9 +89,13 @@ if (NOT NO_LAPACK) list(APPEND SUBDIRS lapack) endif () +if (NOT DEFINED BUILD_BFLOAT16) + set (BUILD_BFLOAT16 false) +endif () # set which float types we want to build for if (NOT DEFINED BUILD_SINGLE AND NOT DEFINED BUILD_DOUBLE AND NOT DEFINED BUILD_COMPLEX AND NOT DEFINED BUILD_COMPLEX16) # if none are defined, build for all +# set(BUILD_BFLOAT16 true) set(BUILD_SINGLE true) set(BUILD_DOUBLE true) set(BUILD_COMPLEX true) @@ -115,6 +127,11 @@ if (BUILD_COMPLEX16) list(APPEND FLOAT_TYPES "ZCOMPLEX") # defines COMPLEX and DOUBLE endif () +if (BUILD_BFLOAT16) + message(STATUS "Building Half Precision") + list(APPEND FLOAT_TYPES "BFLOAT16") # defines nothing +endif () + if (NOT DEFINED CORE OR "${CORE}" STREQUAL "UNKNOWN") message(FATAL_ERROR "Detecting CPU failed. Please set TARGET explicitly, e.g. make TARGET=your_cpu_target. 
Please read README for details.") endif () @@ -206,7 +223,8 @@ if (USE_THREAD) target_link_libraries(${OpenBLAS_LIBNAME} ${CMAKE_THREAD_LIBS_INIT}) endif() -if (MSVC OR NOT NOFORTRAN) +#if (MSVC OR NOT NOFORTRAN) +if (NOT NO_CBLAS) # Broken without fortran on unix add_subdirectory(utest) endif() @@ -217,6 +235,10 @@ if (NOT MSVC AND NOT NOFORTRAN) if(NOT NO_CBLAS) add_subdirectory(ctest) endif() + add_subdirectory(lapack-netlib/TESTING) + if (CPP_THREAD_SAFETY_TEST OR CPP_THREAD_SAFETY_GEMV) + add_subdirectory(cpp_thread_test) + endif() endif() set_target_properties(${OpenBLAS_LIBNAME} PROPERTIES @@ -228,11 +250,11 @@ if (BUILD_SHARED_LIBS AND BUILD_RELAPACK) if (NOT MSVC) target_link_libraries(${OpenBLAS_LIBNAME} "-Wl,-allow-multiple-definition") else() - target_link_libraries(${OpenBLAS_LIBNAME} "/FORCE:MULTIPLE") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /FORCE:MULTIPLE") endif() endif() -if (BUILD_SHARED_LIBS AND NOT ${SYMBOLPREFIX}${SYMBOLSUFIX} STREQUAL "") +if (BUILD_SHARED_LIBS AND NOT ${SYMBOLPREFIX}${SYMBOLSUFFIX} STREQUAL "") if (NOT DEFINED ARCH) set(ARCH_IN "x86_64") else() @@ -341,10 +363,21 @@ endif() if(NOT NO_CBLAS) message (STATUS "Generating cblas.h in ${CMAKE_INSTALL_INCLUDEDIR}") - set(CBLAS_H ${CMAKE_BINARY_DIR}/generated/cblas.h) file(READ ${CMAKE_CURRENT_SOURCE_DIR}/cblas.h CBLAS_H_CONTENTS) string(REPLACE "common" "openblas_config" CBLAS_H_CONTENTS_NEW "${CBLAS_H_CONTENTS}") + if (NOT ${SYMBOLPREFIX} STREQUAL "") + string(REPLACE " cblas" " ${SYMBOLPREFIX}cblas" CBLAS_H_CONTENTS "${CBLAS_H_CONTENTS_NEW}") + string(REPLACE " openblas" " ${SYMBOLPREFIX}openblas" CBLAS_H_CONTENTS_NEW "${CBLAS_H_CONTENTS}") + string (REPLACE " ${SYMBOLPREFIX}openblas_complex" " openblas_complex" CBLAS_H_CONTENTS "${CBLAS_H_CONTENTS_NEW}") + string(REPLACE " goto" " ${SYMBOLPREFIX}goto" CBLAS_H_CONTENTS_NEW "${CBLAS_H_CONTENTS}") + endif() + if (NOT ${SYMBOLSUFFIX} STREQUAL "") + string(REGEX REPLACE "(cblas[^ (]*)" "\\1${SYMBOLSUFFIX}" CBLAS_H_CONTENTS "${CBLAS_H_CONTENTS_NEW}") + string(REGEX REPLACE "(openblas[^ (]*)" "\\1${SYMBOLSUFFIX}" CBLAS_H_CONTENTS_NEW "${CBLAS_H_CONTENTS}") + string(REGEX REPLACE "(openblas_complex[^ ]*)${SYMBOLSUFFIX}" "\\1" CBLAS_H_CONTENTS "${CBLAS_H_CONTENTS_NEW}") + string(REGEX REPLACE "(goto[^ (]*)" "\\1${SYMBOLSUFFIX}" CBLAS_H_CONTENTS_NEW "${CBLAS_H_CONTENTS}") + endif() file(WRITE ${CBLAS_H} "${CBLAS_H_CONTENTS_NEW}") install (FILES ${CBLAS_H} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) endif() @@ -361,11 +394,9 @@ if(NOT NO_LAPACKE) install (FILES ${CMAKE_BINARY_DIR}/lapacke_mangling.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/openblas${SUFFIX64}) endif() -include(FindPkgConfig QUIET) -if(PKG_CONFIG_FOUND) - configure_file(${PROJECT_SOURCE_DIR}/cmake/openblas.pc.in ${PROJECT_BINARY_DIR}/openblas${SUFFIX64}.pc @ONLY) - install (FILES ${PROJECT_BINARY_DIR}/openblas${SUFFIX64}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig/) -endif() +# Install pkg-config files +configure_file(${PROJECT_SOURCE_DIR}/cmake/openblas.pc.in ${PROJECT_BINARY_DIR}/openblas${SUFFIX64}.pc @ONLY) +install (FILES ${PROJECT_BINARY_DIR}/openblas${SUFFIX64}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig/) # GNUInstallDirs "DATADIR" wrong here; CMake search path wants "share". 
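The CPP_THREAD_SAFETY_TEST / CPP_THREAD_SAFETY_GEMV options added above wire the cpp_thread_test directory into the CMake build. As a rough illustration of what such a test exercises — many threads issuing independent DGEMM calls and checking the results — here is a minimal standalone sketch. It is not the bundled dgemm_tester; the file name, sizes and build line are illustrative, and it assumes only the standard cblas.h interface plus OpenMP:

/* Minimal sketch of a DGEMM thread-safety smoke test -- an illustration,
 * not the bundled cpp_thread_test/dgemm_tester.
 * Build (assumed): gcc -fopenmp stress.c -o stress -lopenblas */
#include <stdio.h>
#include <stdlib.h>
#include <cblas.h>

#define N 256
#define ITERS 50

int main(void) {
    int bad = 0;
    /* Each iteration runs a small DGEMM on private buffers; with a
     * thread-safe library every C must equal A*I = A exactly, since
     * each output element is a dot product with a single nonzero term. */
    #pragma omp parallel for reduction(+:bad)
    for (int t = 0; t < ITERS; t++) {
        double *A = malloc(N * N * sizeof(double));
        double *B = calloc(N * N, sizeof(double));
        double *C = malloc(N * N * sizeof(double));
        for (int i = 0; i < N * N; i++) A[i] = (double)(i % 7) - 3.0;
        for (int i = 0; i < N; i++) B[i * N + i] = 1.0; /* identity */
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    N, N, N, 1.0, A, N, B, N, 0.0, C, N);
        for (int i = 0; i < N * N; i++)
            if (C[i] != A[i]) { bad++; break; }
        free(A); free(B); free(C);
    }
    if (bad)
        printf("FAIL: %d iterations produced corrupted results\n", bad);
    else
        printf("PASS: all %d iterations consistent\n", ITERS);
    return bad != 0;
}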
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 3859a9c19..be9a32a7c 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -171,3 +171,26 @@ In chronological order:
  * [2019-02-01] added missing Blas Level-1,2 (single precision) simd codes
  * [2019-03-14] power9 dgemm/dtrmm kernel
  * [2019-04-29] power9 sgemm/strmm kernel
+
+* Jiachen Wang
+  * [2019-07-29] optimize AVX2 DGEMM
+  * [2019-10-20] AVX512 DGEMM kernel (4x8)
+  * [2019-11-06] optimize AVX512 SGEMM
+  * [2019-11-12] AVX512 CGEMM & ZGEMM kernels
+  * [2019-12-23] optimize AVX2 CGEMM and ZGEMM
+  * [2019-12-30] AVX2 CGEMM3M & ZGEMM3M kernels
+  * [2020-01-07] optimize AVX2 SGEMM and STRMM
+
+* Rajalakshmi Srinivasaraghavan
+  * [2020-04-15] Half-precision GEMM for bfloat16
+
+* Marius Hillenbrand
+  * [2020-05-12] Revise dynamic architecture detection for IBM z
+  * [2020-05-12] Add new sgemm and strmm kernel for IBM z14
+  * [2020-09-07] Fix builds with clang on IBM z, including dynamic architecture support
+
+* Danfeng Zhang
+  * [2020-05-20] Improve performance of SGEMM and STRMM on Arm Cortex-A53
+
+* PingTouGe Semiconductor Co., Ltd.
+  * [2020-10] Add RISC-V Vector (0.7.1) support. Optimize BLAS kernels for Xuantie C910
diff --git a/Changelog.txt b/Changelog.txt
index 8df35d5c3..cbc7007ac 100644
--- a/Changelog.txt
+++ b/Changelog.txt
@@ -1,4 +1,366 @@ OpenBLAS ChangeLog
+====================================================================
+Version 0.3.13
+ 12-Dec-2020
+
+ common:
+ * Added a generic bfloat16 SBGEMV kernel
+ * Fixed a potentially severe memory leak after fork in OpenMP builds
+   that was introduced in 0.3.12
+ * Added detection of the Fujitsu Fortran compiler
+ * Added detection of the (e)gfortran compiler on OpenBSD
+ * Added support for overriding the default name of the library independently
+   from symbol suffixing in the gmake builds (already supported in cmake)
+
+RISCV:
+ * Added a RISC-V port optimized for C910V
+
+POWER:
+ * Added optimized POWER10 kernels for SAXPY, CAXPY, SDOT, DDOT and DGEMV_N
+ * Improved DGEMM performance on POWER10
+ * Improved STRSM and DTRSM performance on POWER9 and POWER10
+ * Fixed segmentation faults in DYNAMIC_ARCH builds
+ * Fixed compilation with the PGI compiler
+
+x86:
+ * Fixed compilation of kernels that require SSE2 intrinsics since 0.3.12
+
+x86_64:
+ * Added an optimized bfloat16 SBGEMV kernel for SkylakeX and Cooperlake
+ * Improved the performance of SASUM and DASUM kernels through parallelization
+ * Improved the performance of SROT and DROT kernels
+ * Improved the performance of multithreaded xSYRK
+ * Fixed OpenMP builds that use the LLVM Clang compiler together with GNU gfortran
+   (where linking of both the LLVM libomp and GNU libgomp could lead to lockups or
+   wrong results)
+ * Fixed miscompilations by old gcc 4.6
+ * Fixed misdetection of AVX2 capability in some Sandybridge cpus
+ * Fixed lockups in builds combining DYNAMIC_ARCH with TARGET=GENERIC on OpenBSD
+
+ARM64:
+ * Fixed segmentation faults in DYNAMIC_ARCH builds
+
+MIPS:
+ * Improved kernels for Loongson 3R3 ("3A") and 3R4 ("3B") models, including MSA
+ * Fixed bugs in the MSA kernels for CGEMM, CTRMM, CGEMV and ZGEMV
+ * Added handling of zero increments in the MSA kernels for SSWAP and DSWAP
+ * Added DYNAMIC_ARCH support for MIPS64 (currently Loongson3R3/3R4 only)
+
+SPARC:
+ * Fixed building 32 and 64 bit SPARC kernels with the SolarisStudio compilers
+
+====================================================================
+Version 0.3.12
+ 24-Oct-2020
+
+common:
+ * Fixed missing BLAS/LAPACK functions
(inadvertently dropped during + the build system restructuring) + * Fixed argument conversion macro in LAPACKE_zgesvdq (LAPACK #458) + +POWER: + * Added optimized SCOPY/CCOPY kernels for POWER10 + * Increased and unified the default size of the GEMM BUFFER + * Fixed building for POWER10 in DYNAMIC_ARCH mode + * POWER10 compatibility test now checks binutils version as well + * Cleaned up compiler warnings + +x86_64: + * corrected compiler version checks for AVX2 compatibility + * added compiler option -mavx2 for building with flang + * fixed direct SGEMM pathway for small matrix sizes (broken by + the code refactoring in 0.3.11) + * fixed unhandled partial register clobbers in several kernels + for AXPY,DOT,GEMV_N and GEMV_T flagged by gcc10 tree-vectorizer + +ARMV8: + * improved Apple Vortex support to include cross-compiling + +==================================================================== +Version 0.3.11 + 17-Oct-2020 + +common: + * API change: + the newly added BFLOAT16 functions were renamed to use the + letter "B" instead of "H" to avoid potential confusion with + the IEEE "half precision float" type, i.e. the 0.3.10 + SHGEMM is now SBGEMM and the corresponding build option + was changed from "BUILD_HALF" to "BUILD_BFLOAT16". + * Reduced the default BLAS3_MEM_ALLOC_THRESHOLD (used as an upper + limit for placing temporary arrays on the stack) to be compatible + with a stack size of 1mb (as imposed by the JAVA runtime library) + * Added mixed-precision dot function SBDOT and utility functions + shstobf16, shdtobf16, sbf16tos and dbf16tod to convert between + single or double precision float arrays and bfloat16 arrays + * Fixed prototypes of LAPACK_?ggsvp and LAPACK_?ggsvd functions + in lapack.h + * Fixed underflow and rounding errors in LAPACK SLANV2 and DLANV2 + (causing miscalculations in e.g. SHSEQR/DHSEQR, LAPACK issue #263) + * Fixed workspace calculation in LAPACK ?GELQ (LAPACK issue #415) + * Fixed several bugs in the LAPACK testsuite + * Improved performance of TRMM and TRSM for certain problem sizes + * Fixed infinite recursions and workspace miscalculations in ReLAPACK + * CMAKE builds no longer require pkg-config for creating the .pc file + * Makefile builds no longer misread NO_CBLAS=0 or NO_LAPACK=0 as + enabling these options + * Fixed detection of gfortran when invoked through an mpi wrapper + * Improve thread reinitialization performance with OpenMP after a fork + * Added support for building only the subset of the library required + for a particular precision by specifying BUILD_SINGLE, BUILD_DOUBLE + * Optional function name prefixes and suffixes are now correctly + reflected in the generated cblas.h + * Added CMAKE build support for the LAPACK and multithreading tests + +POWER: + * Added optimized support for POWER10 + * Added support for compiling for POWER8 in 32bit mode + * Added support for compilation with LLVM/clang + * Added support for compilation with NVIDIA/PGI compilers + * Fixed building on big-endian POWER8 + * Fixed miscompilation of ZDOTC by gcc10 + * Fixed alignment errors in the POWER8 SAXPY kernel + * Improved CPU detection on AIX + * Supported building with older compilers on POWER9 + +x86_64: + * Added support for Intel Cooperlake + * Added autodetection of AMD Renoir/Matisse/Zen3 cpus + * Added autodetection of Intel Comet Lake cpus + * Reimplemented ?sum, ?dot and daxpy using universal intrinsics + * Reset the fpu state before using the fpu on Windows as a workaround + for a problem introduced in Windows 10 build 19041 (a.k.a. 
SDK 2004) + * Fixed potentially undefined behaviour in the dot and gemv_t kernels + * Fixed a potential segmentation fault in DYNAMIC_ARCH builds + * Fixed building for ZEN with PGI/NVIDIA and AMD AOCC compilers + +ARMV7: + * Fixed cpu detection on BSD-like systems + +ARMV8: + * Added preliminary support for Apple Vortex cpus + * Added support for the Cavium ThunderX3T110 cpu + * Fixed cpu detection on BSD-like systems + * Fixed compilation in -std=C18 mode + +IBM Z: + * Added support for compiling with the clang compiler + * Improved GEMM performance on Z14 + +==================================================================== +Version 0.3.10 + 14-Jun-2020 + +common: + * Improved thread locking behaviour in blas_server and parallel getrf + * Imported bugfix 394 from LAPACK (spurious reference to "XERBL" + due to overlong lines) + * Imported bugfix 403 from LAPACK (compile option "recursive" required + for correctness with Intel and PGI) + * Imported bugfix 408 from LAPACK (wrong scaling in ZHEEQUB) + * Imported bugfix 411 from LAPACK (infinite loop in LARGV/LARTG/LARTGP) + * Fixed mismatches between BUFFERSIZE and GEMM_UNROLL parameters that + could lead to crashes at large matrix sizes + * Restored internal soname in dynamic libraries on FreeBSD and Dragonfly + * Added API (openblas_setaffinity) to set the thread affinity on Linux + * Added initial infrastructure for half-precision floating point + (bfloat16) support with a generic implementation of SHGEMM + * Added CMAKE build system support for building the cblas_Xgemm3m + functions + * Fixed CMAKE support for building in a path with embedded spaces + * Fixed CMAKE (non)handling of NO_EXPRECISION and MAX_STACK_ALLOC + * Fixed GCC version detection in the Makefiles + * Allowed overriding the names of AR, AS and LD in Makefile builds + +POWER: + * Fixed big-endian POWER8 ELFv2 builds on FreeBSD + * Fixed GCC version checks and DYNAMIC_ARCH builds on POWER9 + * Fixed CMAKE build support for POWER9 + * fixed a potential race condition in the thread buffer allocation + * Worked around LAPACK test failures on PPC G4 + +MIPS: + * Fixed a potential race condition in the thread buffer allocation + * Added support for MIPS 24K/24KE family based on P5600 kernels + +MIPS64: + * fixed a potential race condition in the thread buffer allocation + * Added TARGET=GENERIC + +ARMV7: + * Fixed a race condition in the thread buffer allocation + +ARMV8: + * Fixed a race condition in the thread buffer allocation + * Fixed zero initialisation in the assembly for SGEMM and DGEMM BETA + * Improved performance of the ThunderX2 DAXPY kernel + * Added an optimized SGEMM kernel for Cortex A53 + * Fixed Makefile support for INTERFACE64 (8-byte integer) + +x86_64: + * Fixed a syntax error in the CMAKE setup for SkylakeX + * Improved performance of STRSM on Haswell, SkylakeX and Ryzen + * Improved SGEMM performance on SGEMM for workloads with ldc a + multiple of 1024 + * Improved DGEMM performance on Skylake X + * Fixed unwanted AVX512-dependency of SGEMM in DYNAMIC_ARCH + builds created on SkylakeX + * Removed data alignment requirement in the SSE2 copy kernels + that could cause spurious crashes + * Added a workaround for an optimizer bug in AppleClang 11.0.3 + * Fixed LAPACK test failures due to wrong options for Intel Fortran + * Fixed compilation and LAPACK test results with recent Flang + and AMD AOCC + * Fixed DYNAMIC_ARCH builds with CMAKE on OS X + * Fixed missing exports of cblas_i?amin, cblas_i?min, cblas_i?max, + cblas_?sum, cblas_?gemm3m in the shared 
library on OSX
+ * Fixed reporting of cpu name in DYNAMIC_ARCH builds (would sometimes
+   show the name of an older generation chip supported by the same kernels)
+
+IBM Z:
+ * Improved performance of SGEMM/STRMM and DGEMM/DTRMM on Z14
+
+====================================================================
+Version 0.3.9
+ 1-Mar-2020
+
+ common:
+ * Fixed a miscompilation of the GETRF functions with CMAKE
+ * Imported bugfix 390 from LAPACK (missing NaN propagation in xCOMBSSQ)
+ * The size of the memory buffer used for splitting GEMM tasks across
+   multiple threads can now be configured in the build system.
+
+POWER:
+ * Fixed several compilation problems related to endianness
+   and ELF version on POWER8 and POWER9
+ * Fixed use of the absolute value IAMIN/IAMAX instead of IMIN/IMAX
+ * Fixed a race condition in the level3 blas code
+
+MIPS64:
+ * Fixed use of the absolute value IAMIN/IAMAX instead of IMIN/IMAX
+
+ARMV7:
+ * Fixed a race condition in the level3 blas code
+ * Fixed compilation on Android
+ARMV8:
+ * Added support for Ampere EMAG8180
+ * Added support for Neoverse N1
+ * Improved performance of the blas_lock function
+ * Fixed a race condition in the level3 blas code
+ * Fixed a performance regression on TSV110-based servers
+
+x86_64:
+ * Fixed a long-standing error with undeclared register overwrites
+   in the DSCAL microkernel for HASWELL,SKYLAKEX and ZEN
+ * Fixed a long-standing bug in the SSE implementation of IAMAX
+ * Fixed a CMAKE build failure with DYNAMIC_ARCH
+ * Fixed cpu autodetection of Goldmont+, Cannon Lake and Ice Lake
+ * Fixed a compilation failure on OSX with compiler name containing dash
+ * Fixed compilation with MinGW on SkylakeX
+ * Improved speed of the AVX512 GEMM3M kernel on SkylakeX
+ * Added an AVX512 STRMM kernel for SkylakeX
+ * Improved GEMM performance on Haswell and Zen
+
+zarch:
+ * fixed compilation of the DYNAMIC_ARCH code
+
+====================================================================
+Version 0.3.8
+ 9-Feb-2020
+
+common:
+ * LAPACK has been updated to 3.9.0 (plus patches up to
+   January 2nd, 2020)
+ * CMAKE support has been improved in several areas including
+   cross-compilation
+ * a thread race condition in the GEMM3M kernels was resolved
+ * the "generic" (plain C) gemm beta kernel used by many targets
+   has been sped up
+ * an optimized version of the LAPACK trtrs functions has been added
+ * an incompatibility between the LAPACK tests and the OpenBLAS
+   implementation of XERBLA was resolved, removing the numerous
+   warnings about wrong error exits in the former
+ * support for NetBSD has been added
+ * support for compilation with g95 and non-GNU versions of ld
+   has been improved
+ * support for compilation with (upcoming) gcc 10 has been added
+
+POWER:
+ * worked around miscompilation of several POWER8 and POWER9
+   kernels by older versions of gcc
+ * added support for big-endian POWER8 and for compilation on AIX
+ * corrected bugs in the big-endian support for PPC440 and PPC970
+ * DYNAMIC_ARCH support is now available in CMAKE builds as well
+
+ARMV8:
+ * performance of DGEMM_BETA and SGEMM_NCOPY has been improved
+ * compilation for 32bit works again
+ * performance of the RPCC function has been improved
+ * improved performance on small systems
+ * DYNAMIC_ARCH support is now available in CMAKE builds as well
+ * cross-compilation from OSX to IOS was simplified
+
+x86_64:
+ * a new AVX512 DGEMM kernel was added and the AVX512 SGEMM kernel
+   was significantly improved
+ * optimized AVX512 kernels for CGEMM and ZGEMM
have been added
+ * AVX2 kernels for STRMM, SGEMM, and CGEMM have been significantly
+   sped up and optimized CGEMM3M and ZGEMM3M kernels have been added
+ * added support for QEMU virtual cpus
+ * a compilation problem with PGI and SUN compilers was fixed
+ * Intel "Goldmont plus" is now autodetected
+ * a potential crash on program exit on MS Windows has been fixed
+
+x86:
+ * an unwanted case sensitivity in the implementation of LSAME
+   on older 32bit AMD cpus was fixed
+
+zarch:
+ * Z15 is now supported as Z14
+ * DYNAMIC_ARCH is now available on ZARCH as well
+
+====================================================================
+Version 0.3.7
+11-Aug-2019
+
+common:
+ * having the gmake special variables TARGET_ARCH or TARGET_MACH
+   defined no longer causes build failures in ctest or utest
+ * defining NO_AFFINITY or USE_TLS to 0 in gmake builds no longer
+   has the same effect as setting them to 1
+ * a new test program was added to allow checking the library for
+   thread safety
+ * a new option USE_LOCKING was added to ensure thread safety when
+   OpenBLAS itself is built without multithreading but will be
+   called from multiple threads.
+ * a build failure on Linux with glibc versions earlier than 2.5
+   was fixed
+ * a runtime error with CPU enumeration (and NO_AFFINITY not set)
+   on glibc 2.6 was fixed
+ * NO_AFFINITY was added to the CMAKE options (and defaults to being
+   active on Linux, as in the gmake builds)
+
+x86_64:
+ * the build-time logic for detection of AVX512 availability in
+   the processor and compiler was fixed
+ * gmake builds on OSX now set the internal name of the library to
+   libopenblas.0.dylib (consistent with CMAKE)
+ * the Haswell DGEMM kernel received a significant speedup through
+   improved prefetch and load instructions
+ * performance of DGEMM, DTRMM, DTRSM and ZDOT on Zen/Zen2 was markedly
+   increased by avoiding vpermpd instructions
+ * the SKYLAKEX (AVX512) DGEMM helper functions have now been disabled
+   to fix remaining errors in DGEMM, DSYMM and DTRMM
+
+POWER:
+ * added support for building on FreeBSD/powerpc64 and FreeBSD/ppc970
+ * added optimized kernels for POWER9 SGEMM and STRMM
+
+ARMV7:
+ * fixed the softfp implementations of xAMAX and IxAMAX
+ * removed the predefined -march= flags on both ARMV5 and ARMV6 as
+   they were appropriate for only a subset of platforms
+
 ====================================================================
 Version 0.3.6
 29-Apr-2019
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 000000000..2b61bed9f
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,9 @@
+node {
+    stage('Checkout') {
+        checkout scm
+    }
+
+    stage('Build') {
+        sh("make")
+    }
+}
diff --git a/Makefile b/Makefile
index 273fde33e..de0735c4a 100644
--- a/Makefile
+++ b/Makefile
@@ -34,7 +34,7 @@ endif
 LAPACK_NOOPT := $(filter-out -O0 -O1 -O2 -O3 -Ofast,$(LAPACK_FFLAGS))
-SUBDIRS_ALL = $(SUBDIRS) test ctest utest exports benchmark ../laswp ../bench
+SUBDIRS_ALL = $(SUBDIRS) test ctest utest exports benchmark ../laswp ../bench cpp_thread_test
 .PHONY : all libs netlib $(RELA) test ctest shared install
 .NOTPARALLEL : all libs $(RELA) prof lapack-test install blas-test
@@ -56,10 +56,27 @@ ifneq ($(INTERFACE64), 0)
	@echo " Use 64 bits int (equivalent to \"-i8\" in Fortran) "
 endif
 endif
-
-	@echo " C compiler ... $(C_COMPILER) (command line : $(CC))"
+	@$(CC) --version > /dev/null 2>&1;\
+	if [ $$?
-eq 0 ]; then \ + cverinfo=`$(CC) --version | sed -n '1p'`; \ + if [ -z "$${cverinfo}" ]; then \ + cverinfo=`$(CC) --version | sed -n '2p'`; \ + fi; \ + echo " C compiler ... $(C_COMPILER) (cmd & version : $${cverinfo})";\ + else \ + echo " C compiler ... $(C_COMPILER) (command line : $(CC))";\ + fi ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) - @echo " Fortran compiler ... $(F_COMPILER) (command line : $(FC))" + @$(FC) --version > /dev/null 2>&1;\ + if [ $$? -eq 0 ]; then \ + fverinfo=`$(FC) --version | sed -n '1p'`; \ + if [ -z "$${fverinfo}" ]; then \ + fverinfo=`$(FC) --version | sed -n '2p'`; \ + fi; \ + echo " Fortran compiler ... $(F_COMPILER) (cmd & version : $${fverinfo})";\ + else \ + echo " Fortran compiler ... $(F_COMPILER) (command line : $(FC))";\ + fi endif ifneq ($(OSNAME), AIX) @echo -n " Library Name ... $(LIBNAME)" @@ -68,9 +85,13 @@ else endif ifndef SMP - @echo " (Single threaded) " + @echo " (Single-threading) " else - @echo " (Multi threaded; Max num-threads is $(NUM_THREADS))" + @echo " (Multi-threading; Max num-threads is $(NUM_THREADS))" +endif + +ifeq ($(DYNAMIC_ARCH), 1) + @echo " Supporting multiple $(ARCH) cpu models with minimum requirement for the common code being $(CORE)" endif ifeq ($(USE_OPENMP), 1) @@ -97,18 +118,19 @@ endif shared : ifneq ($(NO_SHARED), 1) -ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku)) +ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku FreeBSD DragonFly)) @$(MAKE) -C exports so @ln -fs $(LIBSONAME) $(LIBPREFIX).so @ln -fs $(LIBSONAME) $(LIBPREFIX).so.$(MAJOR_VERSION) endif -ifeq ($(OSNAME), $(filter $(OSNAME),FreeBSD OpenBSD NetBSD DragonFly)) +ifeq ($(OSNAME), $(filter $(OSNAME),OpenBSD NetBSD)) @$(MAKE) -C exports so @ln -fs $(LIBSONAME) $(LIBPREFIX).so endif ifeq ($(OSNAME), Darwin) @$(MAKE) -C exports dyn @ln -fs $(LIBDYNNAME) $(LIBPREFIX).dylib + @ln -fs $(LIBDYNNAME) $(LIBPREFIX).$(MAJOR_VERSION).dylib endif ifeq ($(OSNAME), WINNT) @$(MAKE) -C exports dll @@ -123,10 +145,13 @@ ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) touch $(LIBNAME) ifndef NO_FBLAS $(MAKE) -C test all - $(MAKE) -C utest all endif -ifndef NO_CBLAS + $(MAKE) -C utest all +ifneq ($(NO_CBLAS), 1) $(MAKE) -C ctest all +ifeq ($(CPP_THREAD_SAFETY_TEST), 1) + $(MAKE) -C cpp_thread_test all +endif endif endif @@ -225,7 +250,7 @@ ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) @$(MAKE) -C $(NETLIB_LAPACK_DIR) lapacklib @$(MAKE) -C $(NETLIB_LAPACK_DIR) tmglib endif -ifndef NO_LAPACKE +ifneq ($(NO_LAPACKE), 1) @$(MAKE) -C $(NETLIB_LAPACK_DIR) lapackelib endif endif @@ -243,21 +268,26 @@ prof_lapack : lapack_prebuild lapack_prebuild : ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) - -@echo "FORTRAN = $(FC)" > $(NETLIB_LAPACK_DIR)/make.inc - -@echo "OPTS = $(LAPACK_FFLAGS)" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "FC = $(FC)" > $(NETLIB_LAPACK_DIR)/make.inc + -@echo "FFLAGS = $(LAPACK_FFLAGS)" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "FFLAGS_DRV = $(LAPACK_FFLAGS)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "POPTS = $(LAPACK_FPFLAGS)" >> $(NETLIB_LAPACK_DIR)/make.inc - -@echo "NOOPT = -O0 $(LAPACK_NOOPT)" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "FFLAGS_NOOPT = -O0 $(LAPACK_NOOPT)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "PNOOPT = $(LAPACK_FPFLAGS) -O0" >> $(NETLIB_LAPACK_DIR)/make.inc - -@echo "LOADOPTS = $(FFLAGS) $(EXTRALIB)" >> $(NETLIB_LAPACK_DIR)/make.inc +ifeq ($(C_COMPILER)$(F_COMPILER)$(USE_OPENMP), CLANGGFORTRAN1) + -@echo "LDFLAGS = $(FFLAGS) $(EXTRALIB) -lomp" >> $(NETLIB_LAPACK_DIR)/make.inc +else + -@echo "LDFLAGS = $(FFLAGS) 
$(EXTRALIB)" >> $(NETLIB_LAPACK_DIR)/make.inc +endif -@echo "CC = $(CC)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "override CFLAGS = $(LAPACK_CFLAGS)" >> $(NETLIB_LAPACK_DIR)/make.inc - -@echo "override ARCH = $(AR)" >> $(NETLIB_LAPACK_DIR)/make.inc - -@echo "ARCHFLAGS = $(ARFLAGS) -ru" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "AR = $(AR)" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "ARFLAGS = $(ARFLAGS) -ru" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "RANLIB = $(RANLIB)" >> $(NETLIB_LAPACK_DIR)/make.inc - -@echo "LAPACKLIB = ../$(LIBNAME)" >> $(NETLIB_LAPACK_DIR)/make.inc - -@echo "TMGLIB = ../$(LIBNAME)" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "LAPACKLIB = ../../$(LIBNAME)" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "TMGLIB = ../../../$(LIBNAME)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "BLASLIB = ../../../$(LIBNAME)" >> $(NETLIB_LAPACK_DIR)/make.inc - -@echo "LAPACKELIB = ../$(LIBNAME)" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "LAPACKELIB = ../../../$(LIBNAME)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "LAPACKLIB_P = ../$(LIBNAME_P)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "SUFFIX = $(SUFFIX)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "PSUFFIX = $(PSUFFIX)" >> $(NETLIB_LAPACK_DIR)/make.inc @@ -281,6 +311,18 @@ else endif ifeq ($(BUILD_LAPACK_DEPRECATED), 1) -@echo "BUILD_DEPRECATED = 1" >> $(NETLIB_LAPACK_DIR)/make.inc +endif +ifeq ($(BUILD_SINGLE), 1) + -@echo "BUILD_SINGLE = 1" >> $(NETLIB_LAPACK_DIR)/make.inc +endif +ifeq ($(BUILD_DOUBLE), 1) + -@echo "BUILD_DOUBLE = 1" >> $(NETLIB_LAPACK_DIR)/make.inc +endif +ifeq ($(BUILD_COMPLEX), 1) + -@echo "BUILD_COMPLEX = 1" >> $(NETLIB_LAPACK_DIR)/make.inc +endif +ifeq ($(BUILD_COMPLEX16), 1) + -@echo "BUILD_COMPLEX16 = 1" >> $(NETLIB_LAPACK_DIR)/make.inc endif -@echo "LAPACKE_WITH_TMG = 1" >> $(NETLIB_LAPACK_DIR)/make.inc -@cat make.inc >> $(NETLIB_LAPACK_DIR)/make.inc @@ -313,9 +355,9 @@ lapack-test : $(MAKE) -j 1 -C $(NETLIB_LAPACK_DIR)/TESTING/EIG xeigtstc xeigtstd xeigtsts xeigtstz $(MAKE) -j 1 -C $(NETLIB_LAPACK_DIR)/TESTING/LIN xlintstc xlintstd xlintstds xlintstrfd xlintstrfz xlintsts xlintstz xlintstzc xlintstrfs xlintstrfc ifneq ($(CROSS), 1) - ( cd $(NETLIB_LAPACK_DIR)/INSTALL; make all; ./testlsame; ./testslamch; ./testdlamch; \ + ( cd $(NETLIB_LAPACK_DIR)/INSTALL; $(MAKE) all; ./testlsame; ./testslamch; ./testdlamch; \ ./testsecond; ./testdsecnd; ./testieee; ./testversion ) - (cd $(NETLIB_LAPACK_DIR); ./lapack_testing.py -r ) + (cd $(NETLIB_LAPACK_DIR); ./lapack_testing.py -r -b TESTING) endif lapack-runtest: @@ -345,11 +387,12 @@ clean :: @$(MAKE) -C kernel clean #endif @$(MAKE) -C reference clean - @rm -f *.$(LIBSUFFIX) *.so *~ *.exe getarch getarch_2nd *.dll *.lib *.$(SUFFIX) *.dwf $(LIBPREFIX).$(LIBSUFFIX) $(LIBPREFIX)_p.$(LIBSUFFIX) $(LIBPREFIX).so.$(MAJOR_VERSION) *.lnk myconfig.h + @rm -f *.$(LIBSUFFIX) *.so *~ *.exe getarch getarch_2nd *.dll *.lib *.$(SUFFIX) *.dwf $(LIBPREFIX).$(LIBSUFFIX) $(LIBPREFIX)_p.$(LIBSUFFIX) $(LIBPREFIX).so.$(MAJOR_VERSION) *.lnk myconfig.h *.so.renamed *.a.renamed *.so.0 ifeq ($(OSNAME), Darwin) @rm -rf getarch.dSYM getarch_2nd.dSYM endif @rm -f Makefile.conf config.h Makefile_kernel.conf config_kernel.h st* *.dylib + @rm -f cblas.tmp cblas.tmp2 @touch $(NETLIB_LAPACK_DIR)/make.inc @$(MAKE) -C $(NETLIB_LAPACK_DIR) clean @rm -f $(NETLIB_LAPACK_DIR)/make.inc $(NETLIB_LAPACK_DIR)/lapacke/include/lapacke_mangling.h diff --git a/Makefile.arm b/Makefile.arm index eedd39b73..a27b58e84 100644 --- a/Makefile.arm +++ b/Makefile.arm @@ -1,7 +1,7 @@ ifeq ($(CORE), $(filter $(CORE),ARMV7 CORTEXA9 CORTEXA15)) ifeq 
($(OSNAME), Android) -CCOMMON_OPT += -mfpu=neon -march=armv7-a -FCOMMON_OPT += -mfpu=neon -march=armv7-a +CCOMMON_OPT += -mfpu=neon -march=armv7-a +FCOMMON_OPT += -mfpu=neon -march=armv7-a else CCOMMON_OPT += -mfpu=vfpv3 -march=armv7-a FCOMMON_OPT += -mfpu=vfpv3 -march=armv7-a @@ -9,11 +9,11 @@ endif endif ifeq ($(CORE), ARMV6) -CCOMMON_OPT += -mfpu=vfp -march=armv6 -FCOMMON_OPT += -mfpu=vfp -march=armv6 +CCOMMON_OPT += -mfpu=vfp +FCOMMON_OPT += -mfpu=vfp endif -ifeq ($(CORE), ARMV5) -CCOMMON_OPT += -march=armv5 -FCOMMON_OPT += -march=armv5 +ifdef HAVE_NEON +CCOMMON_OPT += -mfpu=neon +FCOMMON_OPT += -mfpu=neon endif diff --git a/Makefile.arm64 b/Makefile.arm64 index 4d10ff684..c3fe583e4 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -1,4 +1,4 @@ - +ifneq ($(C_COMPILER), PGI) ifeq ($(CORE), ARMV8) CCOMMON_OPT += -march=armv8-a FCOMMON_OPT += -march=armv8-a @@ -24,6 +24,23 @@ CCOMMON_OPT += -march=armv8-a -mtune=cortex-a73 FCOMMON_OPT += -march=armv8-a -mtune=cortex-a73 endif +# Use a72 tunings because Neoverse-N1 is only available +# in GCC>=9 +ifeq ($(CORE), NEOVERSEN1) +ifeq ($(GCCVERSIONGTEQ7), 1) +ifeq ($(GCCVERSIONGTEQ9), 1) +CCOMMON_OPT += -march=armv8.2-a -mtune=neoverse-n1 +FCOMMON_OPT += -march=armv8.2-a -mtune=neoverse-n1 +else +CCOMMON_OPT += -march=armv8.2-a -mtune=cortex-a72 +FCOMMON_OPT += -march=armv8.2-a -mtune=cortex-a72 +endif +else +CCOMMON_OPT += -march=armv8-a -mtune=cortex-a72 +FCOMMON_OPT += -march=armv8-a -mtune=cortex-a72 +endif +endif + ifeq ($(CORE), THUNDERX) CCOMMON_OPT += -march=armv8-a -mtune=thunderx FCOMMON_OPT += -march=armv8-a -mtune=thunderx @@ -39,7 +56,25 @@ CCOMMON_OPT += -march=armv8.1-a -mtune=thunderx2t99 FCOMMON_OPT += -march=armv8.1-a -mtune=thunderx2t99 endif +ifeq ($(CORE), THUNDERX3T110) +ifeq ($(GCCVERSIONGTEQ10), 1) +CCOMMON_OPT += -march=armv8.3-a -mtune=thunderx3t110 +FCOMMON_OPT += -march=armv8.3-a -mtune=thunderx3t110 +else +CCOMMON_OPT += -march=armv8.1-a -mtune=thunderx2t99 +FCOMMON_OPT += -march=armv8.1-a -mtune=thunderx2t99 +endif +endif + +ifeq ($(CORE), VORTEX) +CCOMMON_OPT += -march=armv8.3-a +FCOMMON_OPT += -march=armv8.3-a +endif + +ifeq ($(GCCVERSIONGTEQ9), 1) ifeq ($(CORE), TSV110) CCOMMON_OPT += -march=armv8.2-a -mtune=tsv110 FCOMMON_OPT += -march=armv8.2-a -mtune=tsv110 endif +endif +endif diff --git a/Makefile.install b/Makefile.install index fefecd98d..e8b64465f 100644 --- a/Makefile.install +++ b/Makefile.install @@ -9,10 +9,18 @@ OPENBLAS_INCLUDE_DIR := $(PREFIX)/include OPENBLAS_LIBRARY_DIR := $(PREFIX)/lib OPENBLAS_BINARY_DIR := $(PREFIX)/bin OPENBLAS_BUILD_DIR := $(CURDIR) -OPENBLAS_CMAKE_DIR := $(OPENBLAS_LIBRARY_DIR)/cmake/openblas +OPENBLAS_CMAKE_DIR := $(OPENBLAS_LIBRARY_DIR)/cmake/$(LIBSONAMEBASE) OPENBLAS_CMAKE_CONFIG := OpenBLASConfig.cmake OPENBLAS_CMAKE_CONFIG_VERSION := OpenBLASConfigVersion.cmake OPENBLAS_PKGCONFIG_DIR := $(OPENBLAS_LIBRARY_DIR)/pkgconfig +PKG_EXTRALIB := $(EXTRALIB) +ifeq ($(USE_OPENMP), 1) + ifeq ($(C_COMPILER), PGI) + PKG_EXTRALIB += -lomp + else + PKG_EXTRALIB += -lgomp + endif +endif .PHONY : install .NOTPARALLEL : install @@ -45,12 +53,28 @@ install : lib.grd ifndef NO_CBLAS @echo Generating cblas.h in $(DESTDIR)$(OPENBLAS_INCLUDE_DIR) - @sed 's/common/openblas_config/g' cblas.h > "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/cblas.h" + @cp cblas.h cblas.tmp +ifdef SYMBOLPREFIX + @sed 's/cblas[^( ]*/$(SYMBOLPREFIX)&/g' cblas.tmp > cblas.tmp2 + @sed 's/openblas[^( ]*/$(SYMBOLPREFIX)&/g' cblas.tmp2 > cblas.tmp + #change back any openblas_complex_float and double that got hit + @sed 
's/$(SYMBOLPREFIX)openblas_complex_/openblas_complex_/g' cblas.tmp > cblas.tmp2 + @sed 's/goto[^( ]*/$(SYMBOLPREFIX)&/g' cblas.tmp2 > cblas.tmp +endif +ifdef SYMBOLSUFFIX + @sed 's/cblas[^( ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp > cblas.tmp2 + @sed 's/openblas[^( ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp2 > cblas.tmp + #change back any openblas_complex_float and double that got hit + @sed 's/\(openblas_complex_\)\([^ ]*\)$(SYMBOLSUFFIX)/\1\2 /g' cblas.tmp > cblas.tmp2 + @sed 's/goto[^( ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp2 > cblas.tmp +endif + @sed 's/common/openblas_config/g' cblas.tmp > "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/cblas.h" endif ifneq ($(OSNAME), AIX) ifndef NO_LAPACKE @echo Copying LAPACKE header files to $(DESTDIR)$(OPENBLAS_INCLUDE_DIR) + @-install -pm644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapack.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapack.h" @-install -pm644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke.h" @-install -pm644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke_config.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke_config.h" @-install -pm644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke_mangling_with_flags.h.in "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke_mangling.h" @@ -67,23 +91,24 @@ endif #for install shared library ifneq ($(NO_SHARED),1) @echo Copying the shared library to $(DESTDIR)$(OPENBLAS_LIBRARY_DIR) -ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku)) +ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku FreeBSD DragonFly)) @install -pm755 $(LIBSONAME) "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" @cd "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" ; \ ln -fs $(LIBSONAME) $(LIBPREFIX).so ; \ ln -fs $(LIBSONAME) $(LIBPREFIX).so.$(MAJOR_VERSION) endif -ifeq ($(OSNAME), $(filter $(OSNAME),FreeBSD OpenBSD NetBSD DragonFly)) +ifeq ($(OSNAME), $(filter $(OSNAME),OpenBSD NetBSD)) @cp $(LIBSONAME) "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" @cd "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" ; \ ln -fs $(LIBSONAME) $(LIBPREFIX).so endif ifeq ($(OSNAME), Darwin) @-cp $(LIBDYNNAME) "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" - @-install_name_tool -id "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)/$(LIBDYNNAME)" "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)/$(LIBDYNNAME)" + @-install_name_tool -id "$(OPENBLAS_LIBRARY_DIR)/$(LIBPREFIX).$(MAJOR_VERSION).dylib" "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)/$(LIBDYNNAME)" @cd "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" ; \ - ln -fs $(LIBDYNNAME) $(LIBPREFIX).dylib + ln -fs $(LIBDYNNAME) $(LIBPREFIX).dylib ; \ + ln -fs $(LIBDYNNAME) $(LIBPREFIX).$(MAJOR_VERSION).dylib endif ifeq ($(OSNAME), WINNT) @-cp $(LIBDLLNAME) "$(DESTDIR)$(OPENBLAS_BINARY_DIR)" @@ -99,6 +124,7 @@ else #install on AIX has different options syntax ifndef NO_LAPACKE @echo Copying LAPACKE header files to $(DESTDIR)$(OPENBLAS_INCLUDE_DIR) + @-installbsd -c -m 644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapack.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapack.h" @-installbsd -c -m 644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke.h" @-installbsd -c -m 644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke_config.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke_config.h" @-installbsd -c -m 644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke_mangling_with_flags.h.in "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke_mangling.h" @@ -124,13 +150,13 @@ endif endif #Generating openblas.pc - @echo Generating openblas.pc in "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)" - @echo 'libdir='$(OPENBLAS_LIBRARY_DIR) > "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" - @echo 'includedir='$(OPENBLAS_INCLUDE_DIR) >> 
"$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" - @echo 'openblas_config= USE_64BITINT='$(USE_64BITINT) 'DYNAMIC_ARCH='$(DYNAMIC_ARCH) 'DYNAMIC_OLDER='$(DYNAMIC_OLDER) 'NO_CBLAS='$(NO_CBLAS) 'NO_LAPACK='$(NO_LAPACK) 'NO_LAPACKE='$(NO_LAPACKE) 'NO_AFFINITY='$(NO_AFFINITY) 'USE_OPENMP='$(USE_OPENMP) $(CORE) 'MAX_THREADS='$(NUM_THREADS)>> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" - @echo 'version='$(VERSION) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" - @echo 'extralib='$(EXTRALIB) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" - @cat openblas.pc.in >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" + @echo Generating $(LIBSONAMEBASE).pc in "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)" + @echo 'libdir='$(OPENBLAS_LIBRARY_DIR) > "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/$(LIBSONAMEBASE).pc" + @echo 'includedir='$(OPENBLAS_INCLUDE_DIR) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/$(LIBSONAMEBASE).pc" + @echo 'openblas_config= USE_64BITINT='$(USE_64BITINT) 'DYNAMIC_ARCH='$(DYNAMIC_ARCH) 'DYNAMIC_OLDER='$(DYNAMIC_OLDER) 'NO_CBLAS='$(NO_CBLAS) 'NO_LAPACK='$(NO_LAPACK) 'NO_LAPACKE='$(NO_LAPACKE) 'NO_AFFINITY='$(NO_AFFINITY) 'USE_OPENMP='$(USE_OPENMP) $(CORE) 'MAX_THREADS='$(NUM_THREADS)>> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/$(LIBSONAMEBASE).pc" + @echo 'version='$(VERSION) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/$(LIBSONAMEBASE).pc" + @echo 'extralib='$(PKG_EXTRALIB) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/$(LIBSONAMEBASE).pc" + @cat openblas.pc.in >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/$(LIBSONAMEBASE).pc" #Generating OpenBLASConfig.cmake @@ -165,4 +191,3 @@ endif @echo " endif ()" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG_VERSION)" @echo "endif ()" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG_VERSION)" @echo Install OK! 
- diff --git a/Makefile.power b/Makefile.power index 195f1930f..946f55232 100644 --- a/Makefile.power +++ b/Makefile.power @@ -9,26 +9,80 @@ else USE_OPENMP = 1 endif +ifeq ($(CORE), POWER10) +ifneq ($(C_COMPILER), PGI) +CCOMMON_OPT += -Ofast -mcpu=power10 -mtune=power10 -mvsx -fno-fast-math +FCOMMON_OPT += -O2 -frecursive -mcpu=power10 -mtune=power10 -fno-fast-math +endif +endif + ifeq ($(CORE), POWER9) -ifeq ($(USE_OPENMP), 1) -COMMON_OPT += -Ofast -mcpu=power9 -mtune=power9 -mvsx -malign-power -DUSE_OPENMP -fno-fast-math -fopenmp -FCOMMON_OPT += -O2 -frecursive -mcpu=power9 -mtune=power9 -malign-power -DUSE_OPENMP -fno-fast-math -fopenmp +ifneq ($(C_COMPILER), PGI) +CCOMMON_OPT += -Ofast -mvsx -fno-fast-math +ifeq ($(C_COMPILER), GCC) +ifneq ($(GCCVERSIONGT4), 1) +$(warning your compiler is too old to fully support POWER9, getting a newer version of gcc is recommended) +CCOMMON_OPT += -mcpu=power8 -mtune=power8 +else +CCOMMON_OPT += -mcpu=power9 -mtune=power9 +endif +else +CCOMMON_OPT += -mcpu=power9 -mtune=power9 +endif +else +CCOMMON_OPT += -fast -Mvect=simd -Mcache_align +endif +ifneq ($(F_COMPILER), PGI) +FCOMMON_OPT += -O2 -frecursive -fno-fast-math +ifeq ($(C_COMPILER), GCC) +ifneq ($(GCCVERSIONGT4), 1) +$(warning your compiler is too old to fully support POWER9, getting a newer version of gcc is recommended) +FCOMMON_OPT += -mcpu=power8 -mtune=power8 else -COMMON_OPT += -Ofast -mcpu=power9 -mtune=power9 -mvsx -malign-power -fno-fast-math -FCOMMON_OPT += -O2 -frecursive -mcpu=power9 -mtune=power9 -malign-power -fno-fast-math +FCOMMON_OPT += -mcpu=power9 -mtune=power9 +endif +else +FCOMMON_OPT += -mcpu=power9 -mtune=power9 +endif +else +FCOMMON_OPT += -O2 -Mrecursive endif endif ifeq ($(CORE), POWER8) +ifneq ($(C_COMPILER), PGI) +CCOMMON_OPT += -Ofast -mcpu=power8 -mtune=power8 -mvsx -fno-fast-math +else +CCOMMON_OPT += -fast -Mvect=simd -Mcache_align +endif +ifneq ($(F_COMPILER), PGI) +ifeq ($(OSNAME), AIX) +FCOMMON_OPT += -O1 -frecursive -mcpu=power8 -mtune=power8 -fno-fast-math +else +FCOMMON_OPT += -O2 -frecursive -mcpu=power8 -mtune=power8 -fno-fast-math +endif +else +FCOMMON_OPT += -O2 -Mrecursive +endif +endif + ifeq ($(USE_OPENMP), 1) -COMMON_OPT += -Ofast -mcpu=power8 -mtune=power8 -mvsx -malign-power -DUSE_OPENMP -fno-fast-math -fopenmp -FCOMMON_OPT += -O2 -frecursive -mcpu=power8 -mtune=power8 -malign-power -DUSE_OPENMP -fno-fast-math -fopenmp +ifneq ($(C_COMPILER), PGI) +CCOMMON_OPT += -DUSE_OPENMP -fopenmp +else +CCOMMON_OPT += -DUSE_OPENMP -mp +endif +ifneq ($(F_COMPILER), PGI) +FCOMMON_OPT += -DUSE_OPENMP -fopenmp else -COMMON_OPT += -Ofast -mcpu=power8 -mtune=power8 -mvsx -malign-power -fno-fast-math -FCOMMON_OPT += -O2 -frecursive -mcpu=power8 -mtune=power8 -malign-power -fno-fast-math +FCOMMON_OPT += -DUSE_OPENMP -mp endif endif +# workaround for C->FORTRAN ABI violation in LAPACKE +ifeq ($(F_COMPILER), GFORTRAN) +FCOMMON_OPT += -fno-optimize-sibling-calls +endif FLAMEPATH = $(HOME)/flame/lib @@ -64,6 +118,9 @@ CCOMMON_OPT += -mpowerpc64 -maix64 ifeq ($(COMPILER_F77), g77) FCOMMON_OPT += -mpowerpc64 -maix64 endif +ifeq ($(F_COMPILER), GFORTRAN) +FCOMMON_OPT += -mpowerpc64 -maix64 +endif ifeq ($(COMPILER_F77), xlf) FCOMMON_OPT += -q64 endif diff --git a/Makefile.prebuild b/Makefile.prebuild index a366004a1..d6395da7b 100644 --- a/Makefile.prebuild +++ b/Makefile.prebuild @@ -17,7 +17,11 @@ ifdef CPUIDEMU EXFLAGS = -DCPUIDEMU -DVENDOR=99 endif -ifeq ($(TARGET), 1004K) +ifeq ($(TARGET), MIPS24K) +TARGET_FLAGS = -mips32r2 +endif + +ifeq ($(TARGET), MIPS1004K) 
TARGET_FLAGS = -mips32r2 endif @@ -37,12 +41,16 @@ ifeq ($(TARGET), I6500) TARGET_FLAGS = -mips64r6 endif +ifeq ($(TARGET), C910V) +TARGET_FLAGS = -march=rv64gcvxthead -mabi=lp64v +endif + all: getarch_2nd ./getarch_2nd 0 >> $(TARGET_MAKE) ./getarch_2nd 1 >> $(TARGET_CONF) config.h : c_check f_check getarch - perl ./c_check $(TARGET_MAKE) $(TARGET_CONF) $(CC) $(TARGET_FLAGS) + perl ./c_check $(TARGET_MAKE) $(TARGET_CONF) $(CC) $(TARGET_FLAGS) $(CFLAGS) ifneq ($(ONLY_CBLAS), 1) perl ./f_check $(TARGET_MAKE) $(TARGET_CONF) $(FC) $(TARGET_FLAGS) else @@ -59,13 +67,13 @@ endif getarch : getarch.c cpuid.S dummy $(CPUIDEMU) - $(HOSTCC) $(CFLAGS) $(EXFLAGS) -o $(@F) getarch.c cpuid.S $(CPUIDEMU) + $(HOSTCC) $(HOST_CFLAGS) $(EXFLAGS) -o $(@F) getarch.c cpuid.S $(CPUIDEMU) getarch_2nd : getarch_2nd.c config.h dummy ifndef TARGET_CORE - $(HOSTCC) -I. $(CFLAGS) -o $(@F) getarch_2nd.c + $(HOSTCC) -I. $(HOST_CFLAGS) -o $(@F) getarch_2nd.c else - $(HOSTCC) -I. $(CFLAGS) -DBUILD_KERNEL -o $(@F) getarch_2nd.c + $(HOSTCC) -I. $(HOST_CFLAGS) -DBUILD_KERNEL -o $(@F) getarch_2nd.c endif dummy: diff --git a/Makefile.riscv64 b/Makefile.riscv64 new file mode 100644 index 000000000..15d7b059c --- /dev/null +++ b/Makefile.riscv64 @@ -0,0 +1,4 @@ +ifeq ($(CORE), C910V) +CCOMMON_OPT += -march=rv64gcvxthead -mabi=lp64v +FCOMMON_OPT += -march=rv64gcvxthead -mabi=lp64v -static +endif diff --git a/Makefile.rule b/Makefile.rule index 17815096e..c68c20923 100644 --- a/Makefile.rule +++ b/Makefile.rule @@ -3,7 +3,7 @@ # # This library's version -VERSION = 0.3.7.dev +VERSION = 0.3.13.dev # If you set the suffix, the library name will be libopenblas_$(LIBNAMESUFFIX).a # and libopenblas_$(LIBNAMESUFFIX).so. Meanwhile, the soname in shared library @@ -58,6 +58,12 @@ VERSION = 0.3.7.dev # For force setting for multi threaded, specify USE_THREAD = 1 # USE_THREAD = 0 +# If you want to build a single-threaded OpenBLAS, but expect to call this +# from several concurrent threads in some other program, comment this in for +# thread safety. (This is done automatically for USE_THREAD=1 , and should not +# be necessary when USE_OPENMP=1) +# USE_LOCKING = 1 + # If you're going to use this library with OpenMP, please comment it in. # This flag is always set for POWER8. Don't set USE_OPENMP = 0 if you're targeting POWER8. # USE_OPENMP = 1 @@ -91,6 +97,15 @@ VERSION = 0.3.7.dev # they need to wait for the preceding API calls to finish or risk data corruption. # NUM_PARALLEL = 2 +# When multithreading, OpenBLAS needs to use a memory buffer for communicating +# and collating results for individual subranges of the original matrix. Since +# the original GotoBLAS of the early 2000s, the default size of this buffer has +# been set at a value of 32<<20 (which is 32MB) on x86_64 , twice that on PPC. +# If you expect to handle large problem sizes (beyond about 30000x30000) uncomment +# this line and adjust the (32<= 4) +GCCVERSIONGT4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \> 4) +GCCVERSIONGT5 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \> 5) +GCCVERSIONGTEQ7 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 7) +GCCVERSIONGTEQ9 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 9) +GCCVERSIONGTEQ11 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 11) +GCCVERSIONGTEQ10 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 10) +# Note that the behavior of -dumpversion is compile-time-configurable for +# gcc-7.x and newer. 
Use -dumpfullversion there +ifeq ($(GCCVERSIONGTEQ7),1) + GCCDUMPVERSION_PARAM := -dumpfullversion +else + GCCDUMPVERSION_PARAM := -dumpversion +endif +GCCMINORVERSIONGTEQ1 := $(shell expr `$(CC) $(GCCDUMPVERSION_PARAM) | cut -f2 -d.` \>= 1) +GCCMINORVERSIONGTEQ2 := $(shell expr `$(CC) $(GCCDUMPVERSION_PARAM) | cut -f2 -d.` \>= 2) +GCCMINORVERSIONGTEQ7 := $(shell expr `$(CC) $(GCCDUMPVERSION_PARAM) | cut -f2 -d.` \>= 7) endif # @@ -304,13 +394,9 @@ ifeq ($(C_COMPILER), CLANG) CCOMMON_OPT += -DMS_ABI endif -ifeq ($(C_COMPILER), GCC) -#Test for supporting MS_ABI -GCCVERSIONGTEQ4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 4) -GCCVERSIONGT4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \> 4) -GCCMINORVERSIONGTEQ7 := $(shell expr `$(CC) -dumpversion | cut -f2 -d.` \>= 7) +#Version tests for supporting specific features (MS_ABI, POWER9 intrinsics) ifeq ($(GCCVERSIONGT4), 1) -# GCC Majar version > 4 +# GCC Major version > 4 # It is compatible with MSVC ABI. CCOMMON_OPT += -DMS_ABI endif @@ -322,7 +408,6 @@ ifeq ($(GCCMINORVERSIONGTEQ7), 1) CCOMMON_OPT += -DMS_ABI endif endif -endif # Ensure the correct stack alignment on Win32 # http://permalink.gmane.org/gmane.comp.lib.openblas.general/97 @@ -388,6 +473,12 @@ ifneq ($(MAX_STACK_ALLOC), 0) CCOMMON_OPT += -DMAX_STACK_ALLOC=$(MAX_STACK_ALLOC) endif +ifdef USE_LOCKING +ifneq ($(USE_LOCKING), 0) +CCOMMON_OPT += -DUSE_LOCKING +endif +endif + # # Architecture dependent settings # @@ -508,7 +599,7 @@ DYNAMIC_CORE += HASWELL ZEN endif ifneq ($(NO_AVX512), 1) ifneq ($(NO_AVX2), 1) -DYNAMIC_CORE += SKYLAKEX +DYNAMIC_CORE += SKYLAKEX COOPERLAKE endif endif endif @@ -523,15 +614,93 @@ endif ifeq ($(ARCH), arm64) DYNAMIC_CORE = ARMV8 +DYNAMIC_CORE += CORTEXA53 DYNAMIC_CORE += CORTEXA57 +DYNAMIC_CORE += CORTEXA72 +DYNAMIC_CORE += CORTEXA73 +DYNAMIC_CORE += NEOVERSEN1 +DYNAMIC_CORE += FALKOR DYNAMIC_CORE += THUNDERX DYNAMIC_CORE += THUNDERX2T99 +DYNAMIC_CORE += TSV110 +DYNAMIC_CORE += EMAG8180 +DYNAMIC_CORE += THUNDERX3T110 +ifdef DYNAMIC_LIST +override DYNAMIC_CORE = ARMV8 $(DYNAMIC_LIST) +XCCOMMON_OPT = -DDYNAMIC_LIST -DDYN_ARMV8 +XCCOMMON_OPT += $(foreach dcore,$(DYNAMIC_LIST),-DDYN_$(dcore)) +endif +endif + +ifeq ($(ARCH), mips64) +DYNAMIC_CORE = LOONGSON3R3 LOONGSON3R4 +endif + +ifeq ($(ARCH), zarch) +DYNAMIC_CORE = ZARCH_GENERIC + +# if the compiler accepts -march=arch11 or -march=z13 and can compile a file +# with z13-specific inline assembly, then we can include support for Z13. +# note: -march=z13 is equivalent to -march=arch11 yet some compiler releases +# only support one or the other. +# note: LLVM version 6.x supported -march=z13 yet could not handle vector +# registers in inline assembly, so the check for supporting the -march flag is +# not enough. +ZARCH_TEST_COMPILE=-c $(TOPDIR)/kernel/zarch/damin_z13.c -I$(TOPDIR) -o /dev/null > /dev/null 2> /dev/null +ZARCH_CC_SUPPORTS_ARCH11=$(shell $(CC) -march=arch11 $(ZARCH_TEST_COMPILE) && echo 1) +ZARCH_CC_SUPPORTS_Z13=$(shell $(CC) -march=z13 $(ZARCH_TEST_COMPILE) && echo 1) + +ifeq ($(or $(ZARCH_CC_SUPPORTS_ARCH11), $(ZARCH_CC_SUPPORTS_Z13)), 1) +DYNAMIC_CORE += Z13 +CCOMMON_OPT += -DDYN_Z13 +else +$(info OpenBLAS: Not building Z13 kernels because the compiler $(CC) does not support it) +endif + +# as above for z13, check for -march=arch12 and z14 support in the compiler. 
+ZARCH_CC_SUPPORTS_ARCH12=$(shell $(CC) -march=arch12 $(ZARCH_TEST_COMPILE) && echo 1) +ZARCH_CC_SUPPORTS_Z14=$(shell $(CC) -march=z14 $(ZARCH_TEST_COMPILE) && echo 1) +ifeq ($(or $(ZARCH_CC_SUPPORTS_ARCH12), $(ZARCH_CC_SUPPORTS_Z14)), 1) +DYNAMIC_CORE += Z14 +CCOMMON_OPT += -DDYN_Z14 +else +$(info OpenBLAS: Not building Z14 kernels because the compiler $(CC) does not support it) endif +endif # ARCH zarch + ifeq ($(ARCH), power) +ifneq ($(C_COMPILER), PGI) DYNAMIC_CORE = POWER6 DYNAMIC_CORE += POWER8 +ifneq ($(C_COMPILER), GCC) DYNAMIC_CORE += POWER9 +DYNAMIC_CORE += POWER10 +CCOMMON_OPT += -DHAVE_P10_SUPPORT +endif +ifeq ($(C_COMPILER), GCC) +ifeq ($(GCCVERSIONGT5), 1) +DYNAMIC_CORE += POWER9 +else +$(info, OpenBLAS: Your gcc version is too old to build the POWER9 kernels.) +endif +LDVERSIONGTEQ35 := $(shell expr `$(CC) -Wl,--version 2> /dev/null | head -1 | cut -f2 -d "." | cut -f1 -d "-"` \>= 35) +ifeq ($(GCCVERSIONGTEQ11)$(LDVERSIONGTEQ35), 11) +DYNAMIC_CORE += POWER10 +CCOMMON_OPT += -DHAVE_P10_SUPPORT +else ifeq ($(GCCVERSIONGTEQ10), 1) +ifeq ($(GCCMINORVERSIONGTEQ2)$(LDVERSIONGTEQ35), 11) +DYNAMIC_CORE += POWER10 +CCOMMON_OPT += -DHAVE_P10_SUPPORT +endif +else +$(info, OpenBLAS: Your gcc version is too old to build the POWER10 kernels.) +endif +endif +else +DYNAMIC_CORE = POWER8 +DYNAMIC_CORE += POWER9 +endif endif # If DYNAMIC_CORE is not set, DYNAMIC_ARCH cannot do anything, so force it to empty @@ -586,9 +755,22 @@ endif ifeq ($(ARCH), arm64) NO_BINARY_MODE = 1 BINARY_DEFINED = 1 +ifdef INTERFACE64 +ifneq ($(INTERFACE64), 0) +ifeq ($(F_COMPILER), GFORTRAN) +FCOMMON_OPT += -fdefault-integer-8 +endif +ifeq ($(F_COMPILER), FLANG) +FCOMMON_OPT += -i8 +endif +endif +endif endif - +ifeq ($(ARCH), riscv64) +NO_BINARY_MODE = 1 +BINARY_DEFINED = 1 +endif # @@ -621,17 +803,17 @@ CCOMMON_OPT += -mabi=32 BINARY_DEFINED = 1 endif -ifeq ($(CORE), LOONGSON3A) -CCOMMON_OPT += -march=mips64 -FCOMMON_OPT += -march=mips64 +ifeq ($(CORE), $(filter $(CORE),LOONGSON3R3 LOONGSON3R4)) +CCOMMON_OPT += -march=loongson3a +FCOMMON_OPT += -march=loongson3a endif -ifeq ($(CORE), LOONGSON3B) -CCOMMON_OPT += -march=mips64 -FCOMMON_OPT += -march=mips64 +ifeq ($(CORE), MIPS24K) +CCOMMON_OPT += -mips32r2 -mtune=24kc $(MSA_FLAGS) +FCOMMON_OPT += -mips32r2 -mtune=24kc $(MSA_FLAGS) endif -ifeq ($(CORE), 1004K) +ifeq ($(CORE), MIPS1004K) CCOMMON_OPT += -mips32r2 $(MSA_FLAGS) FCOMMON_OPT += -mips32r2 $(MSA_FLAGS) endif @@ -665,7 +847,9 @@ endif ifndef BINARY_DEFINED ifneq ($(OSNAME), AIX) ifdef BINARY64 +ifneq ($(ARCH), riscv64) CCOMMON_OPT += -m64 +endif else CCOMMON_OPT += -m32 endif @@ -675,8 +859,29 @@ endif endif ifeq ($(C_COMPILER), PGI) +PGCVERSIONGT20 := $(shell expr `$(CC) --version|sed -n "2p" |sed -e "s/[^0-9.]//g" |cut -d "." -f 1` \> 20) +PGCVERSIONGTEQ20 := $(shell expr `$(CC) --version|sed -n "2p" |sed -e "s/[^0-9.]//g" |cut -d "." 
-f 1` \>= 20) +PGCMINORVERSIONGE11 := $(shell expr `$(CC) --version|sed -n "2p" |sed -e "s/[^0-9.]//g" |cut -c 4-5` == 11) +PGCVERSIONCHECK := $(PGCVERSIONGT20)$(PGCVERSIONEQ20)$(PGCMINORVERSIONGE11) +ifeq ($(PGCVERSIONCHECK), $(filter $(PGCVERSIONCHECK), 110 111 011)) +NEWPGI := 1 +endif ifdef BINARY64 +ifeq ($(ARCH), x86_64) CCOMMON_OPT += -tp p7-64 +ifneq ($(NEWPGI),1) +CCOMMON_OPT += -D__MMX__ -Mnollvm +endif +else +ifeq ($(ARCH), power) +ifeq ($(CORE), POWER8) +CCOMMON_OPT += -tp pwr8 +endif +ifeq ($(CORE), POWER9) +CCOMMON_OPT += -tp pwr9 +endif +endif +endif else CCOMMON_OPT += -tp p7 endif @@ -696,6 +901,15 @@ endif ifeq ($(F_COMPILER), FLANG) CCOMMON_OPT += -DF_INTERFACE_FLANG +FCOMMON_OPT += -Mrecursive -Kieee +ifeq ($(OSNAME), Linux) +ifeq ($(ARCH), x86_64) +FLANG_VENDOR := $(shell `$(FC) --version|cut -f 1 -d "."|head -1`) +ifeq ($(FLANG_VENDOR),AOCC) +FCOMMON_OPT += -fno-unroll-loops +endif +endif +endif ifdef BINARY64 ifdef INTERFACE64 ifneq ($(INTERFACE64), 0) @@ -736,6 +950,9 @@ else FCOMMON_OPT += -m32 endif endif +ifneq ($(NO_LAPACKE), 1) +FCOMMON_OPT += -fno-second-underscore +endif endif endif @@ -744,6 +961,8 @@ CCOMMON_OPT += -DF_INTERFACE_GFORT FCOMMON_OPT += -Wall # make single-threaded LAPACK calls thread-safe #1847 FCOMMON_OPT += -frecursive +# work around ABI problem with passing single-character arguments +FCOMMON_OPT += -fno-optimize-sibling-calls #Don't include -lgfortran, when NO_LAPACK=1 or lsbcc ifneq ($(NO_LAPACK), 1) EXTRALIB += -lgfortran @@ -761,8 +980,10 @@ endif else ifdef BINARY64 ifneq ($(OSNAME), AIX) +ifneq ($(ARCH), riscv64) FCOMMON_OPT += -m64 endif +endif ifdef INTERFACE64 ifneq ($(INTERFACE64), 0) FCOMMON_OPT += -fdefault-integer-8 @@ -786,6 +1007,7 @@ ifneq ($(INTERFACE64), 0) FCOMMON_OPT += -i8 endif endif +FCOMMON_OPT += -recursive -fp-model strict -assume protect-parens ifeq ($(USE_OPENMP), 1) FCOMMON_OPT += -fopenmp endif @@ -825,10 +1047,28 @@ ifneq ($(INTERFACE64), 0) FCOMMON_OPT += -i8 endif endif +ifeq ($(ARCH), x86_64) FCOMMON_OPT += -tp p7-64 else +ifeq ($(ARCH), power) +ifeq ($(CORE), POWER6) +$(warning NVIDIA HPC compilers do not support POWER6.) +endif +ifeq ($(CORE), POWER8) +FCOMMON_OPT += -tp pwr8 +endif +ifeq ($(CORE), POWER9) +FCOMMON_OPT += -tp pwr9 +endif +ifeq ($(CORE), POWER10) +$(warning NVIDIA HPC compilers do not support POWER10.) 
+endif +endif +endif +else FCOMMON_OPT += -tp p7 endif +FCOMMON_OPT += -Mrecursive -Kieee ifeq ($(USE_OPENMP), 1) FCOMMON_OPT += -mp endif @@ -865,11 +1105,11 @@ FCOMMON_OPT += -n32 else FCOMMON_OPT += -n64 endif -ifeq ($(CORE), LOONGSON3A) +ifeq ($(CORE), LOONGSON3R3) FCOMMON_OPT += -loongson3 -static endif -ifeq ($(CORE), LOONGSON3B) +ifeq ($(CORE), LOONGSON3R4) FCOMMON_OPT += -loongson3 -static endif @@ -895,11 +1135,11 @@ CCOMMON_OPT += -n32 else CCOMMON_OPT += -n64 endif -ifeq ($(CORE), LOONGSON3A) +ifeq ($(CORE), LOONGSON3R3) CCOMMON_OPT += -loongson3 -static endif -ifeq ($(CORE), LOONGSON3B) +ifeq ($(CORE), LOONGSON3R4) CCOMMON_OPT += -loongson3 -static endif @@ -918,16 +1158,25 @@ CCOMMON_OPT += -w ifeq ($(ARCH), x86) CCOMMON_OPT += -m32 else -FCOMMON_OPT += -m64 +ifdef BINARY64 +CCOMMON_OPT += -m64 +else +CCOMMON_OPT += -m32 +endif endif endif ifeq ($(F_COMPILER), SUN) CCOMMON_OPT += -DF_INTERFACE_SUN +FCOMMON_OPT += -ftrap=%none -xrecursive ifeq ($(ARCH), x86) FCOMMON_OPT += -m32 else +ifdef BINARY64 FCOMMON_OPT += -m64 +else +FCOMMON_OPT += -m32 +endif endif ifeq ($(USE_OPENMP), 1) FCOMMON_OPT += -xopenmp=parallel @@ -1001,10 +1250,8 @@ ifdef SMP CCOMMON_OPT += -DSMP_SERVER ifeq ($(ARCH), mips64) -ifneq ($(CORE), LOONGSON3B) USE_SIMPLE_THREADED_LEVEL3 = 1 endif -endif ifeq ($(USE_OPENMP), 1) # USE_SIMPLE_THREADED_LEVEL3 = 1 @@ -1037,6 +1284,10 @@ CCOMMON_OPT += -DUSE_PAPI EXTRALIB += -lpapi -lperfctr endif +ifdef BUFFERSIZE +CCOMMON_OPT += -DBUFFERSIZE=$(BUFFERSIZE) +endif + ifdef DYNAMIC_THREADS CCOMMON_OPT += -DDYNAMIC_THREADS endif @@ -1049,10 +1300,26 @@ ifdef USE_SIMPLE_THREADED_LEVEL3 CCOMMON_OPT += -DUSE_SIMPLE_THREADED_LEVEL3 endif -ifdef USE_TLS +ifeq ($(USE_TLS), 1) CCOMMON_OPT += -DUSE_TLS endif +ifeq ($(BUILD_BFLOAT16), 1) +CCOMMON_OPT += -DBUILD_BFLOAT16 +endif +ifeq ($(BUILD_SINGLE), 1) +CCOMMON_OPT += -DBUILD_SINGLE=1 +endif +ifeq ($(BUILD_DOUBLE), 1) +CCOMMON_OPT += -DBUILD_DOUBLE=1 +endif +ifeq ($(BUILD_COMPLEX), 1) +CCOMMON_OPT += -DBUILD_COMPLEX=1 +endif +ifeq ($(BUILD_COMPLEX16), 1) +CCOMMON_OPT += -DBUILD_COMPLEX16=1 +endif + CCOMMON_OPT += -DVERSION=\"$(VERSION)\" ifndef SYMBOLPREFIX @@ -1063,10 +1330,14 @@ ifndef SYMBOLSUFFIX SYMBOLSUFFIX = endif +ifndef LIBSONAMEBASE +LIBSONAMEBASE = openblas +endif + ifndef LIBNAMESUFFIX -LIBNAMEBASE = $(SYMBOLPREFIX)openblas$(SYMBOLSUFFIX) +LIBNAMEBASE = $(SYMBOLPREFIX)$(LIBSONAMEBASE)$(SYMBOLSUFFIX) else -LIBNAMEBASE = $(SYMBOLPREFIX)openblas$(SYMBOLSUFFIX)_$(LIBNAMESUFFIX) +LIBNAMEBASE = $(SYMBOLPREFIX)$(LIBSONAMEBASE)$(SYMBOLSUFFIX)_$(LIBNAMESUFFIX) endif ifeq ($(OSNAME), CYGWIN_NT) @@ -1079,6 +1350,11 @@ KERNELDIR = $(TOPDIR)/kernel/$(ARCH) include $(TOPDIR)/Makefile.$(ARCH) +ifneq ($(C_COMPILER), PGI) +ifneq ($(C_COMPILER), SUN) +CCOMMON_OPT += -UASMNAME -UASMFNAME -UNAME -UCNAME -UCHAR_NAME -UCHAR_CNAME +endif +endif CCOMMON_OPT += -DASMNAME=$(FU)$(*F) -DASMFNAME=$(FU)$(*F)$(BU) -DNAME=$(*F)$(BU) -DCNAME=$(*F) -DCHAR_NAME=\"$(*F)$(BU)\" -DCHAR_CNAME=\"$(*F)\" ifeq ($(CORE), PPC440) @@ -1095,15 +1371,17 @@ endif ifneq ($(ARCH), x86_64) ifneq ($(ARCH), x86) -ifneq ($(CORE), LOONGSON3B) NO_AFFINITY = 1 endif endif -endif ifdef NO_AFFINITY +ifeq ($(NO_AFFINITY), 0) +override undefine NO_AFFINITY +else CCOMMON_OPT += -DNO_AFFINITY endif +endif ifdef FUNCTION_PROFILE CCOMMON_OPT += -DFUNCTION_PROFILE @@ -1167,7 +1445,6 @@ endif override CFLAGS += $(COMMON_OPT) $(CCOMMON_OPT) -I$(TOPDIR) override PFLAGS += $(COMMON_OPT) $(CCOMMON_OPT) -I$(TOPDIR) -DPROFILE $(COMMON_PROF) - override FFLAGS += $(COMMON_OPT) $(FCOMMON_OPT) 
override FPFLAGS += $(FCOMMON_OPT) $(COMMON_PROF) #MAKEOVERRIDES = @@ -1273,6 +1550,8 @@ export OSNAME export ARCH export CORE export LIBCORE +export __BYTE_ORDER__ +export ELF_VERSION export PGCPATH export CONFIG export CC @@ -1308,6 +1587,8 @@ export HAVE_SSE4_2 export HAVE_SSE4A export HAVE_SSE5 export HAVE_AVX +export HAVE_AVX2 +export HAVE_FMA3 export HAVE_VFP export HAVE_VFPV3 export HAVE_VFPV4 @@ -1318,7 +1599,11 @@ export KERNELDIR export FUNCTION_PROFILE export TARGET_CORE export NO_AVX512 +export NO_AVX2 +export BUILD_BFLOAT16 +export SBGEMM_UNROLL_M +export SBGEMM_UNROLL_N export SGEMM_UNROLL_M export SGEMM_UNROLL_N export DGEMM_UNROLL_M diff --git a/Makefile.tail b/Makefile.tail index 2adede1a5..54ba649db 100644 --- a/Makefile.tail +++ b/Makefile.tail @@ -1,16 +1,18 @@ +SBBLASOBJS_P = $(SBBLASOBJS:.$(SUFFIX)=.$(PSUFFIX)) SBLASOBJS_P = $(SBLASOBJS:.$(SUFFIX)=.$(PSUFFIX)) DBLASOBJS_P = $(DBLASOBJS:.$(SUFFIX)=.$(PSUFFIX)) QBLASOBJS_P = $(QBLASOBJS:.$(SUFFIX)=.$(PSUFFIX)) CBLASOBJS_P = $(CBLASOBJS:.$(SUFFIX)=.$(PSUFFIX)) ZBLASOBJS_P = $(ZBLASOBJS:.$(SUFFIX)=.$(PSUFFIX)) XBLASOBJS_P = $(XBLASOBJS:.$(SUFFIX)=.$(PSUFFIX)) +SBEXTOBJS_P = $(SBEXTOBJS:.$(SUFFIX)=.$(PSUFFIX)) COMMONOBJS_P = $(COMMONOBJS:.$(SUFFIX)=.$(PSUFFIX)) HPLOBJS_P = $(HPLOBJS:.$(SUFFIX)=.$(PSUFFIX)) -BLASOBJS = $(SBLASOBJS) $(DBLASOBJS) $(CBLASOBJS) $(ZBLASOBJS) -BLASOBJS_P = $(SBLASOBJS_P) $(DBLASOBJS_P) $(CBLASOBJS_P) $(ZBLASOBJS_P) +BLASOBJS = $(SBEXTOBJS) $(SBBLASOBJS) $(SBLASOBJS) $(DBLASOBJS) $(CBLASOBJS) $(ZBLASOBJS) $(CBAUXOBJS) +BLASOBJS_P = $(SBEXTOBJS_P) $(SBBLASOBJS_P) $(SBLASOBJS_P) $(DBLASOBJS_P) $(CBLASOBJS_P) $(ZBLASOBJS_P) $(CBAUXOBJS_P) ifdef EXPRECISION BLASOBJS += $(QBLASOBJS) $(XBLASOBJS) @@ -22,19 +24,23 @@ BLASOBJS += $(QBLASOBJS) $(XBLASOBJS) BLASOBJS_P += $(QBLASOBJS_P) $(XBLASOBJS_P) endif +$(SBBLASOBJS) $(SBBLASOBJS_P) : override CFLAGS += -DBFLOAT16 -UDOUBLE -UCOMPLEX $(SBLASOBJS) $(SBLASOBJS_P) : override CFLAGS += -UDOUBLE -UCOMPLEX $(DBLASOBJS) $(DBLASOBJS_P) : override CFLAGS += -DDOUBLE -UCOMPLEX $(QBLASOBJS) $(QBLASOBJS_P) : override CFLAGS += -DXDOUBLE -UCOMPLEX $(CBLASOBJS) $(CBLASOBJS_P) : override CFLAGS += -UDOUBLE -DCOMPLEX $(ZBLASOBJS) $(ZBLASOBJS_P) : override CFLAGS += -DDOUBLE -DCOMPLEX $(XBLASOBJS) $(XBLASOBJS_P) : override CFLAGS += -DXDOUBLE -DCOMPLEX +$(SBEXTOBJS) $(SBEXTOBJS_P) : override CFLAGS += -DBFLOAT16 -UDOUBLE -UCOMPLEX +$(SBBLASOBJS_P) : override CFLAGS += -DPROFILE $(COMMON_PROF) $(SBLASOBJS_P) : override CFLAGS += -DPROFILE $(COMMON_PROF) $(DBLASOBJS_P) : override CFLAGS += -DPROFILE $(COMMON_PROF) $(QBLASOBJS_P) : override CFLAGS += -DPROFILE $(COMMON_PROF) $(CBLASOBJS_P) : override CFLAGS += -DPROFILE $(COMMON_PROF) $(ZBLASOBJS_P) : override CFLAGS += -DPROFILE $(COMMON_PROF) $(XBLASOBJS_P) : override CFLAGS += -DPROFILE $(COMMON_PROF) +$(SBEXTOBJS_P) : override CFLAGS += -DPROFILE $(COMMON_PROF) libs :: $(BLASOBJS) $(COMMONOBJS) $(AR) $(ARFLAGS) -ru $(TOPDIR)/$(LIBNAME) $^ diff --git a/Makefile.x86 b/Makefile.x86 index a6196d365..0e27264d8 100644 --- a/Makefile.x86 +++ b/Makefile.x86 @@ -1,5 +1,10 @@ # COMPILER_PREFIX = mingw32- +ifdef HAVE_SSE +CCOMMON_OPT += -msse +FCOMMON_OPT += -msse +endif + ifeq ($(OSNAME), Interix) ARFLAGS = -m x86 @@ -54,3 +59,20 @@ LIBATLAS = -L$(ATLASPATH)/32 -lcblas -lf77blas -latlas -lm else LIBATLAS = -L$(ATLASPATH)/32 -lptf77blas -lptatlas -lpthread -lm endif +ifdef HAVE_SSE2 +CCOMMON_OPT += -msse2 +FCOMMON_OPT += -msse2 +endif +ifdef HAVE_SSE3 +CCOMMON_OPT += -msse3 +FCOMMON_OPT += -msse3 +ifdef HAVE_SSSE3 +CCOMMON_OPT += 
-mssse3 +FCOMMON_OPT += -mssse3 +endif +ifdef HAVE_SSE4_1 +CCOMMON_OPT += -msse4.1 +FCOMMON_OPT += -msse4.1 +endif +endif + diff --git a/Makefile.x86_64 b/Makefile.x86_64 index 1b7fe3ef4..175db823d 100644 --- a/Makefile.x86_64 +++ b/Makefile.x86_64 @@ -8,6 +8,31 @@ endif endif endif +ifdef HAVE_SSE3 +CCOMMON_OPT += -msse3 +FCOMMON_OPT += -msse3 +endif +ifdef HAVE_SSSE3 +CCOMMON_OPT += -mssse3 +FCOMMON_OPT += -mssse3 +endif +ifdef HAVE_SSE4_1 +CCOMMON_OPT += -msse4.1 +FCOMMON_OPT += -msse4.1 +endif +ifndef OLDGCC +ifdef HAVE_AVX +CCOMMON_OPT += -mavx +FCOMMON_OPT += -mavx +endif +endif +ifndef NO_AVX2 +ifdef HAVE_AVX2 +CCOMMON_OPT += -mavx2 +FCOMMON_OPT += -mavx2 +endif +endif + ifeq ($(CORE), SKYLAKEX) ifndef DYNAMIC_ARCH ifndef NO_AVX512 @@ -15,22 +40,69 @@ CCOMMON_OPT += -march=skylake-avx512 FCOMMON_OPT += -march=skylake-avx512 ifeq ($(OSNAME), CYGWIN_NT) CCOMMON_OPT += -fno-asynchronous-unwind-tables +FCOMMON_OPT += -fno-asynchronous-unwind-tables endif ifeq ($(OSNAME), WINNT) ifeq ($(C_COMPILER), GCC) CCOMMON_OPT += -fno-asynchronous-unwind-tables +FCOMMON_OPT += -fno-asynchronous-unwind-tables endif endif endif endif endif -ifeq ($(CORE), HASWELL) +ifeq ($(CORE), COOPERLAKE) ifndef DYNAMIC_ARCH +ifndef NO_AVX512 +ifeq ($(C_COMPILER), GCC) +# cooperlake support was added in 10.1 +ifeq ($(GCCVERSIONGTEQ10)$(GCCMINORVERSIONGTEQ1), 11) +CCOMMON_OPT += -march=cooperlake +FCOMMON_OPT += -march=cooperlake +endif +endif +ifeq ($(OSNAME), CYGWIN_NT) +CCOMMON_OPT += -fno-asynchronous-unwind-tables +FCOMMON_OPT += -fno-asynchronous-unwind-tables +endif +ifeq ($(OSNAME), WINNT) +ifeq ($(C_COMPILER), GCC) +CCOMMON_OPT += -fno-asynchronous-unwind-tables +FCOMMON_OPT += -fno-asynchronous-unwind-tables +endif +endif +endif +endif +endif + +ifdef HAVE_AVX2 ifndef NO_AVX2 +ifeq ($(C_COMPILER), GCC) +# AVX2 support was added in 4.7.0 +GCCVERSIONCHECK := $(GCCVERSIONGT4)$(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ7) +ifeq ($(GCCVERSIONCHECK), $(filter $(GCCVERSIONCHECK), 011 110 111)) CCOMMON_OPT += -mavx2 +endif +else +ifeq ($(C_COMPILER), CLANG) +CCOMMON_OPT += -mavx2 +endif +endif +ifeq ($(F_COMPILER), GFORTRAN) +# AVX2 support was added in 4.7.0 +GCCVERSIONGTEQ4 := $(shell expr `$(FC) -dumpversion | cut -f1 -d.` \>= 4) +GCCVERSIONGTEQ5 := $(shell expr `$(FC) -dumpversion | cut -f1 -d.` \>= 5) +GCCMINORVERSIONGTEQ7 := $(shell expr `$(FC) -dumpversion | cut -f2 -d.` \>= 7) +GCCVERSIONCHECK := $(GCCVERSIONGTEQ5)$(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ7) +ifeq ($(GCCVERSIONCHECK), $(filter $(GCCVERSIONCHECK), 011 110 111)) FCOMMON_OPT += -mavx2 endif +else +ifeq ($(F_COMPILER), FLANG) +FCOMMON_OPT += -mavx2 +endif +endif endif endif diff --git a/Makefile.zarch b/Makefile.zarch index 47ea1eb71..092ca2589 100644 --- a/Makefile.zarch +++ b/Makefile.zarch @@ -5,6 +5,12 @@ FCOMMON_OPT += -march=z13 -mzvector endif ifeq ($(CORE), Z14) -CCOMMON_OPT += -march=z14 -mzvector +CCOMMON_OPT += -march=z14 -mzvector -O3 FCOMMON_OPT += -march=z14 -mzvector endif + +# Enable floating-point expression contraction for clang, since it is the +# default for gcc +ifeq ($(C_COMPILER), CLANG) +CCOMMON_OPT += -ffp-contract=on +endif diff --git a/README.md b/README.md index 76a65b74b..fed3936ee 100644 --- a/README.md +++ b/README.md @@ -6,11 +6,20 @@ Travis CI: [![Build Status](https://travis-ci.org/xianyi/OpenBLAS.svg?branch=dev AppVeyor: [![Build status](https://ci.appveyor.com/api/projects/status/09sohd35n8nkkx64/branch/develop?svg=true)](https://ci.appveyor.com/project/xianyi/openblas/branch/develop) +Drone CI: [![Build 
Status](https://cloud.drone.io/api/badges/xianyi/OpenBLAS/status.svg?branch=develop)](https://cloud.drone.io/xianyi/OpenBLAS/) + +[![Build Status](https://dev.azure.com/xianyi/OpenBLAS/_apis/build/status/xianyi.OpenBLAS?branchName=develop)](https://dev.azure.com/xianyi/OpenBLAS/_build/latest?definitionId=1&branchName=develop) + + ## Introduction -OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version. +OpenBLAS is an optimized BLAS (Basic Linear Algebra Subprograms) library based on GotoBLAS2 1.13 BSD version. -Please read the documentation on the OpenBLAS wiki pages: . +Please read the documentation on the OpenBLAS wiki pages: . + +For a general introduction to the BLAS routines, please refer to the extensive documentation of their reference implementation hosted at netlib: +. On that site you will likewise find documentation for the reference implementation of the higher-level library LAPACK - the **L**inear **A**lgebra **Pack**age that comes included with OpenBLAS. If you are looking for a general primer or refresher on Linear Algebra, the set of six +20-minute lecture videos by Prof. Gilbert Strang on either MIT OpenCourseWare or YouTube may be helpful. ## Binary Packages @@ -22,8 +31,11 @@ You can download them from [file hosting on sourceforge.net](https://sourceforge ## Installation from Source -Download from project homepage, http://xianyi.github.com/OpenBLAS/, or check out the code -using Git from https://github.com/xianyi/OpenBLAS.git. +Download from project homepage, https://xianyi.github.com/OpenBLAS/, or check out the code +using Git from https://github.com/xianyi/OpenBLAS.git. (If you want the most up-to-date version, be +sure to use the develop branch - master is several years out of date due to a change of maintainership.) +Build-time parameters can be chosen in Makefile.rule; see there for a short description of each option. +Most can also be given directly on the make or cmake command line. ### Dependencies @@ -38,7 +50,10 @@ Building OpenBLAS requires the following to be installed: Simply invoking `make` (or `gmake` on BSD) will detect the CPU automatically. To set a specific target CPU, use `make TARGET=xxx`, e.g. `make TARGET=NEHALEM`. -The full target list is in the file `TargetList.txt`. +The full target list is in the file `TargetList.txt`. For building with `cmake`, the +usual conventions apply, i.e. create a build directory either underneath the toplevel +OpenBLAS source directory or separate from it, and invoke `cmake` there with the path +to the source tree and any build options you plan to set. ### Cross compile @@ -51,6 +66,10 @@ Examples: ```sh make BINARY=64 CC=mips64el-unknown-linux-gnu-gcc FC=mips64el-unknown-linux-gnu-gfortran HOSTCC=gcc TARGET=LOONGSON3A ``` + or the same with the newer mips cross-compiler put out by Loongson, which defaults to the 32-bit ABI: + ```sh + make HOSTCC=gcc CC='/opt/mips-loongson-gcc7.3-linux-gnu/2019.06-29/bin/mips-linux-gnu-gcc -mabi=64' FC='/opt/mips-loongson-gcc7.3-linux-gnu/2019.06-29/bin/mips-linux-gnu-gfortran -mabi=64' TARGET=LOONGSON3A + ``` * On an x86 box, compile this library for a loongson3a CPU with loongcc (based on Open64) compiler: ```sh @@ -63,9 +82,7 @@ A debug version can be built using `make DEBUG=1`. ### Compile with MASS support on Power CPU (optional) -The [IBM MASS](http://www-01.ibm.com/software/awdtools/mass/linux/mass-linux.html) library -consists of a set of mathematical functions for C, C++, and Fortran applications that are -are tuned for optimum performance on POWER architectures.
+The [IBM MASS](https://www.ibm.com/support/home/product/W511326D80541V01/other_software/mathematical_acceleration_subsystem) library consists of a set of mathematical functions for C, C++, and Fortran applications that are tuned for optimum performance on POWER architectures. OpenBLAS with MASS requires a 64-bit, little-endian OS on POWER. The library can be installed as shown: @@ -101,7 +118,7 @@ The default installation directory is `/opt/OpenBLAS`. ## Supported CPUs and Operating Systems -Please read `GotoBLAS_01Readme.txt`. +Please read `GotoBLAS_01Readme.txt` for older CPU models already supported by the 2010 GotoBLAS. ### Additional supported CPUs @@ -109,12 +126,18 @@ Please read `GotoBLAS_01Readme.txt`. - **Intel Xeon 56xx (Westmere)**: Used GotoBLAS2 Nehalem codes. - **Intel Sandy Bridge**: Optimized Level-3 and Level-2 BLAS with AVX on x86-64. -- **Intel Haswell**: Optimized Level-3 and Level-2 BLAS with AVX2 and FMA on x86-64. -- **Intel Skylake**: Optimized Level-3 and Level-2 BLAS with AVX512 and FMA on x86-64. +- **Intel Haswell**: Optimized Level-3 and Level-2 BLAS with AVX2 and FMA on x86-64. +- **Intel Skylake-X**: Optimized Level-3 and Level-2 BLAS with AVX512 and FMA on x86-64. - **AMD Bobcat**: Used GotoBLAS2 Barcelona codes. - **AMD Bulldozer**: x86-64 ?GEMM FMA4 kernels. (Thanks to Werner Saar) - **AMD PILEDRIVER**: Uses Bulldozer codes with some optimizations. - **AMD STEAMROLLER**: Uses Bulldozer codes with some optimizations. +- **AMD ZEN**: Uses Haswell codes with some optimizations. + +#### MIPS32 + +- **MIPS 1004K**: uses P5600 codes +- **MIPS 24K**: uses P5600 codes #### MIPS64 @@ -128,26 +151,68 @@ Please read `GotoBLAS_01Readme.txt`. #### ARM64 -- **ARMv8**: Experimental -- **ARM Cortex-A57**: Experimental +- **ARMv8**: Basic ARMV8 with small caches, optimized Level-3 and Level-2 BLAS +- **Cortex-A53**: same as ARMV8 (different cpu specifications) +- **Cortex A57**: Optimized Level-3 and Level-2 functions +- **Cortex A72**: same as A57 (different cpu specifications) +- **Cortex A73**: same as A57 (different cpu specifications) +- **Falkor**: same as A57 (different cpu specifications) +- **ThunderX**: Optimized some Level-1 functions +- **ThunderX2T99**: Optimized Level-3 BLAS and parts of Levels 1 and 2 +- **ThunderX3T110** +- **TSV110**: Optimized some Level-3 helper functions +- **EMAG 8180**: preliminary support based on A57 +- **Neoverse N1**: (AWS Graviton2) preliminary support +- **Apple Vortex**: preliminary support based on ARMV8 #### PPC/PPC64 -- **POWER8**: Optimized Level-3 BLAS and some Level-1, only with `USE_OPENMP=1` +- **POWER8**: Optimized BLAS, only for PPC64LE (Little Endian), only with `USE_OPENMP=1` +- **POWER9**: Optimized Level-3 BLAS (real) and some Level-1,2. PPC64LE with OpenMP only. +- **POWER10**: #### IBM zEnterprise System -- **Z13**: Optimized Level-3 BLAS and Level-1,2 (double precision) +- **Z13**: Optimized Level-3 BLAS and Level-1,2 +- **Z14**: Optimized Level-3 BLAS and (single precision) Level-1,2 + +#### RISC-V + +- **C910V**: Optimized Level-3 BLAS (real) and Level-1,2 using the RISC-V Vector extension 0.7.1. + ```sh + make HOSTCC=gcc TARGET=C910V CC=riscv64-unknown-linux-gnu-gcc FC=riscv64-unknown-linux-gnu-gfortran + ``` + +### Support for multiple targets in a single library + +OpenBLAS can be built for multiple targets with runtime detection of the target cpu by specifying `DYNAMIC_ARCH=1` in Makefile.rule, on the gmake command line or as `-DDYNAMIC_ARCH=TRUE` in cmake.
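As a concrete illustration, a dynamic-arch build can be as simple as the following; the `NEHALEM` baseline mirrors the flags used by the manylinux1 CI job added later in this patch, and any other supported target works the same way:

```sh
# make: one library with kernels for many x86_64 generations,
# the matching kernel being selected by cpu detection at runtime
make DYNAMIC_ARCH=1 TARGET=NEHALEM NUM_THREADS=32

# cmake equivalent, run from a separate build directory
mkdir build && cd build
cmake -DDYNAMIC_ARCH=TRUE ..
make
```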
+ +For **x86_64**, the list of targets this activates contains Prescott, Core2, Nehalem, Barcelona, Sandybridge, Bulldozer, Piledriver, Steamroller, Excavator, Haswell, Zen, SkylakeX. For cpu generations not included in this list, the corresponding older model is used. If you also specify `DYNAMIC_OLDER=1`, specific support for Penryn, Dunnington, Opteron, Opteron/SSE3, Bobcat, Atom and Nano is added. Finally, there is an option `DYNAMIC_LIST` that allows specifying an individual list of targets to include instead of the default. + +`DYNAMIC_ARCH` is also supported on **x86**, where it translates to Katmai, Coppermine, Northwood, Prescott, Banias, +Core2, Penryn, Dunnington, Nehalem, Athlon, Opteron, Opteron_SSE3, Barcelona, Bobcat, Atom and Nano. + +On **ARMV8**, it enables support for CortexA53, CortexA57, CortexA72, CortexA73, Falkor, ThunderX, ThunderX2T99, TSV110 as well as generic ARMV8 cpus. + +For **POWER**, the list encompasses POWER6, POWER8 and POWER9; on **ZARCH** it comprises Z13 and Z14. + +The `TARGET` option can be used in conjunction with `DYNAMIC_ARCH=1` to specify which cpu model should be assumed for all the +common code in the library; usually you will want to set this to the oldest model you expect to encounter. +Please note that it is not possible to combine support for different architectures, so you cannot have combined 32-bit and 64-bit, or x86_64 and arm64, support in the same library. ### Supported OS - **GNU/Linux** - **MinGW or Visual Studio (CMake)/Windows**: Please read . -- **Darwin/macOS**: Experimental. Although GotoBLAS2 supports Darwin, we are not macOS experts. +- **Darwin/macOS/OSX/iOS**: Experimental. Although GotoBLAS2 already supports Darwin, we are not OSX/iOS experts. - **FreeBSD**: Supported by the community. We don't actively test the library on this OS. - **OpenBSD**: Supported by the community. We don't actively test the library on this OS. +- **NetBSD**: Supported by the community. We don't actively test the library on this OS. - **DragonFly BSD**: Supported by the community. We don't actively test the library on this OS. - **Android**: Supported by the community. Please read . +- **AIX**: Supported on PPC up to POWER8 +- **Haiku**: Supported by the community. We don't actively test the library on this OS. +- **SunOS**: Supported by the community. We don't actively test the library on this OS. ## Usage @@ -179,7 +244,8 @@ We provide the following functions to control the number of threads at runtime: void goto_set_num_threads(int num_threads); void openblas_set_num_threads(int num_threads); ``` - +Note that these are only used once at library initialization, and are not available for +fine-tuning thread numbers in individual BLAS calls. If you compile this library with `USE_OPENMP=1`, you should use the above functions too. ## Reporting bugs @@ -202,7 +268,7 @@ Please see Changelog.txt to view the differences between OpenBLAS and GotoBLAS2 * Please use Clang version 3.1 and above to compile the library on Sandy Bridge microarchitecture. Clang 3.0 will generate the wrong AVX binary code. * Please use GCC version 6 or LLVM version 6 and above to compile Skylake AVX512 kernels. -* The number of CPUs/cores should less than or equal to 256. On Linux `x86_64` (`amd64`), +* The number of CPUs/cores should be less than or equal to 256. On Linux `x86_64` (`amd64`), there is experimental support for up to 1024 CPUs/cores and 128 numa nodes if you build the library with `BIGNUMA=1`. * OpenBLAS does not set processor affinity by default.
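As a usage sketch for the thread-control functions mentioned in the Usage section above (the program name here is hypothetical), the same limit can typically also be imposed from the environment, without changing any code:

```sh
# Cap OpenBLAS at 4 threads for a single run
OPENBLAS_NUM_THREADS=4 ./my_blas_program
# for libraries built with USE_OPENMP=1, the usual OpenMP variable applies
OMP_NUM_THREADS=4 ./my_blas_program
```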
diff --git a/TargetList.txt b/TargetList.txt index 6a57bf1af..d19964916 100644 --- a/TargetList.txt +++ b/TargetList.txt @@ -22,6 +22,7 @@ SANDYBRIDGE HASWELL SKYLAKEX ATOM +COOPERLAKE b)AMD CPU: ATHLON @@ -49,6 +50,7 @@ POWER6 POWER7 POWER8 POWER9 +POWER10 PPCG4 PPC970 PPC970MP @@ -58,7 +60,8 @@ CELL 3.MIPS CPU: P5600 -1004K +MIPS1004K +MIPS24K 4.MIPS64 CPU: SICORTEX @@ -88,12 +91,21 @@ CORTEXA53 CORTEXA57 CORTEXA72 CORTEXA73 +NEOVERSEN1 +EMAG8180 FALKOR THUNDERX THUNDERX2T99 TSV110 +THUNDERX3T110 +VORTEX 9.System Z: ZARCH_GENERIC Z13 Z14 + +10.RISC-V 64: +RISCV64_GENERIC +C910V + diff --git a/appveyor.yml b/appveyor.yml index 44a616aaa..1936059d5 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -35,7 +35,15 @@ environment: DYNAMIC_ARCH: ON WITH_FORTRAN: no - COMPILER: cl - + - COMPILER: MinGW64-gcc-7.2.0-mingw + DYNAMIC_ARCH: OFF + WITH_FORTRAN: ignore + - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 + COMPILER: MinGW-gcc-6.3.0-32 + - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 + COMPILER: MinGW-gcc-5.3.0 + WITH_FORTRAN: ignore + install: - if [%COMPILER%]==[clang-cl] call %CONDA_INSTALL_LOCN%\Scripts\activate.bat - if [%COMPILER%]==[clang-cl] conda config --add channels conda-forge --force @@ -52,7 +60,14 @@ install: before_build: - ps: if (-Not (Test-Path .\build)) { mkdir build } - cd build + - set PATH=%PATH:C:\Program Files\Git\usr\bin;=% + - if [%COMPILER%]==[MinGW-gcc-5.3.0] set PATH=C:\MinGW\bin;C:\msys64\usr\bin;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH% + - if [%COMPILER%]==[MinGW64-gcc-7.2.0-mingw] set PATH=C:\MinGW\bin;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH% + - if [%COMPILER%]==[MinGW-gcc-6.3.0-32] set PATH=C:\msys64\usr\bin;C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw64\bin;%PATH% - if [%COMPILER%]==[cl] cmake -G "Visual Studio 15 2017 Win64" .. + - if [%COMPILER%]==[MinGW64-gcc-7.2.0-mingw] cmake -G "MinGW Makefiles" -DNOFORTRAN=1 .. + - if [%COMPILER%]==[MinGW-gcc-6.3.0-32] cmake -G "MSYS Makefiles" -DNOFORTRAN=1 .. + - if [%COMPILER%]==[MinGW-gcc-5.3.0] cmake -G "MSYS Makefiles" -DNOFORTRAN=1 .. - if [%WITH_FORTRAN%]==[no] cmake -G "Ninja" -DCMAKE_CXX_COMPILER=clang-cl -DCMAKE_C_COMPILER=clang-cl -DMSVC_STATIC_CRT=ON .. - if [%WITH_FORTRAN%]==[yes] cmake -G "Ninja" -DCMAKE_CXX_COMPILER=clang-cl -DCMAKE_C_COMPILER=clang-cl -DCMAKE_Fortran_COMPILER=flang -DBUILD_WITHOUT_LAPACK=no -DNOFORTRAN=0 .. - if [%DYNAMIC_ARCH%]==[ON] cmake -DDYNAMIC_ARCH=ON -DDYNAMIC_LIST='CORE2;NEHALEM;SANDYBRIDGE;BULLDOZER;HASWELL' .. @@ -64,3 +79,4 @@ test_script: - echo Running Test - cd utest - openblas_utest + diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 000000000..639cb3558 --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,71 @@ +trigger: + # start a new build for every push + batch: False + branches: + include: + - develop + +jobs: +# manylinux1 is useful to test because the +# standard Docker container uses an old version +# of gcc / glibc +- job: manylinux1_gcc + pool: + vmImage: 'ubuntu-16.04' + steps: + - script: | + echo "FROM quay.io/pypa/manylinux1_x86_64 + COPY . /tmp/openblas + RUN cd /tmp/openblas && \ + COMMON_FLAGS='DYNAMIC_ARCH=1 TARGET=NEHALEM NUM_THREADS=32' && \ + BTYPE='BINARY=64' CC=gcc && \ + make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE && \ + make -C test $COMMON_FLAGS $BTYPE && \ + make -C ctest $COMMON_FLAGS $BTYPE && \ + make -C utest $COMMON_FLAGS $BTYPE" > Dockerfile + docker build . 
+ displayName: Run manylinux1 docker build +- job: Intel_SDE_skx + pool: + vmImage: 'ubuntu-16.04' + steps: + - script: | + # at the time of writing the available Azure Ubuntu vm image + # does not support AVX512VL, so use more recent LTS version + echo "FROM ubuntu:bionic + COPY . /tmp/openblas + RUN apt-get -y update && apt-get -y install \\ + cmake \\ + gfortran \\ + make \\ + wget + RUN mkdir /tmp/SDE && cd /tmp/SDE && \\ + mkdir sde-external-8.35.0-2019-03-11-lin && \\ + wget --quiet -O sde-external-8.35.0-2019-03-11-lin.tar.bz2 https://www.dropbox.com/s/fopsnzj67572sj5/sde-external-8.35.0-2019-03-11-lin.tar.bz2?dl=0 && \\ + tar -xjvf sde-external-8.35.0-2019-03-11-lin.tar.bz2 -C /tmp/SDE/sde-external-8.35.0-2019-03-11-lin --strip-components=1 + RUN cd /tmp/openblas && CC=gcc make QUIET_MAKE=1 DYNAMIC_ARCH=1 NUM_THREADS=32 BINARY=64 + CMD cd /tmp/openblas && echo 0 > /proc/sys/kernel/yama/ptrace_scope && CC=gcc OPENBLAS_VERBOSE=2 /tmp/SDE/sde-external-8.35.0-2019-03-11-lin/sde64 -cpuid_in /tmp/SDE/sde-external-8.35.0-2019-03-11-lin/misc/cpuid/skx/cpuid.def -- make -C utest DYNAMIC_ARCH=1 NUM_THREADS=32 BINARY=64" > Dockerfile + docker build -t intel_sde . + # we need a privileged docker run for sde process attachment + docker run --privileged intel_sde + displayName: 'Run AVX512 SkylakeX docker build / test' + +- job: Windows_cl + pool: + vmImage: 'windows-latest' + steps: + - task: CMake@1 + inputs: + workingDirectory: 'build' # Optional + cmakeArgs: '-G "Visual Studio 16 2019" ..' + - task: CMake@1 + inputs: + cmakeArgs: '--build . --config Release' + workingDirectory: 'build' + - script: | + cd build + cd utest + dir + openblas_utest.exe + + diff --git a/benchmark/Makefile b/benchmark/Makefile index 51e9c64aa..f2f3b354a 100644 --- a/benchmark/Makefile +++ b/benchmark/Makefile @@ -1,2347 +1,3439 @@ -TOPDIR = .. 
-include $(TOPDIR)/Makefile.system - -# ACML standard -#ACML=/opt/acml5.3.1/gfortran64_mp/lib -#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm - -# ACML custom -#ACML=/opt/pb/acml-5-3-1-gfortran-64bit/gfortran64_fma4_mp/lib -#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm - -# ACML 6.1 custom -ACML=/home/saar/acml6.1/gfortran64_mp/lib -LIBACML = -fopenmp $(ACML)/libacml_mp.so -lgfortran -lm - - -# Atlas Ubuntu -#ATLAS=/usr/lib/atlas-base -#LIBATLAS = -fopenmp $(ATLAS)/liblapack_atlas.a $(ATLAS)/libptcblas.a $(ATLAS)/libptf77blas.a $(ATLAS)/libatlas.a -lgfortran -lm - -# Atlas RHEL and Fedora -ATLAS=/usr/lib64/atlas -LIBATLAS = -fopenmp $(ATLAS)/liblapack.a $(ATLAS)/libptcblas.a $(ATLAS)/libptf77blas.a $(ATLAS)/libatlas.a -lgfortran -lm - -# Intel standard -# MKL=/opt/intel/mkl/lib/intel64 -# LIBMKL = -L$(MKL) -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lpthread -lm - -# Intel custom -MKL=/home/saar/intel_mkl -LIBMKL = -L$(MKL) -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lpthread -lm - -# Apple vecLib -LIBVECLIB = -framework Accelerate - -ESSL=/opt/ibm/lib -#LIBESSL = -lesslsmp $(ESSL)/libxlomp_ser.so.1 $(ESSL)/libxlf90_r.so.1 $(ESSL)/libxlfmath.so.1 $(ESSL)/libxlsmp.so.1 /opt/ibm/xlC/13.1.3/lib/libxl.a -LIBESSL = -lesslsmp $(ESSL)/libxlf90_r.so.1 $(ESSL)/libxlfmath.so.1 $(ESSL)/libxlsmp.so.1 /opt/ibm/xlC/13.1.3/lib/libxl.a - -ifneq ($(NO_LAPACK), 1) -GOTO_LAPACK_TARGETS=slinpack.goto dlinpack.goto clinpack.goto zlinpack.goto \ - scholesky.goto dcholesky.goto ccholesky.goto zcholesky.goto \ - sgesv.goto dgesv.goto cgesv.goto zgesv.goto \ - sgeev.goto dgeev.goto cgeev.goto zgeev.goto \ - csymv.goto zsymv.goto \ - sgetri.goto dgetri.goto cgetri.goto zgetri.goto \ - spotrf.goto dpotrf.goto cpotrf.goto zpotrf.goto -else -GOTO_LAPACK_TARGETS= -endif - -ifeq ($(OSNAME), WINNT) - -goto :: slinpack.goto dlinpack.goto clinpack.goto zlinpack.goto \ - scholesky.goto dcholesky.goto ccholesky.goto zcholesky.goto \ - sgemm.goto dgemm.goto cgemm.goto zgemm.goto \ - strmm.goto dtrmm.goto ctrmm.goto ztrmm.goto \ - strsm.goto dtrsm.goto ctrsm.goto ztrsm.goto \ - ssyrk.goto dsyrk.goto csyrk.goto zsyrk.goto \ - ssyr2k.goto dsyr2k.goto csyr2k.goto zsyr2k.goto \ - sger.goto dger.goto cger.goto zger.goto \ - sdot.goto ddot.goto \ - srot.goto drot.goto \ - saxpy.goto daxpy.goto caxpy.goto zaxpy.goto \ - scopy.goto dcopy.goto ccopy.goto zcopy.goto \ - sswap.goto dswap.goto cswap.goto zswap.goto \ - sscal.goto dscal.goto cscal.goto zscal.goto \ - sasum.goto dasum.goto casum.goto zasum.goto \ - ssymv.goto dsymv.goto csymv.goto zsymv.goto \ - chemv.goto zhemv.goto \ - chemm.goto zhemm.goto \ - cherk.goto zherk.goto \ - cher2k.goto zher2k.goto \ - sgemv.goto dgemv.goto cgemv.goto zgemv.goto \ - sgeev.goto dgeev.goto cgeev.goto zgeev.goto \ - sgesv.goto dgesv.goto cgesv.goto zgesv.goto \ - sgetri.goto dgetri.goto cgetri.goto zgetri.goto \ - spotrf.goto dpotrf.goto cpotrf.goto zpotrf.goto \ - ssymm.goto dsymm.goto csymm.goto zsymm.goto - -acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \ - scholesky.acml dcholesky.acml ccholesky.acml zcholesky.acml \ - sgemm.acml dgemm.acml cgemm.acml zgemm.acml \ - strmm.acml dtrmm.acml ctrmm.acml ztrmm.acml \ - strsm.acml dtrsm.acml ctrsm.acml ztrsm.acml \ - ssyrk.acml dsyrk.acml csyrk.acml zsyrk.acml \ - ssyr2k.acml dsyr2k.acml csyr2k.acml zsyr2k.acml \ - sger.acml dger.acml cger.acml zger.acml \ - sdot.acml ddot.acml \ - saxpy.acml daxpy.acml caxpy.acml zaxpy.acml \ - scopy.acml dcopy.acml ccopy.acml zcopy.acml \ - sswap.acml 
dswap.acml cswap.acml zswap.acml \ - sscal.acml dscal.acml cscal.acml zscal.acml \ - sasum.acml dasum.acml casum.acml zasum.acml \ - ssymv.acml dsymv.acml csymv.acml zsymv.acml \ - chemv.acml zhemv.acml \ - chemm.acml zhemm.acml \ - cherk.acml zherk.acml \ - cher2k.acml zher2k.acml \ - sgemv.acml dgemv.acml cgemv.acml zgemv.acml \ - sgeev.acml dgeev.acml cgeev.acml zgeev.acml \ - sgesv.acml dgesv.acml cgesv.acml zgesv.acml \ - sgetri.acml dgetri.acml cgetri.acml zgetri.acml \ - spotrf.acml dpotrf.acml cpotrf.acml zpotrf.acml \ - ssymm.acml dsymm.acml csymm.acml zsymm.acml - -atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \ - scholesky.atlas dcholesky.atlas ccholesky.atlas zcholesky.atlas \ - sgemm.atlas dgemm.atlas cgemm.atlas zgemm.atlas \ - strmm.atlas dtrmm.atlas ctrmm.atlas ztrmm.atlas \ - strsm.atlas dtrsm.atlas ctrsm.atlas ztrsm.atlas \ - ssyrk.atlas dsyrk.atlas csyrk.atlas zsyrk.atlas \ - ssyr2k.atlas dsyr2k.atlas csyr2k.atlas zsyr2k.atlas \ - sger.atlas dger.atlas cger.atlas zger.atlas\ - sdot.atlas ddot.atlas \ - saxpy.atlas daxpy.atlas caxpy.atlas zaxpy.atlas \ - scopy.atlas dcopy.atlas ccopy.atlas zcopy.atlas \ - sswap.atlas dswap.atlas cswap.atlas zswap.atlas \ - sscal.atlas dscal.atlas cscal.atlas zscal.atlas \ - sasum.atlas dasum.atlas casum.atlas zasum.atlas \ - ssymv.atlas dsymv.atlas csymv.atlas zsymv.atlas \ - chemv.atlas zhemv.atlas \ - chemm.acml zhemm.acml \ - chemm.atlas zhemm.atlas \ - cherk.atlas zherk.atlas \ - cher2k.atlas zher2k.atlas \ - sgemv.atlas dgemv.atlas cgemv.atlas zgemv.atlas \ - sgeev.atlas dgeev.atlas cgeev.atlas zgeev.atlas \ - sgesv.atlas dgesv.atlas cgesv.atlas zgesv.atlas \ - sgetri.atlas dgetri.atlas cgetri.atlas zgetri.atlas \ - spotrf.atlas dpotrf.atlas cpotrf.atlas zpotrf.atlas \ - ssymm.atlas dsymm.atlas csymm.atlas zsymm.atlas - -mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \ - scholesky.mkl dcholesky.mkl ccholesky.mkl zcholesky.mkl \ - sgemm.mkl dgemm.mkl cgemm.mkl zgemm.mkl \ - strmm.mkl dtrmm.mkl ctrmm.mkl ztrmm.mkl \ - strsm.mkl dtrsm.mkl ctrsm.mkl ztrsm.mkl \ - ssyrk.mkl dsyrk.mkl csyrk.mkl zsyrk.mkl \ - ssyr2k.mkl dsyr2k.mkl csyr2k.mkl zsyr2k.mkl \ - sger.mkl dger.mkl cger.mkl zger.mkl \ - sdot.mkl ddot.mkl \ - saxpy.mkl daxpy.mkl caxpy.mkl zaxpy.mkl \ - scopy.mkl dcopy.mkl ccopy.mkl zcopy.mkl \ - sswap.mkl dswap.mkl cswap.mkl zswap.mkl \ - sscal.mkl dscal.mkl cscal.mkl zscal.mkl \ - sasum.mkl dasum.mkl casum.mkl zasum.mkl \ - ssymv.mkl dsymv.mkl csymv.mkl zsymv.mkl \ - chemv.mkl zhemv.mkl \ - chemm.mkl zhemm.mkl \ - cherk.mkl zherk.mkl \ - cher2k.mkl zher2k.mkl \ - sgemv.mkl dgemv.mkl cgemv.mkl zgemv.mkl \ - sgeev.mkl dgeev.mkl cgeev.mkl zgeev.mkl \ - sgesv.mkl dgesv.mkl cgesv.mkl zgesv.mkl \ - sgetri.mkl dgetri.mkl cgetri.mkl zgetri.mkl \ - spotrf.mkl dpotrf.mkl cpotrf.mkl zpotrf.mkl \ - ssymm.mkl dsymm.mkl csymm.mkl zsymm.mkl - -else - -goto :: sgemm.goto dgemm.goto cgemm.goto zgemm.goto \ - strmm.goto dtrmm.goto ctrmm.goto ztrmm.goto \ - strsm.goto dtrsm.goto ctrsm.goto ztrsm.goto \ - ssyrk.goto dsyrk.goto csyrk.goto zsyrk.goto \ - ssyr2k.goto dsyr2k.goto csyr2k.goto zsyr2k.goto \ - sger.goto dger.goto cger.goto zger.goto \ - sdot.goto ddot.goto cdot.goto zdot.goto \ - srot.goto drot.goto \ - saxpy.goto daxpy.goto caxpy.goto zaxpy.goto \ - scopy.goto dcopy.goto ccopy.goto zcopy.goto \ - sswap.goto dswap.goto cswap.goto zswap.goto \ - sscal.goto dscal.goto cscal.goto zscal.goto \ - sasum.goto dasum.goto casum.goto zasum.goto \ - ssymv.goto dsymv.goto \ - chemv.goto zhemv.goto \ - chemm.goto 
zhemm.goto \ - cherk.goto zherk.goto \ - cher2k.goto zher2k.goto \ - sgemv.goto dgemv.goto cgemv.goto zgemv.goto \ - ssymm.goto dsymm.goto csymm.goto zsymm.goto \ - smallscaling \ - isamax.goto idamax.goto icamax.goto izamax.goto \ - snrm2.goto dnrm2.goto scnrm2.goto dznrm2.goto $(GOTO_LAPACK_TARGETS) - -acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \ - scholesky.acml dcholesky.acml ccholesky.acml zcholesky.acml \ - sgemm.acml dgemm.acml cgemm.acml zgemm.acml \ - strmm.acml dtrmm.acml ctrmm.acml ztrmm.acml \ - strsm.acml dtrsm.acml ctrsm.acml ztrsm.acml \ - ssyrk.acml dsyrk.acml csyrk.acml zsyrk.acml \ - ssyr2k.acml dsyr2k.acml csyr2k.acml zsyr2k.acml \ - sger.acml dger.acml cger.acml zger.acml \ - sdot.acml ddot.acml \ - saxpy.acml daxpy.acml caxpy.acml zaxpy.acml \ - scopy.acml dcopy.acml ccopy.acml zcopy.acml \ - sswap.acml dswap.acml cswap.acml zswap.acml \ - sscal.acml dscal.acml cscal.acml zscal.acml \ - sasum.acml dasum.acml casum.acml zasum.acml \ - ssymv.acml dsymv.acml csymv.acml zsymv.acml \ - chemv.acml zhemv.acml \ - chemm.acml zhemm.acml \ - cherk.acml zherk.acml \ - cher2k.acml zher2k.acml \ - sgemv.acml dgemv.acml cgemv.acml zgemv.acml \ - sgeev.acml dgeev.acml cgeev.acml zgeev.acml \ - sgesv.acml dgesv.acml cgesv.acml zgesv.acml \ - sgetri.acml dgetri.acml cgetri.acml zgetri.acml \ - spotrf.acml dpotrf.acml cpotrf.acml zpotrf.acml \ - ssymm.acml dsymm.acml csymm.acml zsymm.acml - -atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \ - scholesky.atlas dcholesky.atlas ccholesky.atlas zcholesky.atlas \ - sgemm.atlas dgemm.atlas cgemm.atlas zgemm.atlas \ - strmm.atlas dtrmm.atlas ctrmm.atlas ztrmm.atlas \ - strsm.atlas dtrsm.atlas ctrsm.atlas ztrsm.atlas \ - ssyrk.atlas dsyrk.atlas csyrk.atlas zsyrk.atlas \ - ssyr2k.atlas dsyr2k.atlas csyr2k.atlas zsyr2k.atlas \ - sger.atlas dger.atlas cger.atlas zger.atlas\ - sdot.atlas ddot.atlas \ - saxpy.atlas daxpy.atlas caxpy.atlas zaxpy.atlas \ - scopy.atlas dcopy.atlas ccopy.atlas zcopy.atlas \ - sswap.atlas dswap.atlas cswap.atlas zswap.atlas \ - sscal.atlas dscal.atlas cscal.atlas zscal.atlas \ - sasum.atlas dasum.atlas casum.atlas zasum.atlas \ - ssymv.atlas dsymv.atlas csymv.atlas zsymv.atlas \ - chemv.atlas zhemv.atlas \ - chemm.acml zhemm.acml \ - chemm.atlas zhemm.atlas \ - cherk.atlas zherk.atlas \ - cher2k.atlas zher2k.atlas \ - sgemv.atlas dgemv.atlas cgemv.atlas zgemv.atlas \ - sgeev.atlas dgeev.atlas cgeev.atlas zgeev.atlas \ - sgesv.atlas dgesv.atlas cgesv.atlas zgesv.atlas \ - sgetri.atlas dgetri.atlas cgetri.atlas zgetri.atlas \ - spotrf.atlas dpotrf.atlas cpotrf.atlas zpotrf.atlas \ - ssymm.atlas dsymm.atlas csymm.atlas zsymm.atlas \ - isamax.atlas idamax.atlas icamax.atlas izamax.atlas \ - snrm2.goto dnrm2.goto scnrm2.goto dznrm2.goto - -mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \ - scholesky.mkl dcholesky.mkl ccholesky.mkl zcholesky.mkl \ - sgemm.mkl dgemm.mkl cgemm.mkl zgemm.mkl \ - strmm.mkl dtrmm.mkl ctrmm.mkl ztrmm.mkl \ - strsm.mkl dtrsm.mkl ctrsm.mkl ztrsm.mkl \ - ssyrk.mkl dsyrk.mkl csyrk.mkl zsyrk.mkl \ - ssyr2k.mkl dsyr2k.mkl csyr2k.mkl zsyr2k.mkl \ - sger.mkl dger.mkl cger.mkl zger.mkl \ - sdot.mkl ddot.mkl cdot.mkl zdot.mkl \ - saxpy.mkl daxpy.mkl caxpy.mkl zaxpy.mkl \ - scopy.mkl dcopy.mkl ccopy.mkl zcopy.mkl \ - sswap.mkl dswap.mkl cswap.mkl zswap.mkl \ - sscal.mkl dscal.mkl cscal.mkl zscal.mkl \ - sasum.mkl dasum.mkl casum.mkl zasum.mkl \ - ssymv.mkl dsymv.mkl csymv.mkl zsymv.mkl \ - chemv.mkl zhemv.mkl \ - chemm.mkl zhemm.mkl \ - cherk.mkl zherk.mkl \ 
- cher2k.mkl zher2k.mkl \ - sgemv.mkl dgemv.mkl cgemv.mkl zgemv.mkl \ - sgeev.mkl dgeev.mkl cgeev.mkl zgeev.mkl \ - sgesv.mkl dgesv.mkl cgesv.mkl zgesv.mkl \ - sgetri.mkl dgetri.mkl cgetri.mkl zgetri.mkl \ - spotrf.mkl dpotrf.mkl cpotrf.mkl zpotrf.mkl \ - ssymm.mkl dsymm.mkl csymm.mkl zsymm.mkl - - - - -endif - -essl :: sgemm.essl strmm.essl dgemm.essl dtrmm.essl \ - cgemm.essl ctrmm.essl zgemm.essl ztrmm.essl \ - slinpack.essl clinpack.essl dlinpack.essl zlinpack.essl \ - scholesky.essl ccholesky.essl dcholesky.essl zcholesky.essl \ - strsm.essl dtrsm.essl ctrsm.essl ztrsm.essl - -veclib :: slinpack.veclib dlinpack.veclib clinpack.veclib zlinpack.veclib \ - scholesky.veclib dcholesky.veclib ccholesky.veclib zcholesky.veclib \ - sgemm.veclib dgemm.veclib cgemm.veclib zgemm.veclib \ - strmm.veclib dtrmm.veclib ctrmm.veclib ztrmm.veclib \ - strsm.veclib dtrsm.veclib ctrsm.veclib ztrsm.veclib \ - ssyrk.veclib dsyrk.veclib csyrk.veclib zsyrk.veclib \ - ssyr2k.veclib dsyr2k.veclib csyr2k.veclib zsyr2k.veclib \ - sger.veclib dger.veclib cger.veclib zger.veclib \ - sdot.veclib ddot.veclib cdot.veclib zdot.veclib \ - saxpy.veclib daxpy.veclib caxpy.veclib zaxpy.veclib \ - scopy.veclib dcopy.veclib ccopy.veclib zcopy.veclib \ - sswap.veclib dswap.veclib cswap.veclib zswap.veclib \ - sscal.veclib dscal.veclib cscal.veclib zscal.veclib \ - sasum.veclib dasum.veclib casum.veclib zasum.veclib \ - ssymv.veclib dsymv.veclib csymv.veclib zsymv.veclib \ - chemv.veclib zhemv.veclib \ - chemm.veclib zhemm.veclib \ - cherk.veclib zherk.veclib \ - cher2k.veclib zher2k.veclib \ - sgemv.veclib dgemv.veclib cgemv.veclib zgemv.veclib \ - sgeev.veclib dgeev.veclib cgeev.veclib zgeev.veclib \ - sgesv.veclib dgesv.veclib cgesv.veclib zgesv.veclib \ - sgetri.veclib dgetri.veclib cgetri.veclib zgetri.veclib \ - spotrf.veclib dpotrf.veclib cpotrf.veclib zpotrf.veclib \ - ssymm.veclib dsymm.veclib csymm.veclib zsymm.veclib - -goto_3m :: cgemm3m.goto zgemm3m.goto - -mkl_3m :: cgemm3m.mkl zgemm3m.mkl - -all :: goto mkl atlas acml veclib - -exe : - @./Make_exe.sh - -##################################### Slinpack #################################################### -slinpack.goto : slinpack.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -slinpack.acml : slinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -slinpack.atlas : slinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -slinpack.mkl : slinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -slinpack.veclib : slinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -slinpack.essl : slinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dlinpack #################################################### -dlinpack.goto : dlinpack.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dlinpack.acml : dlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dlinpack.atlas : dlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dlinpack.mkl : dlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dlinpack.veclib : dlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) 
$(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dlinpack.essl : dlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Clinpack #################################################### - -clinpack.goto : clinpack.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -clinpack.acml : clinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -clinpack.atlas : clinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -clinpack.mkl : clinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -clinpack.veclib : clinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -clinpack.essl : clinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zlinpack #################################################### - -zlinpack.goto : zlinpack.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zlinpack.acml : zlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zlinpack.atlas : zlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zlinpack.mkl : zlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zlinpack.veclib : zlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zlinpack.essl : zlinpack.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Scholesky ################################################### - -scholesky.goto : scholesky.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -scholesky.acml : scholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -scholesky.atlas : scholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -scholesky.mkl : scholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -scholesky.veclib : scholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -scholesky.essl : scholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dcholesky ################################################### - -dcholesky.goto : dcholesky.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dcholesky.acml : dcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dcholesky.atlas : dcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dcholesky.mkl : dcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dcholesky.veclib : dcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dcholesky.essl : dcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ccholesky 
################################################### - -ccholesky.goto : ccholesky.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ccholesky.acml : ccholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ccholesky.atlas : ccholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ccholesky.mkl : ccholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ccholesky.veclib : ccholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ccholesky.essl : ccholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - - -##################################### Zcholesky ################################################### - -zcholesky.goto : zcholesky.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zcholesky.acml : zcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zcholesky.atlas : zcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zcholesky.mkl : zcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zcholesky.veclib : zcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zcholesky.essl : zcholesky.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sgemm #################################################### -sgemm.goto : sgemm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sgemm.acml : sgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgemm.atlas : sgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgemm.mkl : sgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgemm.veclib : sgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgemm.essl : sgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dgemm #################################################### -dgemm.goto : dgemm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dgemm.acml : dgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgemm.atlas : dgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgemm.mkl : dgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgemm.veclib : dgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgemm.essl : dgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cgemm #################################################### - -cgemm.goto : cgemm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cgemm.acml : cgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgemm.atlas : cgemm.$(SUFFIX) - -$(CC) 
$(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgemm.mkl : cgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgemm.veclib : cgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgemm.essl : cgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zgemm #################################################### - -zgemm.goto : zgemm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zgemm.acml : zgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgemm.atlas : zgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgemm.mkl : zgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgemm.veclib : zgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgemm.essl : zgemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ssymm #################################################### -ssymm.goto : ssymm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ssymm.acml : ssymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssymm.atlas : ssymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssymm.mkl : ssymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssymm.veclib : ssymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dsymm #################################################### -dsymm.goto : dsymm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dsymm.acml : dsymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsymm.atlas : dsymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsymm.mkl : dsymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsymm.veclib : dsymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Csymm #################################################### - -csymm.goto : csymm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -csymm.acml : csymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csymm.atlas : csymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csymm.mkl : csymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csymm.veclib : csymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zsymm #################################################### - -zsymm.goto : zsymm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zsymm.acml : zsymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsymm.atlas : 
zsymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsymm.mkl : zsymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsymm.veclib : zsymm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Strmm #################################################### -strmm.goto : strmm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -strmm.acml : strmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -strmm.atlas : strmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -strmm.mkl : strmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -strmm.veclib : strmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -strmm.essl : strmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dtrmm #################################################### -dtrmm.goto : dtrmm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dtrmm.acml : dtrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dtrmm.atlas : dtrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dtrmm.mkl : dtrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dtrmm.veclib : dtrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dtrmm.essl : dtrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ctrmm #################################################### - -ctrmm.goto : ctrmm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ctrmm.acml : ctrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ctrmm.atlas : ctrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ctrmm.mkl : ctrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ctrmm.veclib : ctrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ctrmm.essl : ctrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ztrmm #################################################### - -ztrmm.goto : ztrmm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ztrmm.acml : ztrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ztrmm.atlas : ztrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ztrmm.mkl : ztrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ztrmm.veclib : ztrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ztrmm.essl : ztrmm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Strsm #################################################### 
-strsm.goto : strsm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -strsm.acml : strsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -strsm.atlas : strsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -strsm.mkl : strsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -strsm.veclib : strsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -strsm.essl : strsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dtrsm #################################################### -dtrsm.goto : dtrsm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dtrsm.acml : dtrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dtrsm.atlas : dtrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dtrsm.mkl : dtrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dtrsm.veclib : dtrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dtrsm.essl : dtrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ctrsm #################################################### - -ctrsm.goto : ctrsm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ctrsm.acml : ctrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ctrsm.atlas : ctrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ctrsm.mkl : ctrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ctrsm.veclib : ctrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ctrsm.essl : ctrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ztrsm #################################################### - -ztrsm.goto : ztrsm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ztrsm.acml : ztrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ztrsm.atlas : ztrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ztrsm.mkl : ztrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ztrsm.veclib : ztrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ztrsm.essl : ztrsm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ssyrk #################################################### -ssyrk.goto : ssyrk.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ssyrk.acml : ssyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssyrk.atlas : ssyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssyrk.mkl : ssyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) 
$(EXTRALIB) $(FEXTRALIB) - -ssyrk.veclib : ssyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dsyrk #################################################### -dsyrk.goto : dsyrk.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dsyrk.acml : dsyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsyrk.atlas : dsyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsyrk.mkl : dsyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsyrk.veclib : dsyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Csyrk #################################################### - -csyrk.goto : csyrk.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -csyrk.acml : csyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csyrk.atlas : csyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csyrk.mkl : csyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csyrk.veclib : csyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zsyrk #################################################### - -zsyrk.goto : zsyrk.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zsyrk.acml : zsyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsyrk.atlas : zsyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsyrk.mkl : zsyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsyrk.veclib : zsyrk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ssyr2k #################################################### -ssyr2k.goto : ssyr2k.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ssyr2k.acml : ssyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssyr2k.atlas : ssyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssyr2k.mkl : ssyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssyr2k.veclib : ssyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dsyr2k #################################################### -dsyr2k.goto : dsyr2k.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dsyr2k.acml : dsyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsyr2k.atlas : dsyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsyr2k.mkl : dsyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsyr2k.veclib : dsyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Csyr2k 
#################################################### - -csyr2k.goto : csyr2k.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -csyr2k.acml : csyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csyr2k.atlas : csyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csyr2k.mkl : csyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csyr2k.veclib : csyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zsyr2k #################################################### - -zsyr2k.goto : zsyr2k.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zsyr2k.acml : zsyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsyr2k.atlas : zsyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsyr2k.mkl : zsyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsyr2k.veclib : zsyr2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Chemm #################################################### - -chemm.goto : chemm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -chemm.acml : chemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -chemm.atlas : chemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -chemm.mkl : chemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -chemm.veclib : chemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zhemm #################################################### - -zhemm.goto : zhemm.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zhemm.acml : zhemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zhemm.atlas : zhemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zhemm.mkl : zhemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zhemm.veclib : zhemm.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cherk #################################################### - -cherk.goto : cherk.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cherk.acml : cherk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cherk.atlas : cherk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cherk.mkl : cherk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cherk.veclib : cherk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zherk #################################################### - -zherk.goto : zherk.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - 
-zherk.acml : zherk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zherk.atlas : zherk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zherk.mkl : zherk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zherk.veclib : zherk.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cher2k #################################################### - -cher2k.goto : cher2k.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cher2k.acml : cher2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cher2k.atlas : cher2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cher2k.mkl : cher2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cher2k.veclib : cher2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zher2k #################################################### - -zher2k.goto : zher2k.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zher2k.acml : zher2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zher2k.atlas : zher2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zher2k.mkl : zher2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zher2k.veclib : zher2k.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sgemv #################################################### -sgemv.goto : sgemv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sgemv.acml : sgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgemv.atlas : sgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgemv.mkl : sgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgemv.veclib : sgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dgemv #################################################### -dgemv.goto : dgemv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dgemv.acml : dgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgemv.atlas : dgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgemv.mkl : dgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgemv.veclib : dgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cgemv #################################################### - -cgemv.goto : cgemv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cgemv.acml : cgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgemv.atlas : cgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) 
$(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgemv.mkl : cgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgemv.veclib : cgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zgemv #################################################### - -zgemv.goto : zgemv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zgemv.acml : zgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgemv.atlas : zgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgemv.mkl : zgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgemv.veclib : zgemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sger #################################################### -sger.goto : sger.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sger.acml : sger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sger.atlas : sger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sger.mkl : sger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sger.veclib : sger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dger #################################################### -dger.goto : dger.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dger.acml : dger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dger.atlas : dger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dger.mkl : dger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dger.veclib : dger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cger #################################################### -cger.goto : cger.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cger.acml : cger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cger.atlas : cger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cger.mkl : cger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cger.veclib : cger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zger #################################################### -zger.goto : zger.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zger.acml : zger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zger.atlas : zger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zger.mkl : zger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zger.veclib : zger.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) 
$(FEXTRALIB) - -##################################### Ssymv #################################################### -ssymv.goto : ssymv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ssymv.acml : ssymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssymv.atlas : ssymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssymv.mkl : ssymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ssymv.veclib : ssymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dsymv #################################################### -dsymv.goto : dsymv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dsymv.acml : dsymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsymv.atlas : dsymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsymv.mkl : dsymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dsymv.veclib : dsymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Csymv #################################################### -csymv.goto : csymv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -csymv.acml : csymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csymv.atlas : csymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csymv.mkl : csymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -csymv.veclib : csymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dsymv #################################################### -zsymv.goto : zsymv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zsymv.acml : zsymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsymv.atlas : zsymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsymv.mkl : zsymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zsymv.veclib : zsymv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sgeev #################################################### -sgeev.goto : sgeev.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sgeev.acml : sgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgeev.atlas : sgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgeev.mkl : sgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgeev.veclib : sgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dgeev #################################################### -dgeev.goto : dgeev.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) 
$(FEXTRALIB) -lm - -dgeev.acml : dgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgeev.atlas : dgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgeev.mkl : dgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgeev.veclib : dgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cgeev #################################################### - -cgeev.goto : cgeev.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cgeev.acml : cgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgeev.atlas : cgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgeev.mkl : cgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgeev.veclib : cgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zgeev #################################################### - -zgeev.goto : zgeev.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zgeev.acml : zgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgeev.atlas : zgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgeev.mkl : zgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgeev.veclib : zgeev.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sgetri #################################################### -sgetri.goto : sgetri.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sgetri.acml : sgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgetri.atlas : sgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgetri.mkl : sgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgetri.veclib : sgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dgetri #################################################### -dgetri.goto : dgetri.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dgetri.acml : dgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgetri.atlas : dgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgetri.mkl : dgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgetri.veclib : dgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cgetri #################################################### - -cgetri.goto : cgetri.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cgetri.acml : cgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgetri.atlas : cgetri.$(SUFFIX) - -$(CC) 
$(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgetri.mkl : cgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgetri.veclib : cgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zgetri #################################################### - -zgetri.goto : zgetri.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zgetri.acml : zgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgetri.atlas : zgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgetri.mkl : zgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgetri.veclib : zgetri.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Spotrf #################################################### -spotrf.goto : spotrf.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -spotrf.acml : spotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -spotrf.atlas : spotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -spotrf.mkl : spotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -spotrf.veclib : spotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dpotrf #################################################### -dpotrf.goto : dpotrf.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dpotrf.acml : dpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dpotrf.atlas : dpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dpotrf.mkl : dpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dpotrf.veclib : dpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cpotrf #################################################### - -cpotrf.goto : cpotrf.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cpotrf.acml : cpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cpotrf.atlas : cpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cpotrf.mkl : cpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cpotrf.veclib : cpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zpotrf #################################################### - -zpotrf.goto : zpotrf.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zpotrf.acml : zpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zpotrf.atlas : zpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zpotrf.mkl : zpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) 
$(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zpotrf.veclib : zpotrf.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Chemv #################################################### - -chemv.goto : chemv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -chemv.acml : chemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -chemv.atlas : chemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -chemv.mkl : chemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -chemv.veclib : chemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zhemv #################################################### - -zhemv.goto : zhemv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zhemv.acml : zhemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zhemv.atlas : zhemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zhemv.mkl : zhemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zhemv.veclib : zhemv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sdot #################################################### -sdot.goto : sdot.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sdot.acml : sdot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sdot.atlas : sdot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sdot.mkl : sdot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sdot.veclib : sdot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ddot #################################################### -ddot.goto : ddot.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ddot.acml : ddot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ddot.atlas : ddot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ddot.mkl : ddot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ddot.veclib : ddot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cdot #################################################### -cdot.goto : cdot.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cdot.acml : cdot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cdot.atlas : cdot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cdot.mkl : cdot-intel.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cdot.veclib : cdot-intel.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zdot 
#################################################### -zdot.goto : zdot.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zdot.acml : zdot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zdot.atlas : zdot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zdot.mkl : zdot-intel.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zdot.veclib : zdot-intel.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Srot #################################################### -srot.goto : srot.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -srot.acml : srot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -srot.atlas : srot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -srot.mkl : srot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -srot.veclib : srot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Drot #################################################### -drot.goto : drot.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -drot.acml : drot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -drot.atlas : drot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -drot.mkl : drot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -drot.veclib : drot.$(SUFFIX) - $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - - -##################################### Saxpy #################################################### -saxpy.goto : saxpy.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -saxpy.acml : saxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -saxpy.atlas : saxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -saxpy.mkl : saxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -saxpy.veclib : saxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Daxpy #################################################### -daxpy.goto : daxpy.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -daxpy.acml : daxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -daxpy.atlas : daxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -daxpy.mkl : daxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -daxpy.veclib : daxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Caxpy #################################################### - -caxpy.goto : caxpy.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -caxpy.acml : caxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) 
$(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -caxpy.atlas : caxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -caxpy.mkl : caxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -caxpy.veclib : caxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zaxpy #################################################### - -zaxpy.goto : zaxpy.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zaxpy.acml : zaxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zaxpy.atlas : zaxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zaxpy.mkl : zaxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zaxpy.veclib : zaxpy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - - -##################################### Scopy #################################################### -scopy.goto : scopy.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -scopy.acml : scopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -scopy.atlas : scopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -scopy.mkl : scopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -scopy.veclib : scopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dcopy #################################################### -dcopy.goto : dcopy.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dcopy.acml : dcopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dcopy.atlas : dcopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dcopy.mkl : dcopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dcopy.veclib : dcopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Ccopy #################################################### - -ccopy.goto : ccopy.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -ccopy.acml : ccopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ccopy.atlas : ccopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ccopy.mkl : ccopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -ccopy.veclib : ccopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zcopy #################################################### - -zcopy.goto : zcopy.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zcopy.acml : zcopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zcopy.atlas : zcopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zcopy.mkl : zcopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o 
$(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zcopy.veclib : zcopy.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sscal #################################################### -sscal.goto : sscal.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sscal.acml : sscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sscal.atlas : sscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sscal.mkl : sscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sscal.veclib : sscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dscal #################################################### -dscal.goto : dscal.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dscal.acml : dscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dscal.atlas : dscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dscal.mkl : dscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dscal.veclib : dscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cscal #################################################### - -cscal.goto : cscal.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cscal.acml : cscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cscal.atlas : cscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cscal.mkl : cscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cscal.veclib : cscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zscal #################################################### - -zscal.goto : zscal.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zscal.acml : zscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zscal.atlas : zscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zscal.mkl : zscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zscal.veclib : zscal.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sasum #################################################### -sasum.goto : sasum.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sasum.acml : sasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sasum.atlas : sasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sasum.mkl : sasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sasum.veclib : sasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - 
-##################################### Dasum #################################################### -dasum.goto : dasum.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dasum.acml : dasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dasum.atlas : dasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dasum.mkl : dasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dasum.veclib : dasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Casum #################################################### - -casum.goto : casum.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -casum.acml : casum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -casum.atlas : casum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -casum.mkl : casum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -casum.veclib : casum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zasum #################################################### - -zasum.goto : zasum.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zasum.acml : zasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zasum.atlas : zasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zasum.mkl : zasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zasum.veclib : zasum.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Sswap #################################################### -sswap.goto : sswap.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sswap.acml : sswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sswap.atlas : sswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sswap.mkl : sswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sswap.veclib : sswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dswap #################################################### -dswap.goto : dswap.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dswap.acml : dswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dswap.atlas : dswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dswap.mkl : dswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dswap.veclib : dswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cswap #################################################### - -cswap.goto : cswap.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) 
$(FEXTRALIB) -lm - -cswap.acml : cswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cswap.atlas : cswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cswap.mkl : cswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cswap.veclib : cswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zswap #################################################### - -zswap.goto : zswap.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zswap.acml : zswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zswap.atlas : zswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zswap.mkl : zswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zswap.veclib : zswap.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - - -##################################### Sgesv #################################################### -sgesv.goto : sgesv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -sgesv.acml : sgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgesv.atlas : sgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgesv.mkl : sgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -sgesv.veclib : sgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Dgesv #################################################### -dgesv.goto : dgesv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -dgesv.acml : dgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgesv.atlas : dgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgesv.mkl : dgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -dgesv.veclib : dgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Cgesv #################################################### - -cgesv.goto : cgesv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -cgesv.acml : cgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgesv.atlas : cgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgesv.mkl : cgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -cgesv.veclib : cgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -##################################### Zgesv #################################################### - -zgesv.goto : zgesv.$(SUFFIX) ../$(LIBNAME) - $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm - -zgesv.acml : zgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) - -zgesv.atlas : zgesv.$(SUFFIX) - -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) 
-
-zgesv.mkl : zgesv.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-zgesv.veclib : zgesv.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-
-##################################### Cgemm3m ####################################################
-
-cgemm3m.goto : cgemm3m.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-cgemm3m.mkl : cgemm3m.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-cgemm3m.veclib : cgemm3m.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-##################################### Zgemm3m ####################################################
-
-zgemm3m.goto : zgemm3m.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-zgemm3m.mkl : zgemm3m.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-zgemm3m.veclib : zgemm3m.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-############################################## ISAMAX ##############################################
-isamax.goto : isamax.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-isamax.atlas : isamax.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-############################################## IDAMAX ##############################################
-idamax.goto : idamax.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-idamax.atlas : idamax.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-############################################## ICAMAX ##############################################
-icamax.goto : icamax.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-icamax.atlas : icamax.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-############################################## IZAMAX ##############################################
-izamax.goto : izamax.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-izamax.atlas : izamax.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-############################################## SNRM2 ##############################################
-snrm2.goto : snrm2.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-snrm2.atlas : snrm2.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-############################################## DNRM2 ##############################################
-dnrm2.goto : dnrm2.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-dnrm2.atlas : dnrm2.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-############################################## Sscnrm2 ##############################################
-scnrm2.goto : scnrm2.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-scnrm2.atlas : scnrm2.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-############################################## Ddznrm2 ##############################################
-dznrm2.goto : dznrm2.$(SUFFIX) ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
-
-dznrm2.atlas : dznrm2.$(SUFFIX)
-	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
-
-
-###################################################################################################
-
-slinpack.$(SUFFIX) : linpack.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dlinpack.$(SUFFIX) : linpack.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-clinpack.$(SUFFIX) : linpack.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zlinpack.$(SUFFIX) : linpack.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-scholesky.$(SUFFIX) : cholesky.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dcholesky.$(SUFFIX) : cholesky.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-ccholesky.$(SUFFIX) : cholesky.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zcholesky.$(SUFFIX) : cholesky.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-sgemm.$(SUFFIX) : gemm.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dgemm.$(SUFFIX) : gemm.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cgemm.$(SUFFIX) : gemm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zgemm.$(SUFFIX) : gemm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-ssymm.$(SUFFIX) : symm.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dsymm.$(SUFFIX) : symm.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-csymm.$(SUFFIX) : symm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zsymm.$(SUFFIX) : symm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-strmm.$(SUFFIX) : trmm.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dtrmm.$(SUFFIX) : trmm.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-ctrmm.$(SUFFIX) : trmm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-ztrmm.$(SUFFIX) : trmm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-strsm.$(SUFFIX) : trsm.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dtrsm.$(SUFFIX) : trsm.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-ctrsm.$(SUFFIX) : trsm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-ztrsm.$(SUFFIX) : trsm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-ssyrk.$(SUFFIX) : syrk.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dsyrk.$(SUFFIX) : syrk.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-csyrk.$(SUFFIX) : syrk.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zsyrk.$(SUFFIX) : syrk.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-ssyr2k.$(SUFFIX) : syr2k.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dsyr2k.$(SUFFIX) : syr2k.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-csyr2k.$(SUFFIX) : syr2k.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zsyr2k.$(SUFFIX) : syr2k.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-chemm.$(SUFFIX) : hemm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zhemm.$(SUFFIX) : hemm.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-cherk.$(SUFFIX) : herk.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zherk.$(SUFFIX) : herk.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-cher2k.$(SUFFIX) : her2k.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zher2k.$(SUFFIX) : her2k.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-sgemv.$(SUFFIX) : gemv.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dgemv.$(SUFFIX) : gemv.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cgemv.$(SUFFIX) : gemv.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zgemv.$(SUFFIX) : gemv.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-sger.$(SUFFIX) : ger.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dger.$(SUFFIX) : ger.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cger.$(SUFFIX) : ger.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zger.$(SUFFIX) : ger.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-
-ssymv.$(SUFFIX) : symv.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dsymv.$(SUFFIX) : symv.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-csymv.$(SUFFIX) : symv.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zsymv.$(SUFFIX) : symv.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-sgeev.$(SUFFIX) : geev.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dgeev.$(SUFFIX) : geev.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cgeev.$(SUFFIX) : geev.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zgeev.$(SUFFIX) : geev.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-sgetri.$(SUFFIX) : getri.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dgetri.$(SUFFIX) : getri.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cgetri.$(SUFFIX) : getri.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zgetri.$(SUFFIX) : getri.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-spotrf.$(SUFFIX) : potrf.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dpotrf.$(SUFFIX) : potrf.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cpotrf.$(SUFFIX) : potrf.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zpotrf.$(SUFFIX) : potrf.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-chemv.$(SUFFIX) : hemv.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zhemv.$(SUFFIX) : hemv.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-sdot.$(SUFFIX) : dot.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-ddot.$(SUFFIX) : dot.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cdot.$(SUFFIX) : zdot.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zdot.$(SUFFIX) : zdot.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-cdot-intel.$(SUFFIX) : zdot-intel.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zdot-intel.$(SUFFIX) : zdot-intel.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-
-
-saxpy.$(SUFFIX) : axpy.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-daxpy.$(SUFFIX) : axpy.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-caxpy.$(SUFFIX) : axpy.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zaxpy.$(SUFFIX) : axpy.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-scopy.$(SUFFIX) : copy.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dcopy.$(SUFFIX) : copy.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-ccopy.$(SUFFIX) : copy.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zcopy.$(SUFFIX) : copy.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-sswap.$(SUFFIX) : swap.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dswap.$(SUFFIX) : swap.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cswap.$(SUFFIX) : swap.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zswap.$(SUFFIX) : swap.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-
-
-sscal.$(SUFFIX) : scal.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dscal.$(SUFFIX) : scal.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cscal.$(SUFFIX) : scal.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zscal.$(SUFFIX) : scal.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-sasum.$(SUFFIX) : asum.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dasum.$(SUFFIX) : asum.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-casum.$(SUFFIX) : asum.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zasum.$(SUFFIX) : asum.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-
-sgesv.$(SUFFIX) : gesv.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dgesv.$(SUFFIX) : gesv.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-cgesv.$(SUFFIX) : gesv.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zgesv.$(SUFFIX) : gesv.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-srot.$(SUFFIX) : rot.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-drot.$(SUFFIX) : rot.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-
-
-
-
-cgemm3m.$(SUFFIX) : gemm3m.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-zgemm3m.$(SUFFIX) : gemm3m.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-
-isamax.$(SUFFIX) : iamax.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-idamax.$(SUFFIX) : iamax.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-icamax.$(SUFFIX) : iamax.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-izamax.$(SUFFIX) : iamax.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-
-snrm2.$(SUFFIX) : nrm2.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
-
-dnrm2.$(SUFFIX) : nrm2.c
-	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
-
-scnrm2.$(SUFFIX) : nrm2.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
-
-dznrm2.$(SUFFIX) : nrm2.c
-	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
-
-
-smallscaling: smallscaling.c ../$(LIBNAME)
-	$(CC) $(CFLAGS) -o $(@F) $^ $(EXTRALIB) -fopenmp -lm -lpthread
-
-clean ::
-	@rm -f *.goto *.mkl *.acml *.atlas *.veclib *.essl smallscaling
-
-include $(TOPDIR)/Makefile.tail
-
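+# Usage sketch (added commentary; assumes the OpenBLAS build in the parent
+# directory has already completed, and the from/to/step argument convention
+# shown below is taken from the benchmark drivers' main() routines and may
+# vary per driver):
+#
+#   make goto                  # build all OpenBLAS-linked benchmarks
+#   ./dgemm.goto 128 2048 128  # sweep DGEMM from n=128 to n=2048 in steps of 128
+#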
+TOPDIR = ..
+include $(TOPDIR)/Makefile.system
+
+# ACML standard
+#ACML=/opt/acml5.3.1/gfortran64_mp/lib
+#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm
+
+# ACML custom
+#ACML=/opt/pb/acml-5-3-1-gfortran-64bit/gfortran64_fma4_mp/lib
+#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm
+
+# ACML 6.1 custom
+ACML=/home/saar/acml6.1/gfortran64_mp/lib
+LIBACML = -fopenmp $(ACML)/libacml_mp.so -lgfortran -lm
+
+
+# Atlas Ubuntu
+#ATLAS=/usr/lib/atlas-base
+#LIBATLAS = -fopenmp $(ATLAS)/liblapack_atlas.a $(ATLAS)/libptcblas.a $(ATLAS)/libptf77blas.a $(ATLAS)/libatlas.a -lgfortran -lm
+
+# Atlas RHEL and Fedora
+ATLAS=/usr/lib64/atlas
+LIBATLAS = -fopenmp $(ATLAS)/liblapack.a $(ATLAS)/libptcblas.a $(ATLAS)/libptf77blas.a $(ATLAS)/libatlas.a -lgfortran -lm
+
+# Intel standard
+# MKL=/opt/intel/mkl/lib/intel64
+# LIBMKL = -L$(MKL) -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lpthread -lm
+
+# Intel custom
+MKL=/home/saar/intel_mkl
+LIBMKL = -L$(MKL) -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lgomp -lpthread -lm
+
+# Apple vecLib
+LIBVECLIB = -framework Accelerate
+
+ESSL=/opt/ibm/lib
+#LIBESSL = -lesslsmp $(ESSL)/libxlomp_ser.so.1 $(ESSL)/libxlf90_r.so.1 $(ESSL)/libxlfmath.so.1 $(ESSL)/libxlsmp.so.1 /opt/ibm/xlC/13.1.3/lib/libxl.a
+LIBESSL = -lesslsmp $(ESSL)/libxlf90_r.so.1 $(ESSL)/libxlfmath.so.1 $(ESSL)/libxlsmp.so.1 /opt/ibm/xlC/13.1.3/lib/libxl.a
+
+ifneq ($(NO_LAPACK), 1)
+GOTO_LAPACK_TARGETS=slinpack.goto dlinpack.goto clinpack.goto zlinpack.goto \
+	scholesky.goto dcholesky.goto ccholesky.goto zcholesky.goto \
+	sgesv.goto dgesv.goto cgesv.goto zgesv.goto \
+	sgeev.goto dgeev.goto cgeev.goto zgeev.goto \
+	csymv.goto zsymv.goto \
+	sgetri.goto dgetri.goto cgetri.goto zgetri.goto \
+	spotrf.goto dpotrf.goto cpotrf.goto zpotrf.goto
+else
+GOTO_LAPACK_TARGETS=
+endif
+
+ifeq ($(BUILD_BFLOAT16),1)
+GOTO_HALF_TARGETS=sbgemm.goto
+else
+GOTO_HALF_TARGETS=
+endif
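+
+# The ACML/ATLAS/MKL/ESSL locations above are machine-specific defaults
+# (note the /home/saar paths); edit them to match your installation, or
+# override them on the make command line, e.g. (path shown is only an
+# example):
+#
+#   make MKL=/opt/intel/mkl/lib/intel64 mkl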
+
+ifeq ($(OSNAME), WINNT)
+
+goto :: slinpack.goto dlinpack.goto clinpack.goto zlinpack.goto \
+	scholesky.goto dcholesky.goto ccholesky.goto zcholesky.goto \
+	sgemm.goto dgemm.goto cgemm.goto zgemm.goto \
+	strmm.goto dtrmm.goto ctrmm.goto ztrmm.goto \
+	strsm.goto dtrsm.goto ctrsm.goto ztrsm.goto \
+	sspr.goto dspr.goto \
+	sspr2.goto dspr2.goto \
+	ssyr.goto dsyr.goto \
+	ssyr2.goto dsyr2.goto \
+	ssyrk.goto dsyrk.goto csyrk.goto zsyrk.goto \
+	ssyr2k.goto dsyr2k.goto csyr2k.goto zsyr2k.goto \
+	sger.goto dger.goto cger.goto zger.goto \
+	sdot.goto ddot.goto \
+	srot.goto drot.goto csrot.goto zdrot.goto \
+	srotm.goto drotm.goto \
+	saxpy.goto daxpy.goto caxpy.goto zaxpy.goto \
+	scopy.goto dcopy.goto ccopy.goto zcopy.goto \
+	sswap.goto dswap.goto cswap.goto zswap.goto \
+	sscal.goto dscal.goto cscal.goto zscal.goto \
+	sasum.goto dasum.goto casum.goto zasum.goto \
+	ssymv.goto dsymv.goto csymv.goto zsymv.goto \
+	chemv.goto zhemv.goto \
+	chbmv.goto zhbmv.goto \
+	chpmv.goto zhpmv.goto \
+	chemm.goto zhemm.goto \
+	cherk.goto zherk.goto \
+	cher2k.goto zher2k.goto \
+	cher.goto zher.goto \
+	cher2.goto zher2.goto \
+	sgemv.goto dgemv.goto cgemv.goto zgemv.goto \
+	sspmv.goto dspmv.goto \
+	strmv.goto dtrmv.goto ctrmv.goto ztrmv.goto \
+	stpmv.goto dtpmv.goto ctpmv.goto ztpmv.goto \
+	stpsv.goto dtpsv.goto ctpsv.goto ztpsv.goto \
+	strsv.goto dtrsv.goto ctrsv.goto ztrsv.goto \
+	sgeev.goto dgeev.goto cgeev.goto zgeev.goto \
+	sgesv.goto dgesv.goto cgesv.goto zgesv.goto \
+	sgetri.goto dgetri.goto cgetri.goto zgetri.goto \
+	spotrf.goto dpotrf.goto cpotrf.goto zpotrf.goto \
+	ssymm.goto dsymm.goto csymm.goto zsymm.goto \
+	saxpby.goto daxpby.goto caxpby.goto zaxpby.goto $(GOTO_HALF_TARGETS)
+
+acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \
+	scholesky.acml dcholesky.acml ccholesky.acml zcholesky.acml \
+	sgemm.acml dgemm.acml cgemm.acml zgemm.acml \
+	strmm.acml dtrmm.acml ctrmm.acml ztrmm.acml \
+	strsm.acml dtrsm.acml ctrsm.acml ztrsm.acml \
+	sspr.acml dspr.acml \
+	sspr2.acml dspr2.acml \
+	ssyr.acml dsyr.acml \
+	ssyr2.acml dsyr2.acml \
+	ssyrk.acml dsyrk.acml csyrk.acml zsyrk.acml \
+	ssyr2k.acml dsyr2k.acml csyr2k.acml zsyr2k.acml \
+	sger.acml dger.acml cger.acml zger.acml \
+	sdot.acml ddot.acml \
+	srot.acml drot.acml csrot.acml zdrot.acml \
+	srotm.acml drotm.acml \
+	saxpy.acml daxpy.acml caxpy.acml zaxpy.acml \
+	scopy.acml dcopy.acml ccopy.acml zcopy.acml \
+	sswap.acml dswap.acml cswap.acml zswap.acml \
+	sscal.acml dscal.acml cscal.acml zscal.acml \
+	sasum.acml dasum.acml casum.acml zasum.acml \
+	ssymv.acml dsymv.acml csymv.acml zsymv.acml \
+	chemv.acml zhemv.acml \
+	chbmv.acml zhbmv.acml \
+	chpmv.acml zhpmv.acml \
+	chemm.acml zhemm.acml \
+	cherk.acml zherk.acml \
+	cher2k.acml zher2k.acml \
+	cher.acml zher.acml \
+	cher2.acml zher2.acml \
+	sgemv.acml dgemv.acml cgemv.acml zgemv.acml \
+	strmv.acml dtrmv.acml ctrmv.acml ztrmv.acml \
+	stpmv.acml dtpmv.acml ctpmv.acml ztpmv.acml \
+	stpsv.acml dtpsv.acml ctpsv.acml ztpsv.acml \
+	strsv.acml dtrsv.acml ctrsv.acml ztrsv.acml \
+	sgeev.acml dgeev.acml cgeev.acml zgeev.acml \
+	sgesv.acml dgesv.acml cgesv.acml zgesv.acml \
+	sgetri.acml dgetri.acml cgetri.acml zgetri.acml \
+	spotrf.acml dpotrf.acml cpotrf.acml zpotrf.acml \
+	ssymm.acml dsymm.acml csymm.acml zsymm.acml \
+	saxpby.acml daxpby.acml caxpby.acml zaxpby.acml
+
+atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \
+	scholesky.atlas dcholesky.atlas ccholesky.atlas zcholesky.atlas \
+	sgemm.atlas dgemm.atlas cgemm.atlas zgemm.atlas \
+	strmm.atlas dtrmm.atlas ctrmm.atlas ztrmm.atlas \
+	strsm.atlas dtrsm.atlas ctrsm.atlas ztrsm.atlas \
+	sspr.atlas dspr.atlas \
+	sspr2.atlas dspr2.atlas \
+	ssyr.atlas dsyr.atlas \
+	ssyr2.atlas dsyr2.atlas \
+	ssyrk.atlas dsyrk.atlas csyrk.atlas zsyrk.atlas \
+	ssyr2k.atlas dsyr2k.atlas csyr2k.atlas zsyr2k.atlas \
+	sger.atlas dger.atlas cger.atlas zger.atlas \
+	sdot.atlas ddot.atlas \
+	srot.atlas drot.atlas csrot.atlas zdrot.atlas \
+	srotm.atlas drotm.atlas \
+	saxpy.atlas daxpy.atlas caxpy.atlas zaxpy.atlas \
+	scopy.atlas dcopy.atlas ccopy.atlas zcopy.atlas \
+	sswap.atlas dswap.atlas cswap.atlas zswap.atlas \
+	sscal.atlas dscal.atlas cscal.atlas zscal.atlas \
+	sasum.atlas dasum.atlas casum.atlas zasum.atlas \
+	ssymv.atlas dsymv.atlas csymv.atlas zsymv.atlas \
+	chemv.atlas zhemv.atlas \
+	chbmv.atlas zhbmv.atlas \
+	chpmv.atlas zhpmv.atlas \
+	chemm.atlas zhemm.atlas \
+	cherk.atlas zherk.atlas \
+	cher2k.atlas zher2k.atlas \
+	cher.atlas zher.atlas \
+	cher2.atlas zher2.atlas \
+	sgemv.atlas dgemv.atlas cgemv.atlas zgemv.atlas \
+	sspmv.atlas dspmv.atlas \
+	strmv.atlas dtrmv.atlas ctrmv.atlas ztrmv.atlas \
+	stpmv.atlas dtpmv.atlas ctpmv.atlas ztpmv.atlas \
+	stpsv.atlas dtpsv.atlas ctpsv.atlas ztpsv.atlas \
+	strsv.atlas dtrsv.atlas ctrsv.atlas ztrsv.atlas \
+	sgeev.atlas dgeev.atlas cgeev.atlas zgeev.atlas \
+	sgesv.atlas dgesv.atlas cgesv.atlas zgesv.atlas \
+	sgetri.atlas dgetri.atlas cgetri.atlas zgetri.atlas \
+	spotrf.atlas dpotrf.atlas cpotrf.atlas zpotrf.atlas \
+	ssymm.atlas dsymm.atlas csymm.atlas zsymm.atlas \
+	saxpby.atlas daxpby.atlas caxpby.atlas zaxpby.atlas
+
+mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \
+	scholesky.mkl dcholesky.mkl ccholesky.mkl zcholesky.mkl \
+	sgemm.mkl dgemm.mkl cgemm.mkl zgemm.mkl \
+	strmm.mkl dtrmm.mkl ctrmm.mkl ztrmm.mkl \
+	strsm.mkl dtrsm.mkl ctrsm.mkl ztrsm.mkl \
+	sspr.mkl dspr.mkl \
+	sspr2.mkl dspr2.mkl \
+	ssyr.mkl dsyr.mkl \
+	ssyr2.mkl dsyr2.mkl \
+	ssyrk.mkl dsyrk.mkl csyrk.mkl zsyrk.mkl \
+	ssyr2k.mkl dsyr2k.mkl csyr2k.mkl zsyr2k.mkl \
+	sger.mkl dger.mkl cger.mkl zger.mkl \
+	sdot.mkl ddot.mkl \
+	srot.mkl drot.mkl csrot.mkl zdrot.mkl \
+	srotm.mkl drotm.mkl \
+	saxpy.mkl daxpy.mkl caxpy.mkl zaxpy.mkl \
+	scopy.mkl dcopy.mkl ccopy.mkl zcopy.mkl \
+	sswap.mkl dswap.mkl cswap.mkl zswap.mkl \
+	sscal.mkl dscal.mkl cscal.mkl zscal.mkl \
+	sasum.mkl dasum.mkl casum.mkl zasum.mkl \
+	ssymv.mkl dsymv.mkl csymv.mkl zsymv.mkl \
+	chemv.mkl zhemv.mkl \
+	chbmv.mkl zhbmv.mkl \
+	chpmv.mkl zhpmv.mkl \
+	chemm.mkl zhemm.mkl \
+	cherk.mkl zherk.mkl \
+	cher2k.mkl zher2k.mkl \
+	cher.mkl zher.mkl \
+	cher2.mkl zher2.mkl \
+	sgemv.mkl dgemv.mkl cgemv.mkl zgemv.mkl \
+	strmv.mkl dtrmv.mkl ctrmv.mkl ztrmv.mkl \
+	stpmv.mkl dtpmv.mkl ctpmv.mkl ztpmv.mkl \
+	stpsv.mkl dtpsv.mkl ctpsv.mkl ztpsv.mkl \
+	strsv.mkl dtrsv.mkl ctrsv.mkl ztrsv.mkl \
+	sgeev.mkl dgeev.mkl cgeev.mkl zgeev.mkl \
+	sgesv.mkl dgesv.mkl cgesv.mkl zgesv.mkl \
+	sgetri.mkl dgetri.mkl cgetri.mkl zgetri.mkl \
+	spotrf.mkl dpotrf.mkl cpotrf.mkl zpotrf.mkl \
+	ssymm.mkl dsymm.mkl csymm.mkl zsymm.mkl \
+	saxpby.mkl daxpby.mkl caxpby.mkl zaxpby.mkl
+
+else
+
+goto :: sgemm.goto dgemm.goto cgemm.goto zgemm.goto \
+	strmm.goto dtrmm.goto ctrmm.goto ztrmm.goto \
+	strsm.goto dtrsm.goto ctrsm.goto ztrsm.goto \
+	sspr.goto dspr.goto \
+	sspr2.goto dspr2.goto \
+	ssyr.goto dsyr.goto \
+	ssyr2.goto dsyr2.goto \
+	ssyrk.goto dsyrk.goto csyrk.goto zsyrk.goto \
+	ssyr2k.goto dsyr2k.goto csyr2k.goto zsyr2k.goto \
+	sger.goto dger.goto cger.goto zger.goto \
+	sdot.goto ddot.goto cdot.goto zdot.goto \
+	srot.goto drot.goto csrot.goto zdrot.goto \
+	srotm.goto drotm.goto \
+	saxpy.goto daxpy.goto caxpy.goto zaxpy.goto \
+	scopy.goto dcopy.goto ccopy.goto zcopy.goto \
+	sswap.goto dswap.goto cswap.goto zswap.goto \
+	sscal.goto dscal.goto cscal.goto zscal.goto \
+	sasum.goto dasum.goto casum.goto zasum.goto \
+	ssymv.goto dsymv.goto \
+	chemv.goto zhemv.goto \
+	chbmv.goto zhbmv.goto \
+	chpmv.goto zhpmv.goto \
+	chemm.goto zhemm.goto \
+	cherk.goto zherk.goto \
+	cher2k.goto zher2k.goto \
+	cher.goto zher.goto \
+	cher2.goto zher2.goto \
+	sgemv.goto dgemv.goto cgemv.goto zgemv.goto \
+	sspmv.goto dspmv.goto \
+	strmv.goto dtrmv.goto ctrmv.goto ztrmv.goto \
+	stpmv.goto dtpmv.goto ctpmv.goto ztpmv.goto \
+	stpsv.goto dtpsv.goto ctpsv.goto ztpsv.goto \
+	strsv.goto dtrsv.goto ctrsv.goto ztrsv.goto \
+	ssymm.goto dsymm.goto csymm.goto zsymm.goto \
+	smallscaling \
+	isamax.goto idamax.goto icamax.goto izamax.goto \
+	ismax.goto idmax.goto \
+	isamin.goto idamin.goto icamin.goto izamin.goto \
+	ismin.goto idmin.goto \
+	samax.goto damax.goto camax.goto zamax.goto \
+	smax.goto dmax.goto \
+	samin.goto damin.goto camin.goto zamin.goto \
+	smin.goto dmin.goto \
+	saxpby.goto daxpby.goto caxpby.goto zaxpby.goto \
+	snrm2.goto dnrm2.goto scnrm2.goto dznrm2.goto $(GOTO_LAPACK_TARGETS) $(GOTO_HALF_TARGETS)
+
+acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \
+	scholesky.acml dcholesky.acml ccholesky.acml zcholesky.acml \
+	sgemm.acml dgemm.acml cgemm.acml zgemm.acml \
+	strmm.acml dtrmm.acml ctrmm.acml ztrmm.acml \
+	strsm.acml dtrsm.acml ctrsm.acml ztrsm.acml \
+	sspr.acml dspr.acml \
+	sspr2.acml dspr2.acml \
+	ssyr.acml dsyr.acml \
+	ssyr2.acml dsyr2.acml \
+	ssyrk.acml dsyrk.acml csyrk.acml zsyrk.acml \
+	ssyr2k.acml dsyr2k.acml csyr2k.acml zsyr2k.acml \
+	sger.acml dger.acml cger.acml zger.acml \
+	sdot.acml ddot.acml \
+	srot.acml drot.acml csrot.acml zdrot.acml \
+	srotm.acml drotm.acml \
+	saxpy.acml daxpy.acml caxpy.acml zaxpy.acml \
+	scopy.acml dcopy.acml ccopy.acml zcopy.acml \
+	sswap.acml dswap.acml cswap.acml zswap.acml \
+	sscal.acml dscal.acml cscal.acml zscal.acml \
+	sasum.acml dasum.acml casum.acml zasum.acml \
+	ssymv.acml dsymv.acml csymv.acml zsymv.acml \
+	chemv.acml zhemv.acml \
+	chbmv.acml zhbmv.acml \
+	chpmv.acml zhpmv.acml \
+	chemm.acml zhemm.acml \
+	cherk.acml zherk.acml \
+	cher2k.acml zher2k.acml \
+	cher.acml zher.acml \
+	cher2.acml zher2.acml \
+	sgemv.acml dgemv.acml cgemv.acml zgemv.acml \
+	strmv.acml dtrmv.acml ctrmv.acml ztrmv.acml \
+	stpmv.acml dtpmv.acml ctpmv.acml ztpmv.acml \
+	stpsv.acml dtpsv.acml ctpsv.acml ztpsv.acml \
+	strsv.acml dtrsv.acml ctrsv.acml ztrsv.acml \
+	sgeev.acml dgeev.acml cgeev.acml zgeev.acml \
+	sgesv.acml dgesv.acml cgesv.acml zgesv.acml \
+	sgetri.acml dgetri.acml cgetri.acml zgetri.acml \
+	spotrf.acml dpotrf.acml cpotrf.acml zpotrf.acml \
+	ssymm.acml dsymm.acml csymm.acml zsymm.acml \
+	saxpby.acml daxpby.acml caxpby.acml zaxpby.acml
+
+atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \
+	scholesky.atlas dcholesky.atlas ccholesky.atlas zcholesky.atlas \
+	sgemm.atlas dgemm.atlas cgemm.atlas zgemm.atlas \
+	strmm.atlas dtrmm.atlas ctrmm.atlas ztrmm.atlas \
+	strsm.atlas dtrsm.atlas ctrsm.atlas ztrsm.atlas \
+	sspr.atlas dspr.atlas \
+	sspr2.atlas dspr2.atlas \
+	ssyr.atlas dsyr.atlas \
+	ssyr2.atlas dsyr2.atlas \
+	ssyrk.atlas dsyrk.atlas csyrk.atlas zsyrk.atlas \
+	ssyr2k.atlas dsyr2k.atlas csyr2k.atlas zsyr2k.atlas \
+	sger.atlas dger.atlas cger.atlas zger.atlas \
+	sdot.atlas ddot.atlas \
+	srot.atlas drot.atlas csrot.atlas zdrot.atlas \
+	srotm.atlas drotm.atlas \
+	saxpy.atlas daxpy.atlas caxpy.atlas zaxpy.atlas \
+	scopy.atlas dcopy.atlas ccopy.atlas zcopy.atlas \
+	sswap.atlas dswap.atlas cswap.atlas zswap.atlas \
+	sscal.atlas dscal.atlas cscal.atlas zscal.atlas \
+	sasum.atlas dasum.atlas casum.atlas zasum.atlas \
+	ssymv.atlas dsymv.atlas csymv.atlas zsymv.atlas \
+	chemv.atlas zhemv.atlas \
+	chbmv.atlas zhbmv.atlas \
+	chpmv.atlas zhpmv.atlas \
+	chemm.atlas zhemm.atlas \
+	cherk.atlas zherk.atlas \
+	cher2k.atlas zher2k.atlas \
+	cher.atlas zher.atlas \
+	cher2.atlas zher2.atlas \
+	sgemv.atlas dgemv.atlas cgemv.atlas zgemv.atlas \
+	sspmv.atlas dspmv.atlas \
+	strmv.atlas dtrmv.atlas ctrmv.atlas ztrmv.atlas \
+	stpmv.atlas dtpmv.atlas ctpmv.atlas ztpmv.atlas \
+	stpsv.atlas dtpsv.atlas ctpsv.atlas ztpsv.atlas \
+	strsv.atlas dtrsv.atlas ctrsv.atlas ztrsv.atlas \
+	sgeev.atlas dgeev.atlas cgeev.atlas zgeev.atlas \
+	sgesv.atlas dgesv.atlas cgesv.atlas zgesv.atlas \
+	sgetri.atlas dgetri.atlas cgetri.atlas zgetri.atlas \
+	spotrf.atlas dpotrf.atlas cpotrf.atlas zpotrf.atlas \
+	ssymm.atlas dsymm.atlas csymm.atlas zsymm.atlas \
+	isamax.atlas idamax.atlas icamax.atlas izamax.atlas \
+	snrm2.atlas dnrm2.atlas scnrm2.atlas dznrm2.atlas \
+	saxpby.atlas daxpby.atlas caxpby.atlas zaxpby.atlas
+
+mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \
+	scholesky.mkl dcholesky.mkl ccholesky.mkl zcholesky.mkl \
+	sgemm.mkl dgemm.mkl cgemm.mkl zgemm.mkl \
+	strmm.mkl dtrmm.mkl ctrmm.mkl ztrmm.mkl \
+	strsm.mkl dtrsm.mkl ctrsm.mkl ztrsm.mkl \
+	sspr.mkl dspr.mkl \
+	sspr2.mkl dspr2.mkl \
+	ssyr.mkl dsyr.mkl \
+	ssyr2.mkl dsyr2.mkl \
+	ssyrk.mkl dsyrk.mkl csyrk.mkl zsyrk.mkl \
+	ssyr2k.mkl dsyr2k.mkl csyr2k.mkl zsyr2k.mkl \
+	sger.mkl dger.mkl cger.mkl zger.mkl \
+	sdot.mkl ddot.mkl cdot.mkl zdot.mkl \
+	srot.mkl drot.mkl csrot.mkl zdrot.mkl \
+	srotm.mkl drotm.mkl \
+	saxpy.mkl daxpy.mkl caxpy.mkl zaxpy.mkl \
+	scopy.mkl dcopy.mkl ccopy.mkl zcopy.mkl \
+	sswap.mkl dswap.mkl cswap.mkl zswap.mkl \
+	sscal.mkl dscal.mkl cscal.mkl zscal.mkl \
+	sasum.mkl dasum.mkl casum.mkl zasum.mkl \
+	ssymv.mkl dsymv.mkl csymv.mkl zsymv.mkl \
+	chemv.mkl zhemv.mkl \
+	chbmv.mkl zhbmv.mkl \
+	chpmv.mkl zhpmv.mkl \
+	chemm.mkl zhemm.mkl \
+	cherk.mkl zherk.mkl \
+	cher2k.mkl zher2k.mkl \
+	cher.mkl zher.mkl \
+	cher2.mkl zher2.mkl \
+	sgemv.mkl dgemv.mkl cgemv.mkl zgemv.mkl \
+	strmv.mkl dtrmv.mkl ctrmv.mkl ztrmv.mkl \
+	stpmv.mkl dtpmv.mkl ctpmv.mkl ztpmv.mkl \
+	stpsv.mkl dtpsv.mkl ctpsv.mkl ztpsv.mkl \
+	strsv.mkl dtrsv.mkl ctrsv.mkl ztrsv.mkl \
+	sgeev.mkl dgeev.mkl cgeev.mkl zgeev.mkl \
+	sgesv.mkl dgesv.mkl cgesv.mkl zgesv.mkl \
+	sgetri.mkl dgetri.mkl cgetri.mkl zgetri.mkl \
+	spotrf.mkl dpotrf.mkl cpotrf.mkl zpotrf.mkl \
+	ssymm.mkl dsymm.mkl csymm.mkl zsymm.mkl \
+	saxpby.mkl daxpby.mkl caxpby.mkl zaxpby.mkl
+
+endif
+
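+# The WINNT branch above lists the LAPACK-dependent drivers unconditionally
+# and omits smallscaling and the i?amax/?amin/?nrm2 family; the generic
+# branch instead pulls the LAPACK drivers in through $(GOTO_LAPACK_TARGETS),
+# which is empty when NO_LAPACK=1, and $(GOTO_HALF_TARGETS), which is only
+# populated when BUILD_BFLOAT16=1.
+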
+essl :: sgemm.essl strmm.essl dgemm.essl dtrmm.essl \
+	cgemm.essl ctrmm.essl zgemm.essl ztrmm.essl \
+	slinpack.essl clinpack.essl dlinpack.essl zlinpack.essl \
+	scholesky.essl ccholesky.essl dcholesky.essl zcholesky.essl \
+	strsm.essl dtrsm.essl ctrsm.essl ztrsm.essl
+
+veclib :: slinpack.veclib dlinpack.veclib clinpack.veclib zlinpack.veclib \
+	scholesky.veclib dcholesky.veclib ccholesky.veclib zcholesky.veclib \
+	sgemm.veclib dgemm.veclib cgemm.veclib zgemm.veclib \
+	strmm.veclib dtrmm.veclib ctrmm.veclib ztrmm.veclib \
+	strsm.veclib dtrsm.veclib ctrsm.veclib ztrsm.veclib \
+	sspr.veclib dspr.veclib \
+	sspr2.veclib dspr2.veclib \
+	ssyr.veclib dsyr.veclib \
+	ssyr2.veclib dsyr2.veclib \
+	ssyrk.veclib dsyrk.veclib csyrk.veclib zsyrk.veclib \
+	ssyr2k.veclib dsyr2k.veclib csyr2k.veclib zsyr2k.veclib \
+	sger.veclib dger.veclib cger.veclib zger.veclib \
+	sdot.veclib ddot.veclib cdot.veclib zdot.veclib \
+	srot.veclib drot.veclib csrot.veclib zdrot.veclib \
+	srotm.veclib drotm.veclib \
+	saxpy.veclib daxpy.veclib caxpy.veclib zaxpy.veclib \
+	scopy.veclib dcopy.veclib ccopy.veclib zcopy.veclib \
+	sswap.veclib dswap.veclib cswap.veclib zswap.veclib \
+	sscal.veclib dscal.veclib cscal.veclib zscal.veclib \
+	sasum.veclib dasum.veclib casum.veclib zasum.veclib \
+	ssymv.veclib dsymv.veclib csymv.veclib zsymv.veclib \
+	chemv.veclib zhemv.veclib \
+	chbmv.veclib zhbmv.veclib \
+	chpmv.veclib zhpmv.veclib \
+	chemm.veclib zhemm.veclib \
+	cherk.veclib zherk.veclib \
+	cher2k.veclib zher2k.veclib \
+	cher.veclib zher.veclib \
+	cher2.veclib zher2.veclib \
+	sgemv.veclib dgemv.veclib cgemv.veclib zgemv.veclib \
+	strmv.veclib dtrmv.veclib ctrmv.veclib ztrmv.veclib \
+	stpmv.veclib dtpmv.veclib ctpmv.veclib ztpmv.veclib \
+	stpsv.veclib dtpsv.veclib ctpsv.veclib ztpsv.veclib \
+	strsv.veclib dtrsv.veclib ctrsv.veclib ztrsv.veclib \
+	sgeev.veclib dgeev.veclib cgeev.veclib zgeev.veclib \
+	sgesv.veclib dgesv.veclib cgesv.veclib zgesv.veclib \
+	sgetri.veclib dgetri.veclib cgetri.veclib zgetri.veclib \
+	spotrf.veclib dpotrf.veclib cpotrf.veclib zpotrf.veclib \
+	ssymm.veclib dsymm.veclib csymm.veclib zsymm.veclib \
+	saxpby.veclib daxpby.veclib caxpby.veclib zaxpby.veclib
+
+goto_3m :: cgemm3m.goto zgemm3m.goto
+
+mkl_3m :: cgemm3m.mkl zgemm3m.mkl
+
+all :: goto mkl atlas acml veclib
+
+exe :
+	@./Make_exe.sh
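+
+# Every benchmark below follows the same rule pattern, sketched here for a
+# hypothetical driver "foo" (the leading "-" on the vendor recipes tells
+# make to keep going when that library is not installed):
+#
+#   foo.goto : foo.$(SUFFIX) ../$(LIBNAME)
+#           $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+#
+#   foo.mkl : foo.$(SUFFIX)
+#           -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+#
+# Individual drivers can also be built directly, e.g. "make dgemm.goto".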
+
+##################################### Slinpack ####################################################
+slinpack.goto : slinpack.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+slinpack.acml : slinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+slinpack.atlas : slinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+slinpack.mkl : slinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+slinpack.veclib : slinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+slinpack.essl : slinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dlinpack ####################################################
+dlinpack.goto : dlinpack.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dlinpack.acml : dlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dlinpack.atlas : dlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dlinpack.mkl : dlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dlinpack.veclib : dlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dlinpack.essl : dlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Clinpack ####################################################
+
+clinpack.goto : clinpack.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+clinpack.acml : clinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+clinpack.atlas : clinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+clinpack.mkl : clinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+clinpack.veclib : clinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+clinpack.essl : clinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Zlinpack ####################################################
+
+zlinpack.goto : zlinpack.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+zlinpack.acml : zlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zlinpack.atlas : zlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zlinpack.mkl : zlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zlinpack.veclib : zlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zlinpack.essl : zlinpack.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Scholesky ###################################################
+
+scholesky.goto : scholesky.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+scholesky.acml : scholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+scholesky.atlas : scholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+scholesky.mkl : scholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+scholesky.veclib : scholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+scholesky.essl : scholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dcholesky ###################################################
+
+dcholesky.goto : dcholesky.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dcholesky.acml : dcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dcholesky.atlas : dcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dcholesky.mkl : dcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dcholesky.veclib : dcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dcholesky.essl : dcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ccholesky ###################################################
+
+ccholesky.goto : ccholesky.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ccholesky.acml : ccholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ccholesky.atlas : ccholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ccholesky.mkl : ccholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ccholesky.veclib : ccholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ccholesky.essl : ccholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+
+##################################### Zcholesky ###################################################
+
+zcholesky.goto : zcholesky.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+zcholesky.acml : zcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zcholesky.atlas : zcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zcholesky.mkl : zcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zcholesky.veclib : zcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zcholesky.essl : zcholesky.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Sgemm ####################################################
+ifeq ($(BUILD_BFLOAT16),1)
+sbgemm.goto : sbgemm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+endif
+
+sgemm.goto : sgemm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+sgemm.acml : sgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgemm.atlas : sgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgemm.mkl : sgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgemm.veclib : sgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgemm.essl : sgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dgemm ####################################################
+dgemm.goto : dgemm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dgemm.acml : dgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dgemm.atlas : dgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dgemm.mkl : dgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dgemm.veclib : dgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dgemm.essl : dgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Cgemm ####################################################
+
+cgemm.goto : cgemm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+cgemm.acml : cgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+cgemm.atlas : cgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+cgemm.mkl : cgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+cgemm.veclib : cgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+cgemm.essl : cgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Zgemm ####################################################
+
+zgemm.goto : zgemm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+zgemm.acml : zgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zgemm.atlas : zgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zgemm.mkl : zgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zgemm.veclib : zgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zgemm.essl : zgemm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ssymm ####################################################
+ssymm.goto : ssymm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ssymm.acml : ssymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssymm.atlas : ssymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssymm.mkl : ssymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssymm.veclib : ssymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dsymm ####################################################
+dsymm.goto : dsymm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dsymm.acml : dsymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsymm.atlas : dsymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsymm.mkl : dsymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsymm.veclib : dsymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Csymm ####################################################
+
+csymm.goto : csymm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+csymm.acml : csymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csymm.atlas : csymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csymm.mkl : csymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csymm.veclib : csymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Zsymm ####################################################
+
+zsymm.goto : zsymm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+zsymm.acml : zsymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsymm.atlas : zsymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsymm.mkl : zsymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsymm.veclib : zsymm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Strmm ####################################################
+strmm.goto : strmm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+strmm.acml : strmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+strmm.atlas : strmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+strmm.mkl : strmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+strmm.veclib : strmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+strmm.essl : strmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dtrmm ####################################################
+dtrmm.goto : dtrmm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dtrmm.acml : dtrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dtrmm.atlas : dtrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dtrmm.mkl : dtrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dtrmm.veclib : dtrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dtrmm.essl : dtrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ctrmm ####################################################
+
+ctrmm.goto : ctrmm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ctrmm.acml : ctrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ctrmm.atlas : ctrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ctrmm.mkl : ctrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ctrmm.veclib : ctrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ctrmm.essl : ctrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ztrmm ####################################################
+
+ztrmm.goto : ztrmm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ztrmm.acml : ztrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ztrmm.atlas : ztrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ztrmm.mkl : ztrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ztrmm.veclib : ztrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ztrmm.essl : ztrmm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Strsm ####################################################
+strsm.goto : strsm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+strsm.acml : strsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+strsm.atlas : strsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+strsm.mkl : strsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+strsm.veclib : strsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+strsm.essl : strsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dtrsm ####################################################
+dtrsm.goto : dtrsm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dtrsm.acml : dtrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dtrsm.atlas : dtrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dtrsm.mkl : dtrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dtrsm.veclib : dtrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dtrsm.essl : dtrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ctrsm ####################################################
+
+ctrsm.goto : ctrsm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ctrsm.acml : ctrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ctrsm.atlas : ctrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ctrsm.mkl : ctrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ctrsm.veclib : ctrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ctrsm.essl : ctrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ztrsm ####################################################
+
+ztrsm.goto : ztrsm.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ztrsm.acml : ztrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ztrsm.atlas : ztrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ztrsm.mkl : ztrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ztrsm.veclib : ztrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ztrsm.essl : ztrsm.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBESSL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+##################################### Ssyr ####################################################
+ssyr.goto : ssyr.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ssyr.acml : ssyr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr.atlas : ssyr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr.mkl : ssyr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr.veclib : ssyr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+##################################### Dsyr ####################################################
+dsyr.goto : dsyr.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dsyr.acml : dsyr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr.atlas : dsyr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr.mkl : dsyr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr.veclib : dsyr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Sspr ####################################################
+sspr.goto : sspr.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+sspr.acml : sspr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sspr.atlas : sspr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sspr.mkl : sspr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sspr.veclib : sspr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dspr ####################################################
+dspr.goto : dspr.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dspr.acml : dspr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dspr.atlas : dspr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dspr.mkl : dspr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dspr.veclib : dspr.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Sspr2 ####################################################
+sspr2.goto : sspr2.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+sspr2.acml : sspr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sspr2.atlas : sspr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sspr2.mkl : sspr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sspr2.veclib : sspr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dspr2 ####################################################
+dspr2.goto : dspr2.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dspr2.acml : dspr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dspr2.atlas : dspr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dspr2.mkl : dspr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dspr2.veclib : dspr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ssyr2 ####################################################
+ssyr2.goto : ssyr2.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ssyr2.acml : ssyr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr2.atlas : ssyr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr2.mkl : ssyr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr2.veclib : ssyr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+##################################### Dsyr2 ####################################################
+dsyr2.goto : dsyr2.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dsyr2.acml : dsyr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr2.atlas : dsyr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr2.mkl : dsyr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr2.veclib : dsyr2.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ssyrk ####################################################
+ssyrk.goto : ssyrk.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ssyrk.acml : ssyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyrk.atlas : ssyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyrk.mkl : ssyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyrk.veclib : ssyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dsyrk ####################################################
+dsyrk.goto : dsyrk.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dsyrk.acml : dsyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyrk.atlas : dsyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyrk.mkl : dsyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyrk.veclib : dsyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Csyrk ####################################################
+
+csyrk.goto : csyrk.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+csyrk.acml : csyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csyrk.atlas : csyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csyrk.mkl : csyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csyrk.veclib : csyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Zsyrk ####################################################
+
+zsyrk.goto : zsyrk.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+zsyrk.acml : zsyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsyrk.atlas : zsyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsyrk.mkl : zsyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsyrk.veclib : zsyrk.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Ssyr2k ####################################################
+ssyr2k.goto : ssyr2k.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+ssyr2k.acml : ssyr2k.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr2k.atlas : ssyr2k.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr2k.mkl : ssyr2k.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssyr2k.veclib : ssyr2k.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dsyr2k ####################################################
+dsyr2k.goto : dsyr2k.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dsyr2k.acml : dsyr2k.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr2k.atlas : dsyr2k.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr2k.mkl : dsyr2k.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsyr2k.veclib : dsyr2k.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Csyr2k ####################################################
+
+csyr2k.goto : csyr2k.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
$(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +csyr2k.acml : csyr2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +csyr2k.atlas : csyr2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +csyr2k.mkl : csyr2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +csyr2k.veclib : csyr2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zsyr2k #################################################### + +zsyr2k.goto : zsyr2k.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zsyr2k.acml : zsyr2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zsyr2k.atlas : zsyr2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zsyr2k.mkl : zsyr2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zsyr2k.veclib : zsyr2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Chemm #################################################### + +chemm.goto : chemm.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +chemm.acml : chemm.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chemm.atlas : chemm.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chemm.mkl : chemm.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chemm.veclib : chemm.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zhemm #################################################### + +zhemm.goto : zhemm.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zhemm.acml : zhemm.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhemm.atlas : zhemm.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhemm.mkl : zhemm.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhemm.veclib : zhemm.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cherk #################################################### + +cherk.goto : cherk.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cherk.acml : cherk.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cherk.atlas : cherk.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cherk.mkl : cherk.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cherk.veclib : cherk.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zherk #################################################### + +zherk.goto : zherk.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zherk.acml : zherk.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zherk.atlas : 
zherk.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zherk.mkl : zherk.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zherk.veclib : zherk.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cher2k #################################################### + +cher2k.goto : cher2k.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cher2k.acml : cher2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cher2k.atlas : cher2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cher2k.mkl : cher2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cher2k.veclib : cher2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zher2k #################################################### + +zher2k.goto : zher2k.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zher2k.acml : zher2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher2k.atlas : zher2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher2k.mkl : zher2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher2k.veclib : zher2k.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cher #################################################### + +cher.goto : cher.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cher.acml : cher.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cher.atlas : cher.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cher.mkl : cher.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cher.veclib : cher.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zher #################################################### + +zher.goto : zher.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zher.acml : zher.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher.atlas : zher.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher.mkl : zher.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher.veclib : zher.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cher2 #################################################### + +cher2.goto : cher2.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cher2.acml : cher2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cher2.atlas : cher2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cher2.mkl : cher2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) 
$(FEXTRALIB) + +cher2.veclib : cher2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zher2 #################################################### + +zher2.goto : zher2.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zher2.acml : zher2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher2.atlas : zher2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher2.mkl : zher2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zher2.veclib : zher2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Sgemv #################################################### +sgemv.goto : sgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sgemv.acml : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgemv.atlas : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgemv.mkl : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgemv.veclib : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dgemv #################################################### +dgemv.goto : dgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dgemv.acml : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgemv.atlas : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgemv.mkl : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgemv.veclib : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cgemv #################################################### + +cgemv.goto : cgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cgemv.acml : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgemv.atlas : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgemv.mkl : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgemv.veclib : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zgemv #################################################### + +zgemv.goto : zgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zgemv.acml : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgemv.atlas : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgemv.mkl : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgemv.veclib : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Sspmv 
#################################################### +sspmv.goto : sspmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sspmv.atlas : sspmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dspmv #################################################### +dspmv.goto : dspmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dspmv.atlas : dspmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Strmv #################################################### +strmv.goto : strmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +strmv.acml : strmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +strmv.atlas : strmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +strmv.mkl : strmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +strmv.veclib : strmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dtrmv #################################################### +dtrmv.goto : dtrmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dtrmv.acml : dtrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtrmv.atlas : dtrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtrmv.mkl : dtrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtrmv.veclib : dtrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ctrmv #################################################### + +ctrmv.goto : ctrmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ctrmv.acml : ctrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctrmv.atlas : ctrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctrmv.mkl : ctrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctrmv.veclib : ctrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ztrmv #################################################### + +ztrmv.goto : ztrmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ztrmv.acml : ztrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztrmv.atlas : ztrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztrmv.mkl : ztrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztrmv.veclib : ztrmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + + +##################################### Stpmv #################################################### +stpmv.goto : stpmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +stpmv.acml : stpmv.$(SUFFIX) + -$(CC) 
$(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +stpmv.atlas : stpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +stpmv.mkl : stpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +stpmv.veclib : stpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dtpmv #################################################### +dtpmv.goto : dtpmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dtpmv.acml : dtpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtpmv.atlas : dtpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtpmv.mkl : dtpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtpmv.veclib : dtpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ctpmv #################################################### + +ctpmv.goto : ctpmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ctpmv.acml : ctpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctpmv.atlas : ctpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctpmv.mkl : ctpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctpmv.veclib : ctpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ztpmv #################################################### + +ztpmv.goto : ztpmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ztpmv.acml : ztpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztpmv.atlas : ztpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztpmv.mkl : ztpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztpmv.veclib : ztpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Stpsv #################################################### +stpsv.goto : stpsv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +stpsv.acml : stpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +stpsv.atlas : stpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +stpsv.mkl : stpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +stpsv.veclib : stpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dtpsv #################################################### +dtpsv.goto : dtpsv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dtpsv.acml : dtpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtpsv.atlas : dtpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtpsv.mkl : 
dtpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtpsv.veclib : dtpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ctpsv #################################################### + +ctpsv.goto : ctpsv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ctpsv.acml : ctpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctpsv.atlas : ctpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctpsv.mkl : ctpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctpsv.veclib : ctpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ztpsv #################################################### + +ztpsv.goto : ztpsv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ztpsv.acml : ztpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztpsv.atlas : ztpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztpsv.mkl : ztpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztpsv.veclib : ztpsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Strsv #################################################### +strsv.goto : strsv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +strsv.acml : strsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +strsv.atlas : strsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +strsv.mkl : strsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +strsv.veclib : strsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dtrsv #################################################### +dtrsv.goto : dtrsv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dtrsv.acml : dtrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtrsv.atlas : dtrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtrsv.mkl : dtrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dtrsv.veclib : dtrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ctrsv #################################################### + +ctrsv.goto : ctrsv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ctrsv.acml : ctrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctrsv.atlas : ctrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctrsv.mkl : ctrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ctrsv.veclib : ctrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) 
$(FEXTRALIB) + +##################################### Ztrsv #################################################### + +ztrsv.goto : ztrsv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ztrsv.acml : ztrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztrsv.atlas : ztrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztrsv.mkl : ztrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ztrsv.veclib : ztrsv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Sger #################################################### +sger.goto : sger.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sger.acml : sger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sger.atlas : sger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sger.mkl : sger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sger.veclib : sger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dger #################################################### +dger.goto : dger.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dger.acml : dger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dger.atlas : dger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dger.mkl : dger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dger.veclib : dger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cger #################################################### +cger.goto : cger.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cger.acml : cger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cger.atlas : cger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cger.mkl : cger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cger.veclib : cger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zger #################################################### +zger.goto : zger.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zger.acml : zger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zger.atlas : zger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zger.mkl : zger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zger.veclib : zger.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ssymv #################################################### +ssymv.goto : ssymv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ssymv.acml : 
ssymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssymv.atlas : ssymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssymv.mkl : ssymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+ssymv.veclib : ssymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dsymv ####################################################
+dsymv.goto : dsymv.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dsymv.acml : dsymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsymv.atlas : dsymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsymv.mkl : dsymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dsymv.veclib : dsymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Csymv ####################################################
+csymv.goto : csymv.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+csymv.acml : csymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csymv.atlas : csymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csymv.mkl : csymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+csymv.veclib : csymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Zsymv ####################################################
+zsymv.goto : zsymv.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+zsymv.acml : zsymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsymv.atlas : zsymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsymv.mkl : zsymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zsymv.veclib : zsymv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Sgeev ####################################################
+sgeev.goto : sgeev.$(SUFFIX) ../$(LIBNAME)
+	$(FC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+sgeev.acml : sgeev.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgeev.atlas : sgeev.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgeev.mkl : sgeev.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgeev.veclib : sgeev.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dgeev ####################################################
+dgeev.goto : dgeev.$(SUFFIX) ../$(LIBNAME)
+	$(FC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm
+
+dgeev.acml : dgeev.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dgeev.atlas : dgeev.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dgeev.mkl : dgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgeev.veclib : dgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cgeev #################################################### + +cgeev.goto : cgeev.$(SUFFIX) ../$(LIBNAME) + $(FC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cgeev.acml : cgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgeev.atlas : cgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgeev.mkl : cgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgeev.veclib : cgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zgeev #################################################### + +zgeev.goto : zgeev.$(SUFFIX) ../$(LIBNAME) + $(FC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zgeev.acml : zgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgeev.atlas : zgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgeev.mkl : zgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgeev.veclib : zgeev.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Sgetri #################################################### +sgetri.goto : sgetri.$(SUFFIX) ../$(LIBNAME) + $(FC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sgetri.acml : sgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgetri.atlas : sgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgetri.mkl : sgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgetri.veclib : sgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dgetri #################################################### +dgetri.goto : dgetri.$(SUFFIX) ../$(LIBNAME) + $(FC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dgetri.acml : dgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgetri.atlas : dgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgetri.mkl : dgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgetri.veclib : dgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cgetri #################################################### + +cgetri.goto : cgetri.$(SUFFIX) ../$(LIBNAME) + $(FC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cgetri.acml : cgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgetri.atlas : cgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgetri.mkl : cgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgetri.veclib : cgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ 
$(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zgetri #################################################### + +zgetri.goto : zgetri.$(SUFFIX) ../$(LIBNAME) + $(FC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zgetri.acml : zgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgetri.atlas : zgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgetri.mkl : zgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgetri.veclib : zgetri.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Spotrf #################################################### +spotrf.goto : spotrf.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +spotrf.acml : spotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +spotrf.atlas : spotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +spotrf.mkl : spotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +spotrf.veclib : spotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dpotrf #################################################### +dpotrf.goto : dpotrf.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dpotrf.acml : dpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dpotrf.atlas : dpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dpotrf.mkl : dpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dpotrf.veclib : dpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cpotrf #################################################### + +cpotrf.goto : cpotrf.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cpotrf.acml : cpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cpotrf.atlas : cpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cpotrf.mkl : cpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cpotrf.veclib : cpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zpotrf #################################################### + +zpotrf.goto : zpotrf.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zpotrf.acml : zpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zpotrf.atlas : zpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zpotrf.mkl : zpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zpotrf.veclib : zpotrf.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Chemv #################################################### + 
+chemv.goto : chemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +chemv.acml : chemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chemv.atlas : chemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chemv.mkl : chemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chemv.veclib : chemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zhemv #################################################### + +zhemv.goto : zhemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zhemv.acml : zhemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhemv.atlas : zhemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhemv.mkl : zhemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhemv.veclib : zhemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) +##################################### Chbmv #################################################### + +chbmv.goto : chbmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +chbmv.acml : chbmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chbmv.atlas : chbmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chbmv.mkl : chbmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chbmv.veclib : chbmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) +##################################### Zhbmv #################################################### + +zhbmv.goto : zhbmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zhbmv.acml : zhbmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhbmv.atlas : zhbmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhbmv.mkl : zhbmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhbmv.veclib : zhbmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) +##################################### Chpmv #################################################### + +chpmv.goto : chpmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +chpmv.acml : chpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chpmv.atlas : chpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chpmv.mkl : chpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +chpmv.veclib : chpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) +##################################### Zhpmv #################################################### + +zhpmv.goto : zhpmv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zhpmv.acml : zhpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) 
$(EXTRALIB) $(FEXTRALIB) + +zhpmv.atlas : zhpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhpmv.mkl : zhpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zhpmv.veclib : zhpmv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) +##################################### Sdot #################################################### +sdot.goto : sdot.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sdot.acml : sdot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sdot.atlas : sdot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sdot.mkl : sdot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sdot.veclib : sdot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ddot #################################################### +ddot.goto : ddot.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ddot.acml : ddot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ddot.atlas : ddot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ddot.mkl : ddot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ddot.veclib : ddot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cdot #################################################### +cdot.goto : cdot.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cdot.acml : cdot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cdot.atlas : cdot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cdot.mkl : cdot-intel.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cdot.veclib : cdot-intel.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zdot #################################################### +zdot.goto : zdot.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zdot.acml : zdot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zdot.atlas : zdot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zdot.mkl : zdot-intel.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zdot.veclib : zdot-intel.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Srot #################################################### +srot.goto : srot.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +srot.acml : srot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +srot.atlas : srot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +srot.mkl : srot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +srot.veclib 
: srot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Drot #################################################### +drot.goto : drot.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +drot.acml : drot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +drot.atlas : drot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +drot.mkl : drot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +drot.veclib : drot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### csrot #################################################### +csrot.goto : csrot.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +csrot.acml : csrot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +csrot.atlas : csrot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +csrot.mkl : csrot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +csrot.veclib : csrot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### zdrot #################################################### +zdrot.goto : zdrot.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zdrot.acml : zdrot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zdrot.atlas : zdrot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zdrot.mkl : zdrot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zdrot.veclib : zdrot.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### srotm #################################################### +srotm.goto : srotm.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +srotm.acml : srotm.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +srotm.atlas : srotm.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +srotm.mkl : srotm.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +srotm.veclib : srotm.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### drotm #################################################### +drotm.goto : drotm.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +drotm.acml : drotm.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +drotm.atlas : drotm.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +drotm.mkl : drotm.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +drotm.veclib : drotm.$(SUFFIX) + $(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Saxpy #################################################### +saxpy.goto : saxpy.$(SUFFIX) ../$(LIBNAME) + 
$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +saxpy.acml : saxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +saxpy.atlas : saxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +saxpy.mkl : saxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +saxpy.veclib : saxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Daxpy #################################################### +daxpy.goto : daxpy.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +daxpy.acml : daxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +daxpy.atlas : daxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +daxpy.mkl : daxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +daxpy.veclib : daxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Caxpy #################################################### + +caxpy.goto : caxpy.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +caxpy.acml : caxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +caxpy.atlas : caxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +caxpy.mkl : caxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +caxpy.veclib : caxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zaxpy #################################################### + +zaxpy.goto : zaxpy.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zaxpy.acml : zaxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zaxpy.atlas : zaxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zaxpy.mkl : zaxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zaxpy.veclib : zaxpy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Saxpby #################################################### +saxpby.goto : saxpby.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +saxpby.acml : saxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +saxpby.atlas : saxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +saxpby.mkl : saxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +saxpby.veclib : saxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Daxpby #################################################### +daxpby.goto : daxpby.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +daxpby.acml : daxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + 
+daxpby.atlas : daxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +daxpby.mkl : daxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +daxpby.veclib : daxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Caxpby #################################################### + +caxpby.goto : caxpby.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +caxpby.acml : caxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +caxpby.atlas : caxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +caxpby.mkl : caxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +caxpby.veclib : caxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zaxpby #################################################### + +zaxpby.goto : zaxpby.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zaxpby.acml : zaxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zaxpby.atlas : zaxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zaxpby.mkl : zaxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zaxpby.veclib : zaxpby.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Scopy #################################################### +scopy.goto : scopy.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +scopy.acml : scopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +scopy.atlas : scopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +scopy.mkl : scopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +scopy.veclib : scopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dcopy #################################################### +dcopy.goto : dcopy.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dcopy.acml : dcopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dcopy.atlas : dcopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dcopy.mkl : dcopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dcopy.veclib : dcopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Ccopy #################################################### + +ccopy.goto : ccopy.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +ccopy.acml : ccopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ccopy.atlas : ccopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ccopy.mkl : ccopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ 
$(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +ccopy.veclib : ccopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zcopy #################################################### + +zcopy.goto : zcopy.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zcopy.acml : zcopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zcopy.atlas : zcopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zcopy.mkl : zcopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zcopy.veclib : zcopy.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Sscal #################################################### +sscal.goto : sscal.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sscal.acml : sscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sscal.atlas : sscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sscal.mkl : sscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sscal.veclib : sscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dscal #################################################### +dscal.goto : dscal.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dscal.acml : dscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dscal.atlas : dscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dscal.mkl : dscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dscal.veclib : dscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cscal #################################################### + +cscal.goto : cscal.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cscal.acml : cscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cscal.atlas : cscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cscal.mkl : cscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cscal.veclib : cscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zscal #################################################### + +zscal.goto : zscal.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zscal.acml : zscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zscal.atlas : zscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zscal.mkl : zscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zscal.veclib : zscal.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### 
Sasum #################################################### +sasum.goto : sasum.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sasum.acml : sasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sasum.atlas : sasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sasum.mkl : sasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sasum.veclib : sasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dasum #################################################### +dasum.goto : dasum.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dasum.acml : dasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dasum.atlas : dasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dasum.mkl : dasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dasum.veclib : dasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Casum #################################################### + +casum.goto : casum.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +casum.acml : casum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +casum.atlas : casum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +casum.mkl : casum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +casum.veclib : casum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zasum #################################################### + +zasum.goto : zasum.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zasum.acml : zasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zasum.atlas : zasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zasum.mkl : zasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zasum.veclib : zasum.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Sswap #################################################### +sswap.goto : sswap.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sswap.acml : sswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sswap.atlas : sswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sswap.mkl : sswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sswap.veclib : sswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dswap #################################################### +dswap.goto : dswap.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dswap.acml : dswap.$(SUFFIX) + 
-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dswap.atlas : dswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dswap.mkl : dswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dswap.veclib : dswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cswap #################################################### + +cswap.goto : cswap.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cswap.acml : cswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cswap.atlas : cswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cswap.mkl : cswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cswap.veclib : cswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zswap #################################################### + +zswap.goto : zswap.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zswap.acml : zswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zswap.atlas : zswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zswap.mkl : zswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zswap.veclib : zswap.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + + +##################################### Sgesv #################################################### +sgesv.goto : sgesv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +sgesv.acml : sgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgesv.atlas : sgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgesv.mkl : sgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgesv.veclib : sgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dgesv #################################################### +dgesv.goto : dgesv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dgesv.acml : dgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgesv.atlas : dgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgesv.mkl : dgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgesv.veclib : dgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cgesv #################################################### + +cgesv.goto : cgesv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cgesv.acml : cgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgesv.atlas : cgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgesv.mkl : 
cgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgesv.veclib : cgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zgesv #################################################### + +zgesv.goto : zgesv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zgesv.acml : zgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgesv.atlas : zgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgesv.mkl : zgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgesv.veclib : zgesv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + + +##################################### Cgemm3m #################################################### + +cgemm3m.goto : cgemm3m.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +cgemm3m.mkl : cgemm3m.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgemm3m.veclib : cgemm3m.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zgemm3m #################################################### + +zgemm3m.goto : zgemm3m.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +zgemm3m.mkl : zgemm3m.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgemm3m.veclib : zgemm3m.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBVECLIB) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +############################################## ISAMAX ############################################## +isamax.goto : isamax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +isamax.atlas : isamax.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +############################################## IDAMAX ############################################## +idamax.goto : idamax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +idamax.atlas : idamax.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +############################################## ICAMAX ############################################## +icamax.goto : icamax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +icamax.atlas : icamax.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +############################################## IZAMAX ############################################## +izamax.goto : izamax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +izamax.atlas : izamax.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +############################################## ISMAX ############################################## +ismax.goto : ismax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## IDMAX ############################################## +idmax.goto : idmax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ 
$(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## ISAMIN ############################################## +isamin.goto : isamin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## IDAMIN ############################################## +idamin.goto : idamin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## ICAMIN ############################################## +icamin.goto : icamin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## IZAMIN ############################################## +izamin.goto : izamin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## ISMIN ############################################## +ismin.goto : ismin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## IDMIN ############################################## +idmin.goto : idmin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## SAMAX ############################################## +samax.goto : samax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## DAMAX ############################################## +damax.goto : damax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## CAMAX ############################################## +camax.goto : camax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## ZAMAX ############################################## +zamax.goto : zamax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## SMAX ############################################## +smax.goto : smax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## DMAX ############################################## +dmax.goto : dmax.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## SAMIN ############################################## +samin.goto : samin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## DAMIN ############################################## +damin.goto : damin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## CAMIN ############################################## +camin.goto : camin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## ZAMIN ############################################## +zamin.goto : zamin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + 
+############################################## SMIN ############################################## +smin.goto : smin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## DMIN ############################################## +dmin.goto : dmin.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +############################################## SNRM2 ############################################## +snrm2.goto : snrm2.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +snrm2.atlas : snrm2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +############################################## DNRM2 ############################################## +dnrm2.goto : dnrm2.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dnrm2.atlas : dnrm2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +############################################## Sscnrm2 ############################################## +scnrm2.goto : scnrm2.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +scnrm2.atlas : scnrm2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +############################################## Ddznrm2 ############################################## +dznrm2.goto : dznrm2.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) -lm + +dznrm2.atlas : dznrm2.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + + +################################################################################################### + +slinpack.$(SUFFIX) : linpack.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dlinpack.$(SUFFIX) : linpack.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +clinpack.$(SUFFIX) : linpack.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zlinpack.$(SUFFIX) : linpack.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +scholesky.$(SUFFIX) : cholesky.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dcholesky.$(SUFFIX) : cholesky.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ccholesky.$(SUFFIX) : cholesky.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zcholesky.$(SUFFIX) : cholesky.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +ifeq ($(BUILD_BFLOAT16),1) +sbgemm.$(SUFFIX) : gemm.c + $(CC) $(CFLAGS) -c -DHALF -UCOMPLEX -UDOUBLE -o $(@F) $^ +endif + +sgemm.$(SUFFIX) : gemm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dgemm.$(SUFFIX) : gemm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cgemm.$(SUFFIX) : gemm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zgemm.$(SUFFIX) : gemm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +ssymm.$(SUFFIX) : symm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dsymm.$(SUFFIX) : symm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +csymm.$(SUFFIX) : symm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zsymm.$(SUFFIX) : symm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +strmm.$(SUFFIX) : trmm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dtrmm.$(SUFFIX) : trmm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ctrmm.$(SUFFIX) : trmm.c + $(CC) $(CFLAGS) -c 
-DCOMPLEX -UDOUBLE -o $(@F) $^ + +ztrmm.$(SUFFIX) : trmm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +strsm.$(SUFFIX) : trsm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dtrsm.$(SUFFIX) : trsm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ctrsm.$(SUFFIX) : trsm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +ztrsm.$(SUFFIX) : trsm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +ssyr.$(SUFFIX) : syr.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dsyr.$(SUFFIX) : syr.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +sspr.$(SUFFIX) : spr.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dspr.$(SUFFIX) : spr.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +sspr2.$(SUFFIX) : spr2.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dspr2.$(SUFFIX) : spr2.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ssyr2.$(SUFFIX) : syr2.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dsyr2.$(SUFFIX) : syr2.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ssyrk.$(SUFFIX) : syrk.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dsyrk.$(SUFFIX) : syrk.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +csyrk.$(SUFFIX) : syrk.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zsyrk.$(SUFFIX) : syrk.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +ssyr2k.$(SUFFIX) : syr2k.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dsyr2k.$(SUFFIX) : syr2k.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +csyr2k.$(SUFFIX) : syr2k.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zsyr2k.$(SUFFIX) : syr2k.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +chemm.$(SUFFIX) : hemm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zhemm.$(SUFFIX) : hemm.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +cherk.$(SUFFIX) : herk.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zherk.$(SUFFIX) : herk.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +cher2k.$(SUFFIX) : her2k.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zher2k.$(SUFFIX) : her2k.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +cher.$(SUFFIX) : her.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zher.$(SUFFIX) : her.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +cher2.$(SUFFIX) : her2.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zher2.$(SUFFIX) : her2.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +sgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +sspmv.$(SUFFIX) : spmv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dspmv.$(SUFFIX) : spmv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +strmv.$(SUFFIX) : trmv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dtrmv.$(SUFFIX) : trmv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ctrmv.$(SUFFIX) : trmv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +ztrmv.$(SUFFIX) : trmv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +stpmv.$(SUFFIX) : tpmv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dtpmv.$(SUFFIX) : tpmv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ctpmv.$(SUFFIX) : tpmv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + 
+ztpmv.$(SUFFIX) : tpmv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +stpsv.$(SUFFIX) : tpsv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dtpsv.$(SUFFIX) : tpsv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ctpsv.$(SUFFIX) : tpsv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +ztpsv.$(SUFFIX) : tpsv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +strsv.$(SUFFIX) : trsv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dtrsv.$(SUFFIX) : trsv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ctrsv.$(SUFFIX) : trsv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +ztrsv.$(SUFFIX) : trsv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +sger.$(SUFFIX) : ger.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dger.$(SUFFIX) : ger.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cger.$(SUFFIX) : ger.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zger.$(SUFFIX) : ger.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + +ssymv.$(SUFFIX) : symv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dsymv.$(SUFFIX) : symv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +csymv.$(SUFFIX) : symv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zsymv.$(SUFFIX) : symv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +sgeev.$(SUFFIX) : geev.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dgeev.$(SUFFIX) : geev.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cgeev.$(SUFFIX) : geev.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zgeev.$(SUFFIX) : geev.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +sgetri.$(SUFFIX) : getri.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dgetri.$(SUFFIX) : getri.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cgetri.$(SUFFIX) : getri.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zgetri.$(SUFFIX) : getri.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +spotrf.$(SUFFIX) : potrf.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dpotrf.$(SUFFIX) : potrf.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cpotrf.$(SUFFIX) : potrf.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zpotrf.$(SUFFIX) : potrf.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +chemv.$(SUFFIX) : hemv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zhemv.$(SUFFIX) : hemv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +chbmv.$(SUFFIX) : hbmv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zhbmv.$(SUFFIX) : hbmv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +chpmv.$(SUFFIX) : hpmv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zhpmv.$(SUFFIX) : hpmv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +sdot.$(SUFFIX) : dot.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +ddot.$(SUFFIX) : dot.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cdot.$(SUFFIX) : zdot.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zdot.$(SUFFIX) : zdot.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +cdot-intel.$(SUFFIX) : zdot-intel.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zdot-intel.$(SUFFIX) : zdot-intel.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + + +saxpy.$(SUFFIX) : axpy.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +daxpy.$(SUFFIX) : axpy.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +caxpy.$(SUFFIX) : axpy.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + 
+zaxpy.$(SUFFIX) : axpy.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +saxpby.$(SUFFIX) : axpby.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +daxpby.$(SUFFIX) : axpby.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +caxpby.$(SUFFIX) : axpby.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zaxpby.$(SUFFIX) : axpby.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +scopy.$(SUFFIX) : copy.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dcopy.$(SUFFIX) : copy.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +ccopy.$(SUFFIX) : copy.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zcopy.$(SUFFIX) : copy.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +sswap.$(SUFFIX) : swap.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dswap.$(SUFFIX) : swap.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cswap.$(SUFFIX) : swap.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zswap.$(SUFFIX) : swap.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + + +sscal.$(SUFFIX) : scal.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dscal.$(SUFFIX) : scal.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cscal.$(SUFFIX) : scal.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zscal.$(SUFFIX) : scal.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +sasum.$(SUFFIX) : asum.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dasum.$(SUFFIX) : asum.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +casum.$(SUFFIX) : asum.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zasum.$(SUFFIX) : asum.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + +sgesv.$(SUFFIX) : gesv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dgesv.$(SUFFIX) : gesv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cgesv.$(SUFFIX) : gesv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zgesv.$(SUFFIX) : gesv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +srot.$(SUFFIX) : rot.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +drot.$(SUFFIX) : rot.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +csrot.$(SUFFIX) : rot.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zdrot.$(SUFFIX) : rot.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + +srotm.$(SUFFIX) : rotm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +drotm.$(SUFFIX) : rotm.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + + + +cgemm3m.$(SUFFIX) : gemm3m.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zgemm3m.$(SUFFIX) : gemm3m.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + +isamax.$(SUFFIX) : iamax.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +idamax.$(SUFFIX) : iamax.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +icamax.$(SUFFIX) : iamax.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +izamax.$(SUFFIX) : iamax.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + +ismax.$(SUFFIX) : imax.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +idmax.$(SUFFIX) : imax.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + + +isamin.$(SUFFIX) : iamin.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +idamin.$(SUFFIX) : iamin.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +icamin.$(SUFFIX) : iamin.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +izamin.$(SUFFIX) : iamin.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + +ismin.$(SUFFIX) : imin.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o 
$(@F) $^ + +idmin.$(SUFFIX) : imin.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + + +samax.$(SUFFIX) : amax.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +damax.$(SUFFIX) : amax.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +camax.$(SUFFIX) : amax.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zamax.$(SUFFIX) : amax.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + +smax.$(SUFFIX) : max.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dmax.$(SUFFIX) : max.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + + +samin.$(SUFFIX) : amin.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +damin.$(SUFFIX) : amin.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +camin.$(SUFFIX) : amin.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zamin.$(SUFFIX) : amin.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + +smin.$(SUFFIX) : min.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dmin.$(SUFFIX) : min.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + + +snrm2.$(SUFFIX) : nrm2.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ + +dnrm2.$(SUFFIX) : nrm2.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +scnrm2.$(SUFFIX) : nrm2.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +dznrm2.$(SUFFIX) : nrm2.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ + + +smallscaling: smallscaling.c ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(EXTRALIB) -fopenmp -lm -lpthread + +clean :: + @rm -f *.goto *.mkl *.acml *.atlas *.veclib *.essl smallscaling + +include $(TOPDIR)/Makefile.tail diff --git a/benchmark/amax.c b/benchmark/amax.c new file mode 100644 index 000000000..29310dd71 --- /dev/null +++ b/benchmark/amax.c @@ -0,0 +1,133 @@ +/*************************************************************************** +Copyright (c) 2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "bench.h" + +#undef AMAX + +#ifdef COMPLEX +#ifdef DOUBLE +#define AMAX BLASFUNC(dzamax) +#else +#define AMAX BLASFUNC(scamax) +#endif +#else +#ifdef DOUBLE +#define AMAX BLASFUNC(damax) +#else +#define AMAX BLASFUNC(samax) +#endif +#endif + +int main(int argc, char *argv[]) +{ + + FLOAT *x; + blasint m, i; + blasint inc_x = 1; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + double time1, timeg; + + argc--; + argv++; + + if (argc > 0) + { + from = atol(*argv); + argc--; + argv++; + } + if (argc > 0) + { + to = MAX(atol(*argv), from); + argc--; + argv++; + } + if (argc > 0) + { + step = atol(*argv); + argc--; + argv++; + } + + if ((p = getenv("OPENBLAS_LOOPS"))) + loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) + inc_x = atoi(p); + + fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step, inc_x, loops); + + if ((x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL) + { + fprintf(stderr, "Out of Memory!!\n"); + exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for (m = from; m <= to; m += step) + { + + timeg = 0; + fprintf(stderr, " %6d : ", (int)m); + + for (l = 0; l < loops; l++) + { + + for (i = 0; i < m * COMPSIZE * abs(inc_x); i++) + { + x[i] = ((FLOAT)rand() / (FLOAT)RAND_MAX) - 0.5; + } + + begin(); + AMAX(&m, x, &inc_x); + end(); + timeg += getsec(); + } + + timeg /= loops; + + fprintf(stderr, + " %10.2f MFlops %10.6f sec\n", + COMPSIZE * sizeof(FLOAT) * 1. * (double)m / timeg * 1.e-6, timeg); + } + + return 0; +} + +// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__"))); diff --git a/benchmark/amin.c b/benchmark/amin.c new file mode 100644 index 000000000..54a1d266a --- /dev/null +++ b/benchmark/amin.c @@ -0,0 +1,137 @@ +/*************************************************************************** +Copyright (c) 2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "bench.h" + +#undef AMIN + +#ifdef COMPLEX +#ifdef DOUBLE +#define AMIN BLASFUNC(dzamin) +#else +#define AMIN BLASFUNC(scamin) +#endif +#else +#ifdef DOUBLE +#define AMIN BLASFUNC(damin) +#else +#define AMIN BLASFUNC(samin) +#endif +#endif + +int main(int argc, char *argv[]) +{ + + FLOAT *x; + blasint m, i; + blasint inc_x = 1; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + double time1, timeg; + + argc--; + argv++; + + if (argc > 0) + { + from = atol(*argv); + argc--; + argv++; + } + if (argc > 0) + { + to = MAX(atol(*argv), from); + argc--; + argv++; + } + if (argc > 0) + { + step = atol(*argv); + argc--; + argv++; + } + + if ((p = getenv("OPENBLAS_LOOPS"))) + loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) + inc_x = atoi(p); + + fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step, inc_x, loops); + + if ((x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL) + { + fprintf(stderr, "Out of Memory!!\n"); + exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for (m = from; m <= to; m += step) + { + + timeg = 0; + + fprintf(stderr, " %6d : ", (int)m); + + for (l = 0; l < loops; l++) + { + + for (i = 0; i < m * COMPSIZE * abs(inc_x); i++) + { + x[i] = ((FLOAT)rand() / (FLOAT)RAND_MAX) - 0.5; + } + + begin(); + + AMIN(&m, x, &inc_x); + + end(); + + timeg += getsec(); + } + + timeg /= loops; + + fprintf(stderr, + " %10.2f MFlops %10.6f sec\n", + COMPSIZE * sizeof(FLOAT) * 1. * (double)m / timeg * 1.e-6, timeg); + } + + return 0; +} + +// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__"))); diff --git a/benchmark/asum.c b/benchmark/asum.c index 78ccdf47b..098ddc8ee 100644 --- a/benchmark/asum.c +++ b/benchmark/asum.c @@ -25,169 +25,108 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *****************************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
-
+#include "bench.h"
 #undef ASUM

 #ifdef COMPLEX
 #ifdef DOUBLE
-#define ASUM BLASFUNC(dzasum)
+#define ASUM BLASFUNC(dzasum)
 #else
-#define ASUM BLASFUNC(scasum)
+#define ASUM BLASFUNC(scasum)
 #endif
 #else
 #ifdef DOUBLE
-#define ASUM BLASFUNC(dasum)
+#define ASUM BLASFUNC(dasum)
 #else
-#define ASUM BLASFUNC(sasum)
+#define ASUM BLASFUNC(sasum)
 #endif
 #endif

-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
-
-static void *huge_malloc(BLASLONG size){
-  int shmid;
-  void *address;
-
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-
-  if ((shmid =shmget(IPC_PRIVATE,
-                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
-                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
-    printf( "Memory allocation failed(shmget).\n");
-    exit(1);
-  }
-
-  address = shmat(shmid, NULL, SHM_RND);
-
-  if ((BLASLONG)address == -1){
-    printf( "Memory allocation failed(shmat).\n");
-    exit(1);
-  }
-
-  shmctl(shmid, IPC_RMID, 0);
-
-  return address;
-}
-
-#define malloc huge_malloc
-
-#endif
-
-int main(int argc, char *argv[]){
+int main(int argc, char *argv[])
+{

   FLOAT *x;
   FLOAT result;
   blasint m, i;
-  blasint inc_x=1;
+  blasint inc_x = 1;
   int loops = 1;
   int l;
   char *p;

-  int from = 1;
-  int to = 200;
-  int step = 1;
+  int from = 1;
+  int to = 200;
+  int step = 1;
+  double time1, timeg;

-  struct timeval start, stop;
-  double time1,timeg;
+  argc--;
+  argv++;

-  argc--;argv++;
-
-  if (argc > 0) { from = atol(*argv); argc--; argv++;}
-  if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;}
-  if (argc > 0) { step = atol(*argv); argc--; argv++;}
+  if (argc > 0)
+  {
+    from = atol(*argv);
+    argc--;
+    argv++;
+  }
+  if (argc > 0)
+  {
+    to = MAX(atol(*argv), from);
+    argc--;
+    argv++;
+  }
+  if (argc > 0)
+  {
+    step = atol(*argv);
+    argc--;
+    argv++;
+  }

-  if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p);
-  if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p);
+  if ((p = getenv("OPENBLAS_LOOPS")))
+    loops = atoi(p);
+  if ((p = getenv("OPENBLAS_INCX")))
+    inc_x = atoi(p);

-  fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step,inc_x,loops);
+  fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step, inc_x, loops);

-  if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){
-    fprintf(stderr,"Out of Memory!!\n");exit(1);
+  if ((x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL)
+  {
+    fprintf(stderr, "Out of Memory!!\n");
+    exit(1);
   }
-
-#ifdef linux
+#ifdef __linux
   srandom(getpid());
 #endif

   fprintf(stderr, " SIZE Flops\n");

-  for(m = from; m <= to; m += step)
+  for (m = from; m <= to; m += step)
   {
-   timeg=0;
-
-   fprintf(stderr, " %6d : ", (int)m);
-
-
-   for (l=0; l<loops; l++)
-   {
-
-    for(i = 0; i < m * COMPSIZE * abs(inc_x); i++){
-     x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
-    }
-
-    gettimeofday( &start, (struct timezone *)0);
-
-    result = ASUM (&m, x, &inc_x);
-
-    gettimeofday( &stop, (struct timezone *)0);
-
-    time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
-
-    timeg += time1;
-
-   }
-
-   timeg /= loops;
+    timeg = 0;
+
+    fprintf(stderr, " %6d : ", (int)m);
+
+    for (i = 0; i < m * COMPSIZE * abs(inc_x); i++)
+    {
+      x[i] = ((FLOAT)rand() / (FLOAT)RAND_MAX) - 0.5;
+    }
+
+    for (l = 0; l < loops; l++)
+    {
+      begin();
+      result = ASUM(&m, x, &inc_x);
+      end();
+      time1 = getsec();
+      timeg += time1;
+    }
+
+    if (loops > 1)
+      timeg /= loops;

 #ifdef COMPLEX
     fprintf(stderr, " %10.2f MFlops %10.6f sec\n", 4. * (double)m / timeg * 1.e-6, timeg);
 #else
     fprintf(stderr, " %10.2f MFlops %10.6f sec\n", 2. * (double)m / timeg * 1.e-6, timeg);
 #endif
-
   }

   return 0;
diff --git a/benchmark/axpby.c b/benchmark/axpby.c
new file mode 100644
index 000000000..d02d9a889
--- /dev/null
+++ b/benchmark/axpby.c
@@ -0,0 +1,124 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "bench.h"
+
+#undef AXPBY
+
+#ifdef COMPLEX
+#ifdef DOUBLE
+#define AXPBY BLASFUNC(zaxpby)
+#else
+#define AXPBY BLASFUNC(caxpby)
+#endif
+#else
+#ifdef DOUBLE
+#define AXPBY BLASFUNC(daxpby)
+#else
+#define AXPBY BLASFUNC(saxpby)
+#endif
+#endif
+
+int main(int argc, char *argv[]){
+
+  FLOAT *x, *y;
+  FLOAT alpha[2] = { 2.0, 2.0 };
+  FLOAT beta[2] = {2.0, 2.0};
+  blasint m, i;
+  blasint inc_x=1,inc_y=1;
+  int loops = 1;
+  int l;
+  char *p;
+
+  int from = 1;
+  int to = 200;
+  int step = 1;
+
+  double time1,timeg;
+
+  argc--;argv++;
+
+  if (argc > 0) { from = atol(*argv); argc--; argv++;}
+  if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;}
+  if (argc > 0) { step = atol(*argv); argc--; argv++;}
+
+  if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p);
+  if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p);
+  if ((p = getenv("OPENBLAS_INCY"))) inc_y = atoi(p);
+
+  fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,inc_x,inc_y,loops);
+
+  if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){
+    fprintf(stderr,"Out of Memory!!\n");exit(1);
+  }
+
+  if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL){
+    fprintf(stderr,"Out of Memory!!\n");exit(1);
+  }
+
+#ifdef __linux
+  srandom(getpid());
+#endif
+
+  fprintf(stderr, " SIZE Flops\n");
+
+  for(m = from; m <= to; m += step)
+  {
+
+    timeg=0;
+
+    fprintf(stderr, " %6d : ", (int)m);
+
+
+    for(i = 0; i < m * COMPSIZE * abs(inc_x); i++){
+      x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+    }
+
+    for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){
+      y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+    }
+
+    for (l=0; l<loops; l++)
+    {
+
+      begin();
+
+      AXPBY (&m, alpha, x, &inc_x, beta, y, &inc_y);
+
+      end();
+
+      time1 = getsec();
+
+      timeg += time1;
+
+    }
+
+    timeg /= loops;
+
+    fprintf(stderr,
+            " %10.2f MFlops %10.6f sec\n",
+            COMPSIZE * COMPSIZE * 3. * (double)m / timeg * 1.e-6, timeg);
+
+  }
+
+  return 0;
+}
diff --git a/benchmark/axpy.c b/benchmark/axpy.c
--- a/benchmark/axpy.c
+++ b/benchmark/axpy.c
@@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
-
+#include "bench.h"
@@ -49,71 +43,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #endif
 #endif

-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
-
-static void *huge_malloc(BLASLONG size){
-  int shmid;
-  void *address;
-
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-
-  if ((shmid =shmget(IPC_PRIVATE,
-                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
-                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
-    printf( "Memory allocation failed(shmget).\n");
-    exit(1);
-  }
-
-  address = shmat(shmid, NULL, SHM_RND);
-
-  if ((BLASLONG)address == -1){
-    printf( "Memory allocation failed(shmat).\n");
-    exit(1);
-  }
-
-  shmctl(shmid, IPC_RMID, 0);
-
-  return address;
-}
-
-#define malloc huge_malloc
-
-#endif
-
 int main(int argc, char *argv[]){

   FLOAT *x, *y;
@@ -127,8 +56,6 @@ int main(int argc, char *argv[]){
   int from = 1;
   int to = 200;
   int step = 1;
-
-  struct timeval start, stop;
   double time1,timeg;

   argc--;argv++;
@@ -151,7 +78,7 @@ int main(int argc, char *argv[]){
     fprintf(stderr,"Out of Memory!!\n");exit(1);
   }

-#ifdef linux
+#ifdef __linux
   srandom(getpid());
 #endif

@@ -175,13 +102,13 @@ int main(int argc, char *argv[]){
     for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){
       y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
     }
-    gettimeofday( &start, (struct timezone *)0);
+    begin();

     AXPY (&m, alpha, x, &inc_x, y, &inc_y );

-    gettimeofday( &stop, (struct timezone *)0);
+    end();

-    time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
+    time1 = getsec();

     timeg += time1;

@@ -190,7 +117,7 @@ int main(int argc, char *argv[]){
     timeg /= loops;

     fprintf(stderr,
-            " %10.2f MFlops %10.6f sec\n",
+            " %10.2f MFlops %10.9f sec\n",
             COMPSIZE * COMPSIZE * 2. * (double)m / timeg * 1.e-6, timeg);
   }

diff --git a/benchmark/bench.h b/benchmark/bench.h
new file mode 100644
index 000000000..1f9b8986c
--- /dev/null
+++ b/benchmark/bench.h
@@ -0,0 +1,104 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#ifdef __CYGWIN32__
+#include <sys/time.h>
+#endif
+#include "common.h"
+
+#if defined(__WIN32__) || defined(__WIN64__)
+
+#ifndef DELTA_EPOCH_IN_MICROSECS
+#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
+#endif
+
+int gettimeofday(struct timeval *tv, void *tz){
+
+  FILETIME ft;
+  unsigned __int64 tmpres = 0;
+  static int tzflag;
+
+  if (NULL != tv)
+  {
+    GetSystemTimeAsFileTime(&ft);
+
+    tmpres |= ft.dwHighDateTime;
+    tmpres <<= 32;
+    tmpres |= ft.dwLowDateTime;
+
+    /*converting file time to unix epoch*/
+    tmpres /= 10;  /*convert into microseconds*/
+    tmpres -= DELTA_EPOCH_IN_MICROSECS;
+    tv->tv_sec = (long)(tmpres / 1000000UL);
+    tv->tv_usec = (long)(tmpres % 1000000UL);
+  }
+
+  return 0;
+}
+
+#endif
+
+#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
+
+static void *huge_malloc(BLASLONG size){
+  int shmid;
+  void *address;
+
+#ifndef SHM_HUGETLB
+#define SHM_HUGETLB 04000
+#endif
+
+  if ((shmid =shmget(IPC_PRIVATE,
+                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
+                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
+    printf( "Memory allocation failed(shmget).\n");
+    exit(1);
+  }
+
+  address = shmat(shmid, NULL, SHM_RND);
+
+  if ((BLASLONG)address == -1){
+    printf( "Memory allocation failed(shmat).\n");
+    exit(1);
+  }
+
+  shmctl(shmid, IPC_RMID, 0);
+
+  return address;
+}
+
+
+#define malloc huge_malloc
+
+#endif
+
+#if defined(__WIN32__) || defined(__WIN64__) || !defined(_POSIX_TIMERS)
+  struct timeval start, stop;
+#else
+  struct timespec start = { 0, 0 }, stop = { 0, 0 };
+#endif
+
+double getsec()
+{
+#if defined(__WIN32__) || defined(__WIN64__) || !defined(_POSIX_TIMERS)
+  return (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
+#else
+  return (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_nsec - start.tv_nsec)) * 1.e-9;
+#endif
+}
+
+void begin() {
+#if defined(__WIN32__) || defined(__WIN64__) || !defined(_POSIX_TIMERS)
+  gettimeofday( &start, (struct timezone *)0);
+#else
+  clock_gettime(CLOCK_REALTIME, &start);
+#endif
+}
+
+void end() {
+#if defined(__WIN32__) || defined(__WIN64__) || !defined(_POSIX_TIMERS)
+  gettimeofday( &stop, (struct timezone *)0);
+#else
+  clock_gettime(CLOCK_REALTIME, &stop);
+#endif
+}
\ No newline at end of file
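The begin()/end()/getsec() helpers that bench.h introduces above are the pattern every benchmark in this patch is converted to. For context, a minimal self-contained sketch of that usage follows; the Fortran-style daxpy_ symbol and the link line are assumptions for the sketch only (the real benchmarks get their symbol through BLASFUNC() and their types from common.h):

    /* bench_sketch.c -- standalone sketch of the bench.h timing pattern.
       Assumed build line: cc bench_sketch.c -lopenblas -o bench_sketch   */
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Fortran-convention BLAS entry point (assumed name for this sketch) */
    extern void daxpy_(const int *n, const double *alpha, const double *x,
                       const int *incx, double *y, const int *incy);

    static struct timespec start, stop;
    static void begin(void) { clock_gettime(CLOCK_REALTIME, &start); }
    static void end(void)   { clock_gettime(CLOCK_REALTIME, &stop); }
    static double getsec(void) {
      return (double)(stop.tv_sec - start.tv_sec) +
             (double)(stop.tv_nsec - start.tv_nsec) * 1.e-9;
    }

    int main(void) {
      int n = 1000000, inc = 1;
      double alpha = 2.0;
      double *x = malloc(sizeof(double) * n);
      double *y = malloc(sizeof(double) * n);
      for (int i = 0; i < n; i++) { x[i] = 0.5; y[i] = 1.5; }

      begin();
      daxpy_(&n, &alpha, x, &inc, y, &inc);   /* only this call is timed */
      end();

      /* axpy performs 2 flops per element */
      printf(" %10.2f MFlops %10.6f sec\n", 2. * n / getsec() * 1.e-6, getsec());
      free(x); free(y);
      return 0;
    }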
diff --git a/benchmark/cholesky.c b/benchmark/cholesky.c
index 8d121efb3..65b20d039 100644
--- a/benchmark/cholesky.c
+++ b/benchmark/cholesky.c
@@ -36,12 +36,7 @@
 /* or implied, of The University of Texas at Austin.                 */
 /*********************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
+#include "bench.h"

 double fabs(double);

@@ -71,41 +66,6 @@ double fabs(double);
 #endif
 #endif

-
-
-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-
 static __inline double getmflops(int ratio, int m, double secs){

   double mm = (double)m;
@@ -145,7 +105,6 @@ int main(int argc, char *argv[]){

   FLOAT maxerr;

-  struct timeval start, stop;
   double time1;

   argc--;argv++;
@@ -173,46 +132,46 @@ int main(int argc, char *argv[]){
 #ifndef COMPLEX
     if (uplos & 1) {
       for (j = 0; j < m; j++) {
-        for(i = 0; i < j; i++) a[i + j * m] = 0.;
-        a[j + j * m] = ((double) rand() / (double) RAND_MAX) + 8.;
-        for(i = j + 1; i < m; i++) a[i + j * m] = ((double) rand() / (double) RAND_MAX) - 0.5;
+        for(i = 0; i < j; i++) a[(long)i + (long)j * (long)m] = 0.;
+        a[(long)j + (long)j * (long)m] = ((double) rand() / (double) RAND_MAX) + 8.;
+        for(i = j + 1; i < m; i++) a[(long)i + (long)j * (long)m] = ((double) rand() / (double) RAND_MAX) - 0.5;
       }
     } else {
       for (j = 0; j < m; j++) {
-        for(i = 0; i < j; i++) a[i + j * m] = ((double) rand() / (double) RAND_MAX) - 0.5;
-        a[j + j * m] = ((double) rand() / (double) RAND_MAX) + 8.;
-        for(i = j + 1; i < m; i++) a[i + j * m] = 0.;
+        for(i = 0; i < j; i++) a[(long)i + (long)j * (long)m] = ((double) rand() / (double) RAND_MAX) - 0.5;
+        a[(long)j + (long)j * (long)m] = ((double) rand() / (double) RAND_MAX) + 8.;
+        for(i = j + 1; i < m; i++) a[(long)i + (long)j * (long)m] = 0.;
       }
     }
 #else
     if (uplos & 1) {
       for (j = 0; j < m; j++) {
        for(i = 0; i < j; i++) {
-        a[(i + j * m) * 2 + 0] = 0.;
-        a[(i + j * m) * 2 + 1] = 0.;
+        a[((long)i + (long)j * (long)m) * 2 + 0] = 0.;
+        a[((long)i + (long)j * (long)m) * 2 + 1] = 0.;
       }
-      a[(j + j * m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) + 8.;
-      a[(j + j * m) * 2 + 1] = 0.;
+      a[((long)j + (long)j * (long)m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) + 8.;
+      a[((long)j + (long)j * (long)m) * 2 + 1] = 0.;
       for(i = j + 1; i < m; i++) {
-        a[(i + j * m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) - 0.5;
-        a[(i + j * m) * 2 + 1] = ((double) rand() / (double) RAND_MAX) - 0.5;
+        a[((long)i + (long)j * (long)m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) - 0.5;
+        a[((long)i + (long)j * (long)m) * 2 + 1] = ((double) rand() / (double) RAND_MAX) - 0.5;
       }
      }
     } else {
      for (j = 0; j < m; j++) {
       for(i = 0; i < j; i++) {
-        a[(i + j * m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) - 0.5;
-        a[(i + j * m) * 2 + 1] = ((double) rand() / (double) RAND_MAX) - 0.5;
+        a[((long)i + (long)j * (long)m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) - 0.5;
+        a[((long)i + (long)j * (long)m) * 2 + 1] = ((double) rand() / (double) RAND_MAX) - 0.5;
       }
-      a[(j + j * m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) + 8.;
-      a[(j + j * m) * 2 + 1] = 0.;
+      a[((long)j + (long)j * (long)m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) + 8.;
+      a[((long)j + (long)j * (long)m) * 2 + 1] = 0.;
       for(i = j + 1; i < m; i++) {
-        a[(i + j * m) * 2 + 0] = 0.;
-        a[(i + j * m) * 2 + 1] = 0.;
+        a[((long)i + (long)j * (long)m) * 2 + 0] = 0.;
+        a[((long)i + (long)j * (long)m) * 2 + 1] = 0.;
       }
      }
     }
@@ -220,29 +179,31 @@ int main(int argc, char *argv[]){

     SYRK(uplo[uplos], trans[uplos], &m, &m, alpha, a, &m, beta, b, &m);

-    gettimeofday( &start, (struct timezone *)0);
+    begin();

     POTRF(uplo[uplos], &m, b, &m, &info);

-    gettimeofday( &stop, (struct timezone *)0);
+    end();

     if (info != 0) {
       fprintf(stderr, "Info = %d\n", info);
       exit(1);
     }

-    time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
+    time1 = getsec();

-
     maxerr = 0.;

     if (!(uplos & 1)) {
       for (j = 0; j < m; j++) {
        for(i = 0; i <= j; i++) {
 #ifndef COMPLEX
-        if (maxerr < fabs(a[i + j * m] - b[i + j * m])) maxerr = fabs(a[i + j * m] - b[i + j * m]);
+        if (maxerr < fabs(a[(long)i + (long)j * (long)m] - b[(long)i + (long)j * (long)m]))
+          maxerr = fabs(a[(long)i + (long)j * (long)m] - b[(long)i + (long)j * (long)m]);
 #else
-        if (maxerr < fabs(a[(i + j * m) * 2 + 0] - b[(i + j * m) * 2 + 0])) maxerr = fabs(a[(i + j * m) * 2 + 0] - b[(i + j * m) * 2 + 0]);
-        if (maxerr < fabs(a[(i + j * m) * 2 + 1] - b[(i + j * m) * 2 + 1])) maxerr = fabs(a[(i + j * m) * 2 + 1] - b[(i + j * m) * 2 + 1]);
+        if (maxerr < fabs(a[((long)i + (long)j * (long)m) * 2 + 0] - b[((long)i + (long)j * (long)m) * 2 + 0]))
+          maxerr = fabs(a[((long)i + (long)j * (long)m) * 2 + 0] - b[((long)i + (long)j * (long)m) * 2 + 0]);
+        if (maxerr < fabs(a[((long)i + (long)j * (long)m) * 2 + 1] - b[((long)i + (long)j * (long)m) * 2 + 1]))
+          maxerr = fabs(a[((long)i + (long)j * (long)m) * 2 + 1] - b[((long)i + (long)j * (long)m) * 2 + 1]);
 #endif
        }
       }
@@ -250,10 +211,13 @@ int main(int argc, char *argv[]){
       for (j = 0; j < m; j++) {
        for(i = j; i < m; i++) {
 #ifndef COMPLEX
-        if (maxerr < fabs(a[i + j * m] - b[i + j * m])) maxerr = fabs(a[i + j * m] - b[i + j * m]);
+        if (maxerr < fabs(a[(long)i + (long)j * (long)m] - b[(long)i + (long)j * (long)m]))
+          maxerr = fabs(a[(long)i + (long)j * (long)m] - b[(long)i + (long)j * (long)m]);
 #else
-        if (maxerr < fabs(a[(i + j * m) * 2 + 0] - b[(i + j * m) * 2 + 0])) maxerr = fabs(a[(i + j * m) * 2 + 0] - b[(i + j * m) * 2 + 0]);
-        if (maxerr < fabs(a[(i + j * m) * 2 + 1] - b[(i + j * m) * 2 + 1])) maxerr = fabs(a[(i + j * m) * 2 + 1] - b[(i + j * m) * 2 + 1]);
+        if (maxerr < fabs(a[((long)i + (long)j * (long)m) * 2 + 0] - b[((long)i + (long)j * (long)m) * 2 + 0]))
+          maxerr = fabs(a[((long)i + (long)j * (long)m) * 2 + 0] - b[((long)i + (long)j * (long)m) * 2 + 0]);
+        if (maxerr < fabs(a[((long)i + (long)j * (long)m) * 2 + 1] - b[((long)i + (long)j * (long)m) * 2 + 1]))
+          maxerr = fabs(a[((long)i + (long)j * (long)m) * 2 + 1] - b[((long)i + (long)j * (long)m) * 2 + 1]);
 #endif
        }
       }
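The cholesky.c hunks above consistently widen index arithmetic from a[i + j * m] to a[(long)i + (long)j * (long)m]. Since i, j and m are 32-bit int/blasint values, the product j * m is otherwise evaluated in 32-bit arithmetic and overflows once the matrix holds more than 2^31 elements, even when the array itself was allocated large enough. A standalone illustration of the failure mode (values chosen only for this sketch; assumes an LP64 system where long is 64-bit):

    #include <stdio.h>

    int main(void) {
      int j = 50000, m = 50000;        /* plausible dimensions for a large run */
      long bad  = (long)(j * m);       /* int * int overflows before widening  */
      long good = (long)j * (long)m;   /* widen first, then multiply           */
      printf("bad  = %ld\n", bad);     /* typically -1794967296 (overflowed)   */
      printf("good = %ld\n", good);    /* 2500000000, as intended              */
      return 0;
    }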
diff --git a/benchmark/copy.c b/benchmark/copy.c
index ea5b38d68..c5e447521 100644
--- a/benchmark/copy.c
+++ b/benchmark/copy.c
@@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
-
+#include "bench.h"

 #undef COPY
@@ -49,71 +43,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #endif
 #endif

-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
-
-static void *huge_malloc(BLASLONG size){
-  int shmid;
-  void *address;
-
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-
-  if ((shmid =shmget(IPC_PRIVATE,
-                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
-                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
-    printf( "Memory allocation failed(shmget).\n");
-    exit(1);
-  }
-
-  address = shmat(shmid, NULL, SHM_RND);
-
-  if ((BLASLONG)address == -1){
-    printf( "Memory allocation failed(shmat).\n");
-    exit(1);
-  }
-
-  shmctl(shmid, IPC_RMID, 0);
-
-  return address;
-}
-
-#define malloc huge_malloc
-
-#endif
-
 int main(int argc, char *argv[]){

   FLOAT *x, *y;
@@ -128,8 +57,9 @@ int main(int argc, char *argv[]){
   int to = 200;
   int step = 1;

-  struct timeval start, stop;
-  double time1,timeg;
+  double time1 = 0.0, timeg = 0.0;
+  long nanos = 0;
+  time_t seconds = 0;

   argc--;argv++;
@@ -151,7 +81,7 @@ int main(int argc, char *argv[]){
     fprintf(stderr,"Out of Memory!!\n");exit(1);
   }

-#ifdef linux
+#ifdef __linux
   srandom(getpid());
 #endif

@@ -163,35 +93,27 @@ int main(int argc, char *argv[]){
     timeg=0;

     fprintf(stderr, " %6d : ", (int)m);
+    for(i = 0; i < m * COMPSIZE * abs(inc_x); i++){
+      x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+    }
+    for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){
+      y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+    }

     for (l=0; l<loops; l++)
     {
-      for(i = 0; i < m * COMPSIZE * abs(inc_x); i++){
-        x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
-      }
-      for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){
-        y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
-      }
-
-      gettimeofday( &start, (struct timezone *)0);
+      begin();

       COPY (&m, x, &inc_x, y, &inc_y);

-      gettimeofday( &stop, (struct timezone *)0);
+      end();

-      time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
+      time1 = getsec();

       timeg += time1;
     }

     timeg /= loops;

     fprintf(stderr, " %10.2f MFlops %10.6f sec\n",
             COMPSIZE * sizeof(FLOAT) * 1. * (double)m / timeg * 1.e-6, timeg);
   }

   return 0;
 }
diff --git a/benchmark/dot.c b/benchmark/dot.c
--- a/benchmark/dot.c
+++ b/benchmark/dot.c
@@ -25,97 +25,24 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
-
+#include "bench.h"

 #undef DOT
-
 #ifdef DOUBLE
 #define DOT BLASFUNC(ddot)
 #else
 #define DOT BLASFUNC(sdot)
 #endif
-
-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
-
-static void *huge_malloc(BLASLONG size){
-  int shmid;
-  void *address;
-
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-
-  if ((shmid =shmget(IPC_PRIVATE,
-                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
-                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
-    printf( "Memory allocation failed(shmget).\n");
-    exit(1);
-  }
-
-  address = shmat(shmid, NULL, SHM_RND);
-
-  if ((BLASLONG)address == -1){
-    printf( "Memory allocation failed(shmat).\n");
-    exit(1);
-  }
-
-  shmctl(shmid, IPC_RMID, 0);
-
-  return address;
-}
-
-#define malloc huge_malloc
-
-#endif
-
 int main(int argc, char *argv[]){

   FLOAT *x, *y;
@@ -122,7 +49,6 @@ int main(int argc, char *argv[]){
   int to = 200;
   int step = 1;

-  struct timeval start, stop;
   double time1,timeg;

   argc--;argv++;
@@ -145,7 +71,7 @@ int main(int argc, char *argv[]){
     fprintf(stderr,"Out of Memory!!\n");exit(1);
   }

-#ifdef linux
+#ifdef __linux
   srandom(getpid());
 #endif

@@ -169,15 +95,12 @@ int main(int argc, char *argv[]){
     for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){
       y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
     }
-    gettimeofday( &start, (struct timezone *)0);
+    begin();

     result = DOT (&m, x, &inc_x, y, &inc_y );

-    gettimeofday( &stop, (struct timezone *)0);
-
-    time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
-
-    timeg += time1;
+    end();
+    timeg += getsec();

   }
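The dot.c and copy.c conversions above follow the same mechanical recipe as asum.c and axpy.c: gettimeofday()/timeval pairs become begin()/end() plus getsec(). On platforms with POSIX timers that swaps microsecond for nanosecond resolution, which matters at the small vector sizes these level-1 benchmarks start from. A side-by-side sketch of the two clocks (POSIX-only and purely illustrative; note bench.h itself uses CLOCK_REALTIME, though CLOCK_MONOTONIC would arguably be the safer choice):

    #include <stdio.h>
    #include <sys/time.h>  /* gettimeofday  */
    #include <time.h>      /* clock_gettime */

    int main(void) {
      struct timeval  tv0, tv1;
      struct timespec ts0, ts1;

      gettimeofday(&tv0, NULL);
      clock_gettime(CLOCK_REALTIME, &ts0);
      /* ... code under test would run here ... */
      gettimeofday(&tv1, NULL);
      clock_gettime(CLOCK_REALTIME, &ts1);

      /* microsecond resolution: very short kernels can round to zero */
      double coarse = (double)(tv1.tv_sec - tv0.tv_sec)
                    + (double)(tv1.tv_usec - tv0.tv_usec) * 1.e-6;
      /* nanosecond resolution, the getsec() formula from bench.h */
      double fine   = (double)(ts1.tv_sec - ts0.tv_sec)
                    + (double)(ts1.tv_nsec - ts0.tv_nsec) * 1.e-9;

      printf("gettimeofday : %.9f sec\nclock_gettime: %.9f sec\n", coarse, fine);
      return 0;
    }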
diff --git a/benchmark/geev.c b/benchmark/geev.c
index d3751defb..6e22cdfb6 100644
--- a/benchmark/geev.c
+++ b/benchmark/geev.c
@@ -36,13 +36,7 @@
 /* or implied, of The University of Texas at Austin.                 */
 /*********************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
-
+#include "bench.h"

 #undef GEEV

@@ -74,71 +68,6 @@ extern void GEEV( char* jobvl, char* jobvr, blasint* n, FLOAT* a, FLOAT* vr, blasint* ldvr, FLOAT* work, blasint* lwork, FLOAT *rwork, blasint* info );
 #endif

-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
-
-static void *huge_malloc(BLASLONG size){
-  int shmid;
-  void *address;
-
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-
-  if ((shmid =shmget(IPC_PRIVATE,
-                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
-                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
-    printf( "Memory allocation failed(shmget).\n");
-    exit(1);
-  }
-
-  address = shmat(shmid, NULL, SHM_RND);
-
-  if ((BLASLONG)address == -1){
-    printf( "Memory allocation failed(shmat).\n");
-    exit(1);
-  }
-
-  shmctl(shmid, IPC_RMID, 0);
-
-  return address;
-}
-
-#define malloc huge_malloc
-
-#endif
-
 int main(int argc, char *argv[]){

   FLOAT *a,*vl,*vr,*wi,*wr,*work,*rwork;
@@ -154,7 +83,6 @@ int main(int argc, char *argv[]){
   int to = 200;
   int step = 1;

-  struct timeval start, stop;
   double time1;

   argc--;argv++;
@@ -195,7 +123,7 @@ int main(int argc, char *argv[]){

   for(j = 0; j < to; j++){
     for(i = 0; i < to * COMPSIZE; i++){
-      a[i + j * to * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+      a[(long)i + (long)j * (long)to * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
     }
   }

@@ -214,7 +142,7 @@ int main(int argc, char *argv[]){
   }

-#ifdef linux
+#ifdef __linux
   srandom(getpid());
 #endif

@@ -223,7 +151,7 @@ int main(int argc, char *argv[]){
   for(m = from; m <= to; m += step){

     fprintf(stderr, " %6d : ", (int)m);
-    gettimeofday( &start, (struct timezone *)0);
+    begin();

     lwork = -1;
 #ifndef COMPLEX
     GEEV (&job, &jobr, &m, a, &m, wr, wi, vl, &m, vr, &m, work, &lwork, &info);
 #else
     GEEV (&job, &jobr, &m, a, &m, wr, vl, &m, vr, &m, work, &lwork,rwork, &info);
 #endif

-    gettimeofday( &stop, (struct timezone *)0);
+    end();

     if (info) {
       fprintf(stderr, "failed to compute eigenvalues .. %d\n", info);
       exit(1);
     }

-    time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
+    time1 = getsec();

     fprintf(stderr,
       " %10.2f MFlops : %10.2f Sec : %d\n",
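Unlike the level-1 benchmarks, geev.c goes through LAPACK's two-call workspace convention visible in the hunk above: the driver is first invoked with lwork = -1, which computes nothing but reports the optimal workspace size in work[0], and then runs for real with that workspace. A minimal sketch of the pattern using dgeev_ (the prototype is written out here as an assumption for the sketch; geev.c carries its own extern declaration):

    #include <stdio.h>
    #include <stdlib.h>

    extern void dgeev_(const char *jobvl, const char *jobvr, const int *n,
                       double *a, const int *lda, double *wr, double *wi,
                       double *vl, const int *ldvl, double *vr, const int *ldvr,
                       double *work, const int *lwork, int *info);

    int main(void) {
      int n = 4, lwork = -1, info = 0;
      double a[16], wr[4], wi[4], vl[16], vr[16], query;
      for (int i = 0; i < 16; i++) a[i] = (double)rand() / RAND_MAX - 0.5;

      /* workspace query: lwork = -1 computes nothing, fills `query` */
      dgeev_("V", "V", &n, a, &n, wr, wi, vl, &n, vr, &n, &query, &lwork, &info);

      lwork = (int)query;
      double *work = malloc(sizeof(double) * lwork);

      /* the actual eigenvalue computation -- the part worth timing */
      dgeev_("V", "V", &n, a, &n, wr, wi, vl, &n, vr, &n, work, &lwork, &info);
      printf("info = %d, lambda_0 = %g%+gi\n", info, wr[0], wi[0]);
      free(work);
      return 0;
    }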
&m, work, &lwork,rwork, &info); #endif - gettimeofday( &stop, (struct timezone *)0); + end(); if (info) { fprintf(stderr, "failed to compute eigenvalues .. %d\n", info); exit(1); } - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); fprintf(stderr, " %10.2f MFlops : %10.2f Sec : %d\n", diff --git a/benchmark/gemm.c b/benchmark/gemm.c index 85bcbc710..35f5096f3 100644 --- a/benchmark/gemm.c +++ b/benchmark/gemm.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" - +#include "bench.h" #undef GEMM @@ -39,6 +33,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef DOUBLE #define GEMM BLASFUNC(dgemm) +#elif defined(HALF) +#define GEMM BLASFUNC(sbgemm) #else #define GEMM BLASFUNC(sgemm) #endif @@ -53,74 +49,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ - FLOAT *a, *b, *c; + IFLOAT *a, *b; + FLOAT *c; FLOAT alpha[] = {1.0, 0.0}; FLOAT beta [] = {0.0, 0.0}; char transa = 'N'; @@ -136,7 +68,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1, timeg; argc--;argv++; @@ -184,30 +115,30 @@ int main(int argc, char *argv[]){ k = to; } - if (( a = (FLOAT *)malloc(sizeof(FLOAT) * m * k * COMPSIZE)) == NULL) { + if (( a = (IFLOAT *)malloc(sizeof(IFLOAT) * m * k * COMPSIZE)) == NULL) { fprintf(stderr,"Out of Memory!!\n");exit(1); } - if (( b = (FLOAT *)malloc(sizeof(FLOAT) * k * n * COMPSIZE)) == NULL) { + if (( b = (IFLOAT *)malloc(sizeof(IFLOAT) * k * n * COMPSIZE)) == NULL) { fprintf(stderr,"Out of Memory!!\n");exit(1); } if (( c = (FLOAT *)malloc(sizeof(FLOAT) * m * n * COMPSIZE)) == NULL) { fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif for (i = 0; i < m * k * COMPSIZE; i++) { - a[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[i] = ((IFLOAT) rand() / (IFLOAT) RAND_MAX) - 0.5; } for (i
= 0; i < k * n * COMPSIZE; i++) { - b[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + b[i] = ((IFLOAT) rand() / (IFLOAT) RAND_MAX) - 0.5; } for (i = 0; i < m * n * COMPSIZE; i++) { c[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - + fprintf(stderr, " SIZE Flops Time\n"); for (i = from; i <= to; i += step) { @@ -225,14 +156,14 @@ int main(int argc, char *argv[]){ ldc = m; fprintf(stderr, " M=%4d, N=%4d, K=%4d : ", (int)m, (int)n, (int)k); - gettimeofday( &start, (struct timezone *)0); + begin(); for (j=0; j -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" - +#include "bench.h" #undef GEMM @@ -53,71 +47,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *b, *c; @@ -133,7 +62,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -163,7 +91,7 @@ int main(int argc, char *argv[]){ loops = atoi(p); -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -181,22 +109,18 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - b[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - c[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + b[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + c[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } - gettimeofday( &start, (struct timezone *)0); + begin(); GEMM (&trans, &trans, &m, &m, &m, alpha, a, &m, b, &m, beta, c, &m ); - gettimeofday( &stop, (struct timezone *)0); - - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; - - timeg += time1; - + end(); + timeg += getsec(); } timeg /= loops; diff --git a/benchmark/gemv.c b/benchmark/gemv.c index b6a42f42f..a0001277a 100644 --- a/benchmark/gemv.c +++ b/benchmark/gemv.c @@ -25,12 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" +#include "bench.h" #undef GEMV @@ -52,72 +47,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #endif - -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *x, *y; @@ -137,7 +66,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -181,7 +109,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -197,7 +125,7 @@ int main(int argc, char *argv[]){ fprintf(stderr, " %6dx%d : ", (int)m,(int)n); for(j = 0; j < m; j++){ for(i = 0; i < n * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)j + (long)i * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } @@ -208,13 +136,13 @@ int main(int argc, char *argv[]){ x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - for(i = 0; i < n * COMPSIZE * abs(inc_y); i++){ + for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){ y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - gettimeofday( &start, (struct timezone *)0); + begin(); GEMV (&trans, &m, &n, alpha, a, &m, x, &inc_x, beta, y, &inc_y ); - gettimeofday( &stop, (struct timezone *)0); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + end(); + time1 = getsec(); timeg += time1; } @@ -234,7 +162,7 @@ int main(int argc, char *argv[]){ fprintf(stderr, " %6dx%d : ", (int)m,(int)n); for(j = 0; j < m; j++){ for(i = 0; i < n * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)j + (long)i * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } @@ -245,13 +173,13 @@ int main(int argc, char *argv[]){ x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - for(i = 0; i < n * COMPSIZE * abs(inc_y); i++){ + for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){ y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - gettimeofday( &start, (struct timezone *)0); + begin(); GEMV (&trans, &m, &n, alpha, a, &m, x,
&inc_x, beta, y, &inc_y ); - gettimeofday( &stop, (struct timezone *)0); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + end(); + time1 = getsec(); timeg += time1; } diff --git a/benchmark/ger.c b/benchmark/ger.c index a752a3c3e..7ce08c3ad 100644 --- a/benchmark/ger.c +++ b/benchmark/ger.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" - +#include "bench.h" #undef GER @@ -49,72 +43,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #endif - -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *x, *y; @@ -131,7 +59,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -165,7 +92,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -182,7 +109,7 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < n * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } @@ -198,16 +125,13 @@ int main(int argc, char *argv[]){ for (l=0; l -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" +#include "bench.h" double fabs(double); @@ -66,71 +61,6 @@ double fabs(double); #endif #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec =
(long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *b; @@ -142,7 +72,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -165,7 +94,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -177,39 +106,35 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - b[i + j * m * COMPSIZE] = 0.0; + b[(long)i + (long)j * (long)m * COMPSIZE] = 0.0; } } for (j = 0; j < m; ++j) { for (i = 0; i < m * COMPSIZE; ++i) { - b[i] += a[i + j * m * COMPSIZE]; + b[i] += a[(long)i + (long)j * (long)m * COMPSIZE]; } } - gettimeofday( &start, (struct timezone *)0); + begin(); GESV (&m, &m, a, &m, ipiv, b, &m, &info); - gettimeofday( &stop, (struct timezone *)0); - - - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; - + end(); + time1 = getsec(); fprintf(stderr, "%10.2f MFlops %10.6f s\n", COMPSIZE * COMPSIZE * (2. / 3. * (double)m * (double)m * (double)m + 2. * (double)m * (double)m * (double)m ) / (time1) * 1.e-6 , time1); - } return 0; diff --git a/benchmark/getri.c b/benchmark/getri.c index 083cdc9aa..98a860906 100644 --- a/benchmark/getri.c +++ b/benchmark/getri.c @@ -36,12 +36,7 @@ /* or implied, of The University of Texas at Austin. 
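Every hunk so far makes the same substitution: the per-file gettimeofday/timeval bookkeeping is deleted and replaced by calls to begin(), end() and getsec() from the new benchmark/bench.h header. That header itself is not shown in this part of the patch, so the following is only a minimal sketch of what it is assumed to provide, mirroring the arithmetic removed from each file (the helper names come from the diff; the static variable names are invented here for illustration):

#include <sys/time.h>

/* Hedged sketch of the timing helpers the converted benchmarks rely on.
   Assumes bench.h simply centralizes the timeval logic deleted above. */
static struct timeval bench_start, bench_stop;

static void begin(void) {
  gettimeofday(&bench_start, (struct timezone *)0);   /* mark start of region */
}

static void end(void) {
  gettimeofday(&bench_stop, (struct timezone *)0);    /* mark end of region */
}

static double getsec(void) {
  /* elapsed wall-clock seconds, same formula as the removed inline code */
  return (double)(bench_stop.tv_sec - bench_start.tv_sec) +
         (double)(bench_stop.tv_usec - bench_start.tv_usec) * 1.e-6;
}

Centralizing this also means the Windows gettimeofday shim and the dormant huge_malloc helper, duplicated verbatim in every benchmark, need to live in only one place.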
*/ /*********************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" +#include "bench.h" #undef GETRF #undef GETRI @@ -72,71 +67,6 @@ extern void GETRI(blasint *m, FLOAT *a, blasint *lda, blasint *ipiv, FLOAT *work, blasint *lwork, blasint *info); -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a,*work; @@ -148,7 +78,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -172,7 +101,7 @@ int main(int argc, char *argv[]){ for(j = 0; j < to; j++){ for(i = 0; i < to * COMPSIZE; i++){ - a[i + j * to * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)to * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } @@ -188,7 +117,7 @@ int main(int argc, char *argv[]){ } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -205,21 +134,21 @@ int main(int argc, char *argv[]){ exit(1); } - gettimeofday( &start, (struct timezone *)0); + begin(); lwork = -1; GETRI(&m, a, &m, ipiv, wkopt, &lwork, &info); lwork = (blasint)wkopt[0]; GETRI(&m, a, &m, ipiv, work, &lwork, &info); - gettimeofday( &stop, (struct timezone *)0); + end(); if (info) { fprintf(stderr, "failed compute inverse matrix .. %d\n", info); exit(1); } - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); fprintf(stderr, " %10.2f MFlops : %10.2f Sec : %d\n", diff --git a/benchmark/hbmv.c b/benchmark/hbmv.c new file mode 100644 index 000000000..35249bdf9 --- /dev/null +++ b/benchmark/hbmv.c @@ -0,0 +1,134 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution.
+3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef HBMV + +#ifdef DOUBLE +#define HBMV BLASFUNC(zhbmv) +#else +#define HBMV BLASFUNC(chbmv) +#endif + +int main(int argc, char *argv[]){ + + FLOAT *a, *x, *y; + FLOAT alpha[] = {1.0, 1.0}; + FLOAT beta [] = {0.0, 0.0}; + blasint k = 1; + char uplo='L'; + blasint m, i, j; + blasint inc_x=1, inc_y=1; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + double time1,timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + if ((p = getenv("OPENBLAS_INCY"))) inc_y = atoi(p); + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + if ((p = getenv("OPENBLAS_K"))) k = atoi(p); + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = '%c' k = %d Inc_x = %d Inc_y = %d Loops = %d\n", + from, to, step, uplo, k, inc_x, inc_y, loops); + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL) { + fprintf(stderr,"Out of Memory!!\n"); + exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL) { + fprintf(stderr,"Out of Memory!!\n"); + exit(1); + } + + if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL) { + fprintf(stderr,"Out of Memory!!\n"); + exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) { + + timeg=0; + + fprintf(stderr, " %6dx%d : ", (int)m, (int)m); + + for(j = 0; j < m; j++) { + for(i = 0; i < m * COMPSIZE; i++) { + a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + for (l = 0; l < loops; l++) { + + for (i = 0; i < m * COMPSIZE * abs(inc_x); i++) { + x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + for (i = 0; i < m * COMPSIZE * abs(inc_y); i++) { + y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + begin(); + + HBMV (&uplo, &m, &k, alpha, a, &m, x, &inc_x, beta, y, &inc_y ); + + end(); + + timeg += getsec(); + + } + + timeg /= loops; + + fprintf(stderr, " %10.2f MFlops\n", + COMPSIZE * COMPSIZE * 2. 
* (double)(2 * k + 1) * (double)m / timeg * 1.e-6); + } + + return 0; +} + +// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__"))); diff --git a/benchmark/hemm.c b/benchmark/hemm.c index 318c407ba..a0a9985ad 100644 --- a/benchmark/hemm.c +++ b/benchmark/hemm.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" - +#include "bench.h" #undef HEMM @@ -41,72 +35,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define HEMM BLASFUNC(chemm) #endif - -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *b, *c; @@ -126,7 +54,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -151,7 +78,7 @@ int main(int argc, char *argv[]){ -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -164,21 +91,19 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - b[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - c[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + b[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + c[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } - gettimeofday( &start, (struct timezone *)0); + begin(); HEMM (&side, &uplo, &m, &m, alpha, a, &m, b, &m, beta, c, &m ); - gettimeofday( &stop, (struct timezone *)0); - - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + end(); - gettimeofday( &start, (struct timezone *)0); + time1 = getsec(); fprintf(stderr, " %10.2f MFlops\n", diff --git a/benchmark/hemv.c b/benchmark/hemv.c index 05028e3cf..ad130ddd0 100644 --- a/benchmark/hemv.c +++ b/benchmark/hemv.c @@ -25,89 +25,16 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" - +#include "bench.h" #undef HEMV - #ifdef DOUBLE #define HEMV BLASFUNC(zhemv) #else #define HEMV BLASFUNC(chemv) #endif - -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *x, *y; @@ -124,7 +51,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -152,7 +78,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -167,7 +93,7 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } @@ -182,13 +108,13 @@ int main(int argc, char *argv[]){ for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){ y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - gettimeofday( &start, (struct timezone *)0); + begin(); HEMV (&uplo, &m, alpha, a, &m, x, &inc_x, beta, y, &inc_y ); - gettimeofday( &stop, (struct timezone *)0); + end(); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); timeg += time1; diff --git a/benchmark/her.c b/benchmark/her.c new file mode 100644 index 000000000..cd1fb7f48 --- /dev/null +++ b/benchmark/her.c @@ -0,0 +1,109 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3.
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef HER + +#ifdef DOUBLE +#define HER BLASFUNC(zher) +#else +#define HER BLASFUNC(cher) +#endif + +int main(int argc, char *argv[]){ + + FLOAT *a, *x; + FLOAT alpha[] = {1.0, 1.0}; + blasint incx = 1; + char *p; + + char uplo='U'; + char trans='N'; + + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + if ((p = getenv("OPENBLAS_TRANS"))) trans=*p; + + blasint m, i, j; + + int from = 1; + int to = 200; + int step = 1; + double time1; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Trans = %c\n", from, to, step,uplo,trans); + + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + fprintf(stderr, " %6d : ", (int)m); + + for(j = 0; j < m; j++){ + for(i = 0; i < m * COMPSIZE; i++){ + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + x[ (long)j * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + begin(); + + HER (&uplo, &m, alpha, x, &incx, a, &m ); + + end(); + + time1 = getsec(); + + fprintf(stderr, + " %10.2f MFlops\n", + COMPSIZE * COMPSIZE * 1. * (double)m * (double)m / time1 * 1.e-6); + + } + + return 0; +} diff --git a/benchmark/her2.c b/benchmark/her2.c new file mode 100644 index 000000000..d87bfd466 --- /dev/null +++ b/benchmark/her2.c @@ -0,0 +1,113 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. 
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef HER2 + +#ifdef DOUBLE +#define HER2 BLASFUNC(zher2) +#else +#define HER2 BLASFUNC(cher2) +#endif + +int main(int argc, char *argv[]){ + + FLOAT *a, *x, *y; + FLOAT alpha[] = {1.0, 1.0}; + blasint inc = 1; + char *p; + + char uplo='U'; + char trans='N'; + + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + if ((p = getenv("OPENBLAS_TRANS"))) trans=*p; + + blasint m, i, j; + + int from = 1; + int to = 200; + int step = 1; + + double time1; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Trans = %c\n", from, to, step,uplo,trans); + + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + fprintf(stderr, " %6d : ", (int)m); + + for(j = 0; j < m; j++){ + for(i = 0; i < m * COMPSIZE; i++){ + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + x[ (long)j * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + y[ (long)j * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + begin(); + + HER2 (&uplo, &m, alpha, x, &inc, y, &inc, a, &m ); + + end(); + + time1 = getsec(); + + fprintf(stderr, + " %10.2f MFlops\n", + COMPSIZE * COMPSIZE * 2. * (double)m * (double)m / time1 * 1.e-6); + + } + + return 0; +} diff --git a/benchmark/her2k.c b/benchmark/her2k.c index 028e2718f..d3cdce696 100644 --- a/benchmark/her2k.c +++ b/benchmark/her2k.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" - +#include "bench.h" #undef HER2K #ifdef DOUBLE @@ -40,72 +34,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
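The other change repeated throughout these files, including the new her.c and her2.c drivers above, is the cast chain in index expressions such as a[(long)i + (long)j * (long)m * COMPSIZE]. The loop counters are 32-bit blasint/int values on most builds, so for large matrices the product j * m * COMPSIZE overflows before it is ever widened to the pointer's index type; casting each operand first keeps the arithmetic in 64-bit long on LP64 platforms. A standalone illustration (the sizes below are invented for the example):

#include <stdio.h>

int main(void) {
  int i = 0, j = 50000, m = 50000;          /* large but plausible dimensions */
  /* int bad = i + j * m;                      50000 * 50000 = 2.5e9 overflows a
                                               32-bit int: undefined behavior  */
  long idx = (long)i + (long)j * (long)m;   /* multiply performed in 64-bit long */
  printf("%ld\n", idx);                     /* prints 2500000000 as expected */
  return 0;
}

Note the casts must appear on the operands, not on the finished product: (long)(j * m) would merely widen an already-overflowed int result.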
#define HER2K BLASFUNC(cher2k) #endif - -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *b, *c; @@ -125,7 +53,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -150,7 +77,7 @@ int main(int argc, char *argv[]){ -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -163,21 +90,19 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - b[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - c[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + b[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + c[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } - gettimeofday( &start, (struct timezone *)0); + begin(); HER2K (&uplo, &trans, &m, &m, alpha, a, &m, b, &m, beta, c, &m ); - gettimeofday( &stop, (struct timezone *)0); - - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + end(); - gettimeofday( &start, (struct timezone *)0); + time1 = getsec(); fprintf(stderr, " %10.2f MFlops\n", diff --git a/benchmark/herk.c b/benchmark/herk.c index d2e25ff46..628dc2c11 100644 --- a/benchmark/herk.c +++ b/benchmark/herk.c @@ -25,89 +25,16 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" - +#include "bench.h" #undef HERK - #ifdef DOUBLE #define HERK BLASFUNC(zherk) #else #define HERK BLASFUNC(cherk) #endif - -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *c; @@ -127,7 +54,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -149,7 +75,7 @@ int main(int argc, char *argv[]){ -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -162,25 +88,22 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - c[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + c[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } - gettimeofday( &start, (struct timezone *)0); + begin(); HERK (&uplo, &trans, &m, &m, alpha, a, &m, beta, c, &m ); - gettimeofday( &stop, (struct timezone *)0); + end(); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; - - gettimeofday( &start, (struct timezone *)0); + time1 = getsec(); fprintf(stderr, " %10.2f MFlops\n", COMPSIZE * COMPSIZE * 1. * (double)m * (double)m * (double)m / time1 * 1.e-6); - } return 0; diff --git a/benchmark/hpmv.c b/benchmark/hpmv.c new file mode 100644 index 000000000..907e2adc4 --- /dev/null +++ b/benchmark/hpmv.c @@ -0,0 +1,133 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution.
+3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef HPMV + +#ifdef DOUBLE +#define HPMV BLASFUNC(zhpmv) +#else +#define HPMV BLASFUNC(chpmv) +#endif + +int main(int argc, char *argv[]){ + + FLOAT *a, *x, *y; + FLOAT alpha[] = {1.0, 1.0}; + FLOAT beta [] = {1.0, 1.0}; + char uplo='L'; + blasint m, i, j; + blasint inc_x=1, inc_y=1; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + double time1,timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + if ((p = getenv("OPENBLAS_INCY"))) inc_y = atoi(p); + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = '%c' Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,uplo,inc_x,inc_y,loops); + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL) { + fprintf(stderr,"Out of Memory!!\n"); + exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL) { + fprintf(stderr,"Out of Memory!!\n"); + exit(1); + } + + if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL) { + fprintf(stderr,"Out of Memory!!\n"); + exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) { + + timeg=0; + + fprintf(stderr, " %6dx%d : ", (int)m, (int)m); + + for(j = 0; j < m; j++) { + for(i = 0; i < m * COMPSIZE; i++) { + a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + for (l = 0; l < loops; l++) { + + for (i = 0; i < m * COMPSIZE * abs(inc_x); i++) { + x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + for (i = 0; i < m * COMPSIZE * abs(inc_y); i++) { + y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + begin(); + + HPMV (&uplo, &m, alpha, a, x, &inc_x, beta, y, &inc_y ); + + end(); + + time1 = getsec(); + + timeg += time1; + + } + + timeg /= loops; + + fprintf(stderr, " %10.2f MFlops\n", + COMPSIZE * COMPSIZE * 2. 
* (double)m * (double)m / timeg * 1.e-6); + } + + return 0; +} + +// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__"))); diff --git a/benchmark/iamax.c b/benchmark/iamax.c index 034e24ea9..15618cbcc 100644 --- a/benchmark/iamax.c +++ b/benchmark/iamax.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" - +#include "bench.h" #undef IAMAX @@ -49,71 +43,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *x; @@ -127,7 +56,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -145,7 +73,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -166,13 +94,13 @@ int main(int argc, char *argv[]){ x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - gettimeofday( &start, (struct timezone *)0); + begin(); IAMAX (&m, x, &inc_x); - gettimeofday( &stop, (struct timezone *)0); + end(); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); timeg += time1; @@ -181,7 +109,7 @@ int main(int argc, char *argv[]){ timeg /= loops; fprintf(stderr, - " %10.2f MFlops %10.6f sec\n", + " %10.2f MBytes %10.6f sec\n", COMPSIZE * sizeof(FLOAT) * 1. * (double)m / timeg * 1.e-6, timeg); } diff --git a/benchmark/iamin.c b/benchmark/iamin.c new file mode 100644 index 000000000..a57638ecc --- /dev/null +++ b/benchmark/iamin.c @@ -0,0 +1,120 @@ +/*************************************************************************** +Copyright (c) 2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2.
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef IAMIN + +#ifdef COMPLEX +#ifdef DOUBLE +#define IAMIN BLASFUNC(izamin) +#else +#define IAMIN BLASFUNC(icamin) +#endif +#else +#ifdef DOUBLE +#define IAMIN BLASFUNC(idamin) +#else +#define IAMIN BLASFUNC(isamin) +#endif +#endif + +int main(int argc, char *argv[]){ + + FLOAT *x; + blasint m, i; + blasint inc_x=1; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + double time1,timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + + fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step,inc_x,loops); + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + timeg=0; + + fprintf(stderr, " %6d : ", (int)m); + + + for (l=0; l 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + + fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step,inc_x,loops); + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + timeg=0; + + fprintf(stderr, " %6d : ", (int)m); + + + for (l=0; l 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + + fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step,inc_x,loops); + + if (( x = 
(FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + timeg=0; + + fprintf(stderr, " %6d : ", (int)m); + + + for (l=0; l -#include <stdlib.h> -#ifdef __CYGWIN32__ -#include <sys/time.h> -#endif -#include "common.h" +#include "bench.h" double fabs(double); @@ -72,71 +67,6 @@ double fabs(double); #endif #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *b; @@ -151,7 +81,6 @@ int main(int argc, char *argv[]){ FLOAT maxerr; - struct timeval start, stop; double time1, time2; argc--;argv++; @@ -174,7 +103,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -186,7 +115,7 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } @@ -194,35 +123,35 @@ int main(int argc, char *argv[]){ for (j = 0; j < m; ++j) { for (i = 0; i < m * COMPSIZE; ++i) { - b[i] += a[i + j * m * COMPSIZE]; + b[i] += a[(long)i + (long)j * (long)m * COMPSIZE]; } } - gettimeofday( &start, (struct timezone *)0); + begin(); GETRF (&m, &m, a, &m, ipiv, &info); - gettimeofday( &stop, (struct timezone *)0); + end(); if (info) { fprintf(stderr, "Matrix is not singular .. %d\n", info); exit(1); } - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); - gettimeofday( &start, (struct timezone *)0); + begin(); GETRS("N", &m, &unit, a, &m, ipiv, b, &m, &info); - gettimeofday( &stop, (struct timezone *)0); + end(); if (info) { fprintf(stderr, "Matrix is not singular ..
%d\n", info); exit(1); } - time2 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time2 = getsec(); maxerr = 0.; diff --git a/benchmark/max.c b/benchmark/max.c new file mode 100644 index 000000000..301b943a5 --- /dev/null +++ b/benchmark/max.c @@ -0,0 +1,113 @@ +/*************************************************************************** +Copyright (c) 2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "bench.h" + +#undef NAMAX + +#ifndef COMPLEX +#ifdef DOUBLE +#define NAMAX BLASFUNC(dmax) +#else +#define NAMAX BLASFUNC(smax) +#endif +#endif + +int main(int argc, char *argv[]){ + + FLOAT *x; + blasint m, i; + blasint inc_x=1; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + double time1,timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + + fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step,inc_x,loops); + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + timeg=0; + + fprintf(stderr, " %6d : ", (int)m); + + + for (l=0; l 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + + fprintf(stderr, "From : %3d To : %3d Step = %3d Inc_x = %d Loops = %d\n", from, to, step,inc_x,loops); + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + timeg=0; + + fprintf(stderr, " %6d : ", (int)m); + + + for (l=0; l -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" - +#include "bench.h" #undef NRM2 @@ -49,71 +43,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
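The macro ladders at the top of each benchmark (IAMIN, NAMAX, NRM2, ...) pick the BLAS symbol at compile time, so one source file serves all precisions. A hedged illustration of the idiom, using nrm2 whose ladder begins just above; the -DDOUBLE/-DCOMPLEX build flags are an assumption about the benchmark Makefile, not something visible in this diff:

/* One source, four variants: compiled with combinations of -DDOUBLE and
 * -DCOMPLEX (assumed build flags).  BLASFUNC() applies whatever symbol
 * decoration the platform needs, e.g. a trailing underscore. */
#undef NRM2
#ifndef COMPLEX
#ifdef DOUBLE
#define NRM2 BLASFUNC(dnrm2)	/* double real    */
#else
#define NRM2 BLASFUNC(snrm2)	/* single real    */
#endif
#else
#ifdef DOUBLE
#define NRM2 BLASFUNC(dznrm2)	/* double complex */
#else
#define NRM2 BLASFUNC(scnrm2)	/* single complex */
#endif
#endif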
#endif #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *x; @@ -127,7 +56,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -145,7 +73,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -166,13 +94,13 @@ int main(int argc, char *argv[]){ x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - gettimeofday( &start, (struct timezone *)0); + begin(); NRM2 (&m, x, &inc_x); - gettimeofday( &stop, (struct timezone *)0); + end(); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); timeg += time1; diff --git a/benchmark/potrf.c b/benchmark/potrf.c index 1d714549b..116d0cca5 100644 --- a/benchmark/potrf.c +++ b/benchmark/potrf.c @@ -36,12 +36,7 @@ /* or implied, of The University of Texas at Austin. 
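One recurring one-liner in this patch is the change from #ifdef linux to #ifdef __linux. The bare macro linux is a legacy GNU extension that the compiler stops predefining under strict ISO modes such as -std=c99, while the reserved-namespace forms __linux and __linux__ remain available, so the old guard silently skipped the srandom(getpid()) seeding in strict builds. A defensive spelling, shown here only as an illustration:

/* Bare "linux" vanishes under -std=c99; the underscore forms do not.
 * Checking both reserved spellings is the portable pattern. */
#if defined(__linux__) || defined(__linux)
#include <unistd.h>	/* getpid()  */
#include <stdlib.h>	/* srandom() */
static void seed_rng(void) { srandom(getpid()); }
#else
static void seed_rng(void) { /* keep the default deterministic seed */ }
#endif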
 */
 /*********************************************************************/

-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
+#include "bench.h"

 double fabs(double);

@@ -86,37 +81,7 @@ double fabs(double);
 // extern void POTRI(char *uplo, blasint *m, FLOAT *a, blasint *lda, blasint *info);
 // extern void POTRS(char *uplo, blasint *m, blasint *n, FLOAT *a, blasint *lda, FLOAT *b, blasint *ldb, blasint *info);

-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif

 int main(int argc, char *argv[]){

@@ -141,7 +106,6 @@ int main(int argc, char *argv[]){
   int to = 200;
   int step = 1;

-  struct timeval start, stop;
   double time1;

   argc--;argv++;

@@ -170,46 +134,46 @@ int main(int argc, char *argv[]){
 #ifndef COMPLEX
     if (uplos & 1) {
       for (j = 0; j < m; j++) {
-	for(i = 0; i < j; i++) a[i + j * m] = 0.;
-	a[j + j * m] = ((double) rand() / (double) RAND_MAX) + 8.;
-	for(i = j + 1; i < m; i++) a[i + j * m] = ((double) rand() / (double) RAND_MAX) - 0.5;
+	for(i = 0; i < j; i++) a[(long)i + (long)j * (long)m] = 0.;
+	a[(long)j + (long)j * (long)m] = ((double) rand() / (double) RAND_MAX) + 8.;
+	for(i = j + 1; i < m; i++) a[(long)i + (long)j * (long)m] = ((double) rand() / (double) RAND_MAX) - 0.5;
       }
     } else {
       for (j = 0; j < m; j++) {
-	for(i = 0; i < j; i++) a[i + j * m] = ((double) rand() / (double) RAND_MAX) - 0.5;
-	a[j + j * m] = ((double) rand() / (double) RAND_MAX) + 8.;
-	for(i = j + 1; i < m; i++) a[i + j * m] = 0.;
+	for(i = 0; i < j; i++) a[(long)i + (long)j * (long)m] = ((double) rand() / (double) RAND_MAX) - 0.5;
+	a[(long)j + (long)j * (long)m] = ((double) rand() / (double) RAND_MAX) + 8.;
+	for(i = j + 1; i < m; i++) a[(long)i + (long)j * (long)m] = 0.;
       }
     }
 #else
     if (uplos & 1) {
       for (j = 0; j < m; j++) {
	for(i = 0; i < j; i++) {
-	  a[(i + j * m) * 2 + 0] = 0.;
-	  a[(i + j * m) * 2 + 1] = 0.;
+	  a[((long)i + (long)j * (long)m) * 2 + 0] = 0.;
+	  a[((long)i + (long)j * (long)m) * 2 + 1] = 0.;
	}
-	a[(j + j * m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) + 8.;
-	a[(j + j * m) * 2 + 1] = 0.;
+	a[((long)j + (long)j * (long)m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) + 8.;
+	a[((long)j + (long)j * (long)m) * 2 + 1] = 0.;
	for(i = j + 1; i < m; i++) {
-	  a[(i + j * m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) - 0.5;
-	  a[(i + j * m) * 2 + 1] = ((double) rand() / (double) RAND_MAX) - 0.5;
+	  a[((long)i + (long)j * (long)m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) - 0.5;
+	  a[((long)i + (long)j * (long)m) * 2 + 1] = ((double) rand() / (double) RAND_MAX) - 0.5;
	}
       }
     } else {
       for (j = 0; j < m; j++) {
	for(i = 0; i < j; i++) {
-	  a[(i + j * m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) - 0.5;
-	  a[(i + j * m) * 2 + 1] = ((double) rand() / (double) RAND_MAX) - 0.5;
+	  a[((long)i + (long)j * (long)m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) - 0.5;
+	  a[((long)i + (long)j * (long)m) * 2 + 1] = ((double) rand() / (double) RAND_MAX) - 0.5;
	}
-	a[(j + j * m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) + 8.;
-	a[(j + j * m) * 2 + 1] = 0.;
+	a[((long)j +
(long)j * (long)m) * 2 + 0] = ((double) rand() / (double) RAND_MAX) + 8.; + a[((long)j + (long)j * (long)m) * 2 + 1] = 0.; for(i = j + 1; i < m; i++) { - a[(i + j * m) * 2 + 0] = 0.; - a[(i + j * m) * 2 + 1] = 0.; + a[((long)i + (long)j * (long)m) * 2 + 0] = 0.; + a[((long)i + (long)j * (long)m) * 2 + 1] = 0.; } } } @@ -217,18 +181,18 @@ int main(int argc, char *argv[]){ SYRK(uplo[uplos], trans[uplos], &m, &m, alpha, a, &m, beta, b, &m); - gettimeofday( &start, (struct timezone *)0); + begin(); POTRF(uplo[uplos], &m, b, &m, &info); - gettimeofday( &stop, (struct timezone *)0); + end(); if (info != 0) { fprintf(stderr, "Potrf info = %d\n", info); exit(1); } - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); flops = COMPSIZE * COMPSIZE * (1.0/3.0 * (double)m * (double)m *(double)m +1.0/2.0* (double)m *(double)m + 1.0/6.0* (double)m) / time1 * 1.e-6; if ( btest == 'S' ) @@ -240,17 +204,17 @@ int main(int argc, char *argv[]){ } } - gettimeofday( &start, (struct timezone *)0); + begin(); POTRS(uplo[uplos], &m, &m, b, &m, a, &m, &info); - gettimeofday( &stop, (struct timezone *)0); + end(); if (info != 0) { fprintf(stderr, "Potrs info = %d\n", info); exit(1); } - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); flops = COMPSIZE * COMPSIZE * (2.0 * (double)m * (double)m *(double)m ) / time1 * 1.e-6; } @@ -258,18 +222,18 @@ int main(int argc, char *argv[]){ if ( btest == 'I' ) { - gettimeofday( &start, (struct timezone *)0); + begin(); POTRI(uplo[uplos], &m, b, &m, &info); - gettimeofday( &stop, (struct timezone *)0); + end(); if (info != 0) { fprintf(stderr, "Potri info = %d\n", info); exit(1); } - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); flops = COMPSIZE * COMPSIZE * (2.0/3.0 * (double)m * (double)m *(double)m +1.0/2.0* (double)m *(double)m + 5.0/6.0* (double)m) / time1 * 1.e-6; } diff --git a/benchmark/rot.c b/benchmark/rot.c index 3ff783cc6..15b630e36 100644 --- a/benchmark/rot.c +++ b/benchmark/rot.c @@ -25,16 +25,11 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" - +#include "bench.h" -#undef DOT +#undef ROT +#ifndef COMPLEX #ifdef DOUBLE #define ROT BLASFUNC(drot) @@ -42,70 +37,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
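The (long) casts added throughout potrf.c, and in the other benchmarks in this patch, fix a real 32-bit overflow: i, j and m are int-sized, so the column-major index i + j * m is computed in int and wraps once j * m exceeds INT_MAX, i.e. for matrices larger than roughly 46341x46341. Promoting the operands before the multiply keeps the arithmetic in 64 bits on LP64 systems. A self-contained demonstration:

/* Demonstrates the indexing overflow the casts prevent.  The "bad" line
 * multiplies in int, which wraps (formally undefined) once j*m > INT_MAX;
 * the "good" line promotes to long first, exactly as the patch does. */
#include <stdio.h>

int main(void) {
	int i = 0, j = 50000, m = 50000;          /* a 50000x50000 matrix  */
	long bad  = i + j * m;                    /* int multiply: garbage */
	long good = (long)i + (long)j * (long)m;  /* 2500000000: correct   */
	printf("bad = %ld, good = %ld\n", bad, good);
	return 0;
}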
#define ROT BLASFUNC(srot) #endif +#else -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 +#ifdef DOUBLE +#define ROT BLASFUNC(zdrot) +#else +#define ROT BLASFUNC(csrot) #endif - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - #endif int main(int argc, char *argv[]){ @@ -124,7 +63,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -147,7 +85,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -160,32 +98,31 @@ int main(int argc, char *argv[]){ fprintf(stderr, " %6d : ", (int)m); + for(i = 0; i < m * COMPSIZE * abs(inc_x); i++){ + x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){ + y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } for (l=0; l 0) { + from = atol(*argv); + argc--; + argv++; + } + if (argc > 0) { + to = MAX(atol(*argv), from); + argc--; + argv++; + } + if (argc > 0) { + step = atol(*argv); + argc--; + argv++; + } + + if ((p = getenv("OPENBLAS_LOOPS"))) + loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) + inc_x = atoi(p); + if ((p = getenv("OPENBLAS_INCY"))) + inc_y = atoi(p); + + fprintf( + stderr, + "From : %3d To : %3d Step = %3d Inc_x = %d Inc_y = %d Loops = %d\n", + from, to, step, inc_x, inc_y, loops); + + if ((x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == + NULL) { + fprintf(stderr, "Out of Memory!!\n"); + exit(1); + } + + if ((y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == + NULL) { + fprintf(stderr, "Out of Memory!!\n"); + exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for (m = from; m <= to; m += step) { + + timeg = 0; + + fprintf(stderr, " %6d : ", (int)m); + for (i = 0; i < m * COMPSIZE * abs(inc_x); i++) { + x[i] = ((FLOAT)rand() / (FLOAT)RAND_MAX) - 0.5; + } + + for (i = 0; i < m * COMPSIZE * abs(inc_y); i++) { + y[i] = ((FLOAT)rand() / (FLOAT)RAND_MAX) - 0.5; + } + + for (l = 0; l < loops; l++) { + begin(); + + ROTM(&m, x, &inc_x, y, &inc_y, param); + + end(); + + time1 = getsec(); + + timeg += time1; + } + + timeg /= loops; + + fprintf(stderr, " %10.2f MFlops %10.6f sec\n", + COMPSIZE * COMPSIZE * 6. 
* (double)m / timeg * 1.e-6, timeg); + } + + return 0; +} diff --git a/benchmark/scal.c b/benchmark/scal.c index 453c3234d..8de6cfd04 100644 --- a/benchmark/scal.c +++ b/benchmark/scal.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" - +#include "bench.h" #undef SCAL @@ -49,71 +43,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *x, *y; @@ -128,7 +57,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -150,7 +78,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -174,13 +102,13 @@ int main(int argc, char *argv[]){ for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){ y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - gettimeofday( &start, (struct timezone *)0); + begin(); SCAL (&m, alpha, x, &inc_x); - gettimeofday( &stop, (struct timezone *)0); + end(); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); timeg += time1; diff --git a/benchmark/spmv.c b/benchmark/spmv.c new file mode 100644 index 000000000..e4dcbf4ae --- /dev/null +++ b/benchmark/spmv.c @@ -0,0 +1,146 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. 
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef SPMV + +#ifndef COMPLEX + +#ifdef DOUBLE +#define SPMV BLASFUNC(dspmv) +#else +#define SPMV BLASFUNC(sspmv) +#endif + +#else + +#ifdef DOUBLE +#define SPMV BLASFUNC(zspmv) +#else +#define SPMV BLASFUNC(cspmv) +#endif + +#endif + +int main(int argc, char *argv[]){ + + FLOAT *a, *x, *y; + FLOAT alpha[] = {1.0, 1.0}; + FLOAT beta [] = {1.0, 1.0}; + char uplo='L'; + blasint m, i, j; + blasint inc_x=1,inc_y=1; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + double time1,timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + if ((p = getenv("OPENBLAS_INCY"))) inc_y = atoi(p); + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = '%c' Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,uplo,inc_x,inc_y,loops); + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + timeg=0; + + fprintf(stderr, " %6dx%d : ", (int)m,(int)m); + + for(j = 0; j < m; j++){ + for(i = 0; i < m * COMPSIZE; i++){ + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + + for (l=0; l 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Inc_x = %d\n", from, to, step,uplo,inc_x); + + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( c = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops Time\n"); + + for(m = from; m <= to; 
m += step) + { + timeg=0; + + fprintf(stderr, " %6d : ", (int)m); + + for (l=0; l 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Inc_x = %d Inc_y = %d\n", from, to, step,uplo,inc_x,inc_y); + + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( b = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( c = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops Time\n"); + + for(m = from; m <= to; m += step) + { + timeg=0; + + fprintf(stderr, " %6d : ", (int)m); + + for (l=0; l -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" +#include "bench.h" #undef SWAP @@ -49,71 +44,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *x, *y; @@ -128,7 +58,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -151,7 +80,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -175,13 +104,13 @@ int main(int argc, char *argv[]){ for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){ y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - gettimeofday( &start, (struct timezone *)0); + begin(); SWAP (&m, x, &inc_x, y, &inc_y ); - gettimeofday( &stop, (struct timezone *)0); + end(); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); timeg += time1; diff --git a/benchmark/symm.c b/benchmark/symm.c index 35ebcee97..1c6d91d00 100644 --- a/benchmark/symm.c +++ b/benchmark/symm.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
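All of these harnesses report the same way: an analytic operation count divided by the measured (loop-averaged) time, scaled by 1.e-6 to MFlops. For the spmv benchmark above, a real symmetric matrix-vector product performs one multiply and one add per matrix entry, about 2*m*m flops, so the arithmetic reduces to the small helper below. The COMPSIZE*COMPSIZE factor seen in other files scales the count for complex data; spmv's exact print statement is not visible in this excerpt, so this is an inferred rendering:

/* Worked example of the MFlops arithmetic used throughout:
 * m = 1000, timeg = 0.002 s  ->  2e6 flops / 0.002 s * 1e-6 = 1000 MFlops. */
static double mflops_spmv(int m, double timeg) {
	return 2. * (double)m * (double)m / timeg * 1.e-6;
}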
*****************************************************************************/ -#include -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" - +#include "bench.h" #undef SYMM @@ -53,71 +47,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *b, *c; @@ -137,7 +66,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -162,7 +90,7 @@ int main(int argc, char *argv[]){ -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -175,21 +103,19 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - b[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - c[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + b[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + c[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } - gettimeofday( &start, (struct timezone *)0); + begin(); SYMM (&side, &uplo, &m, &m, alpha, a, &m, b, &m, beta, c, &m ); - gettimeofday( &stop, (struct timezone *)0); - - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + end(); - gettimeofday( &start, (struct timezone *)0); + time1 = getsec(); fprintf(stderr, " %10.2f MFlops\n", diff --git a/benchmark/symv.c b/benchmark/symv.c index df2a5d301..0a35aaef0 100644 --- a/benchmark/symv.c +++ b/benchmark/symv.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" - +#include "bench.h" #undef SYMV @@ -53,71 +47,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
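Stripped of the per-routine details, every file in this patch shares one skeleton: a size range from argv, overrides from OPENBLAS_* environment variables, operands filled with rand()/RAND_MAX - 0.5, then one begin()/end() pair per loop iteration with the times averaged. A condensed, self-contained rendering of that shape, with a dummy kernel standing in for the BLAS call:

/* Condensed sketch of the common benchmark skeleton.  bench_kernel() is a
 * placeholder; everything else mirrors the structure of the files above. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

static double sink;	/* keeps the dummy work observable */
static void bench_kernel(int m, const double *x) {
	for (int i = 0; i < m; i++) sink += x[i];
}

int main(int argc, char *argv[]) {
	int from = 1, to = 200, step = 1, loops = 1;
	char *p;
	if (argc > 1) from = atoi(argv[1]);
	if (argc > 2) to   = atoi(argv[2]) > from ? atoi(argv[2]) : from;
	if (argc > 3) step = atoi(argv[3]);
	if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p);

	double *x = malloc(sizeof(double) * to);
	if (x == NULL) { fprintf(stderr, "Out of Memory!!\n"); exit(1); }

	for (int m = from; m <= to; m += step) {
		for (int i = 0; i < m; i++)
			x[i] = (double)rand() / (double)RAND_MAX - 0.5;
		double timeg = 0.;
		for (int l = 0; l < loops; l++) {
			struct timeval start, stop;
			gettimeofday(&start, NULL);
			bench_kernel(m, x);
			gettimeofday(&stop, NULL);
			timeg += (double)(stop.tv_sec - start.tv_sec) +
			         (double)(stop.tv_usec - start.tv_usec) * 1.e-6;
		}
		fprintf(stderr, " %6d : %10.6f sec (avg of %d)\n", m, timeg / loops, loops);
	}
	free(x);
	return 0;
}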
#endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *x, *y; @@ -134,7 +63,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1,timeg; argc--;argv++; @@ -162,7 +90,7 @@ int main(int argc, char *argv[]){ fprintf(stderr,"Out of Memory!!\n");exit(1); } -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -177,7 +105,7 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } @@ -192,13 +120,13 @@ int main(int argc, char *argv[]){ for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){ y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } - gettimeofday( &start, (struct timezone *)0); + begin(); SYMV (&uplo, &m, alpha, a, &m, x, &inc_x, beta, y, &inc_y ); - gettimeofday( &stop, (struct timezone *)0); + end(); - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + time1 = getsec(); timeg += time1; diff --git a/benchmark/syr.c b/benchmark/syr.c new file mode 100644 index 000000000..ebbf2bd3c --- /dev/null +++ b/benchmark/syr.c @@ -0,0 +1,113 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + + +#undef SYR + +#ifdef DOUBLE +#define SYR BLASFUNC(dsyr) +#else +#define SYR BLASFUNC(ssyr) +#endif + + +int main(int argc, char *argv[]){ + + FLOAT *x,*a; + FLOAT alpha[] = {1.0, 1.0}; + char *p; + + char uplo='U'; + + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + + blasint m, i, j; + blasint inc_x= 1; + int from = 1; + int to = 200; + int step = 1; + + double time1; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Inc_x = %d\n", from, to, step,uplo,inc_x); + + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + fprintf(stderr, " %6d : ", (int)m); + + for(i = 0; i < m * COMPSIZE * abs(inc_x); i++){ + x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + for(j = 0; j < m; j++){ + for(i = 0; i < m * COMPSIZE; i++){ + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + begin(); + + SYR (&uplo, &m, alpha, x, &inc_x, a, &m ); + + end(); + + time1 = getsec(); + + fprintf(stderr, + " %10.2f MFlops\n", + COMPSIZE * COMPSIZE * 1. * (double)m * (double)m / time1 * 1.e-6); + + } + + return 0; +} + +// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__"))); diff --git a/benchmark/syr2.c b/benchmark/syr2.c new file mode 100644 index 000000000..acbc86987 --- /dev/null +++ b/benchmark/syr2.c @@ -0,0 +1,121 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
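The new syr.c above times the symmetric rank-1 update A := alpha*x*x^T + A, and syr2.c, whose header begins here, is its rank-2 sibling with roughly twice the work; that is where the m*m and 2*m*m factors in their MFlops lines come from. For reference, a naive rank-1 update for the upper ('U') case with unit stride, shown only to make the operation count concrete:

/* Naive column-major DSYR, upper triangle, inc_x == 1:
 * about m*(m+1)/2 entries touched, 2 flops each  ->  ~m*m flops. */
static void syr_ref(int m, double alpha, const double *x, double *a, int lda) {
	for (int j = 0; j < m; j++)
		for (int i = 0; i <= j; i++)
			a[(long)i + (long)j * (long)lda] += alpha * x[i] * x[j];
}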
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef SYR2 + + +#ifdef DOUBLE +#define SYR2 BLASFUNC(dsyr2) +#else +#define SYR2 BLASFUNC(ssyr2) +#endif + +int main(int argc, char *argv[]){ + + FLOAT *x, *y, *a; + FLOAT alpha[] = {1.0, 1.0}; + char *p; + + char uplo='U'; + + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + + blasint m, i, j; + blasint inc_x= 1; + blasint inc_y= 1; + int from = 1; + int to = 200; + int step = 1; + + double time1; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Inc_x = %d Inc_y = %d\n", from, to, step,uplo,inc_x,inc_y); + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + fprintf(stderr, " %6d : ", (int)m); + for(i = 0; i < m * COMPSIZE * abs(inc_x); i++){ + x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){ + y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + for(j = 0; j < m; j++){ + for(i = 0; i < m * COMPSIZE; i++){ + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + begin(); + + SYR2 (&uplo, &m, alpha, x, &inc_x, y, &inc_y, a, &m ); + + end(); + + time1 = getsec(); + + fprintf(stderr, + " %10.2f MFlops\n", + COMPSIZE * COMPSIZE * 2. * (double)m * (double)m / time1 * 1.e-6); + + } + + return 0; +} + +// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__"))); diff --git a/benchmark/syr2k.c b/benchmark/syr2k.c index 9840b5f3e..3895c2861 100644 --- a/benchmark/syr2k.c +++ b/benchmark/syr2k.c @@ -25,12 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" +#include "bench.h" #undef SYR2K @@ -53,71 +48,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *b, *c; @@ -137,7 +67,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -162,7 +91,7 @@ int main(int argc, char *argv[]){ -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -175,21 +104,19 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - b[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - c[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + b[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + c[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } - gettimeofday( &start, (struct timezone *)0); + begin(); SYR2K (&uplo, &trans, &m, &m, alpha, a, &m, b, &m, beta, c, &m ); - gettimeofday( &stop, (struct timezone *)0); - - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + end(); - gettimeofday( &start, (struct timezone *)0); + time1 = getsec(); fprintf(stderr, " %10.2f MFlops\n", diff --git a/benchmark/syrk.c b/benchmark/syrk.c index 34817f2bb..82606a21a 100644 --- a/benchmark/syrk.c +++ b/benchmark/syrk.c @@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" - +#include "bench.h" #undef SYRK @@ -53,71 +47,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
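syr2k.c and syrk.c, converted in this hunk and the previous one, time the symmetric rank-k updates C := alpha*(A*B^T + B*A^T) + beta*C and C := alpha*A*A^T + beta*C. With n = k = m as these drivers use them, the dominant costs are about 2*m^3 and m^3 flops respectively, which is what their MFlops formulas normalize by. A naive real SYRK for the 'U','N' case, included only to pin down that count:

/* Naive real SYRK, upper triangle, no transpose: C := alpha*A*A^T + beta*C
 * with A m-by-m.  m*(m+1)/2 entries, ~2m flops each  ->  ~m^3 flops. */
static void syrk_ref(int m, double alpha, const double *a, int lda,
                     double beta, double *c, int ldc) {
	for (int j = 0; j < m; j++)
		for (int i = 0; i <= j; i++) {
			double s = 0.;
			for (int l = 0; l < m; l++)
				s += a[(long)i + (long)l * (long)lda] *
				     a[(long)j + (long)l * (long)lda];
			c[(long)i + (long)j * (long)ldc] =
				alpha * s + beta * c[(long)i + (long)j * (long)ldc];
		}
}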
#endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *c; @@ -137,7 +66,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -159,7 +87,7 @@ int main(int argc, char *argv[]){ -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -172,20 +100,18 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - c[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + c[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } - gettimeofday( &start, (struct timezone *)0); + begin(); SYRK (&uplo, &trans, &m, &m, alpha, a, &m, beta, c, &m ); - gettimeofday( &stop, (struct timezone *)0); - - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + end(); - gettimeofday( &start, (struct timezone *)0); + time1 = getsec(); fprintf(stderr, " %10.2f MFlops\n", diff --git a/benchmark/tpmv.c b/benchmark/tpmv.c new file mode 100644 index 000000000..41f2e0fb8 --- /dev/null +++ b/benchmark/tpmv.c @@ -0,0 +1,132 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef TPMV + +#ifndef COMPLEX + +#ifdef DOUBLE +#define TPMV BLASFUNC(dtpmv) +#else +#define TPMV BLASFUNC(stpmv) +#endif + +#else + +#ifdef DOUBLE +#define TPMV BLASFUNC(ztpmv) +#else +#define TPMV BLASFUNC(ctpmv) +#endif + +#endif + +int main(int argc, char *argv[]) +{ + + FLOAT *a, *x; + char *p; + + char uplo ='U'; + char trans='N'; + char diag ='U'; + + int loops = 1; + int l; + blasint inc_x=1; + + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + if ((p = getenv("OPENBLAS_TRANS"))) trans=*p; + if ((p = getenv("OPENBLAS_DIAG"))) diag=*p; + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + + blasint n, i, j; + + int from = 1; + int to = 200; + int step = 1; + + double time1, timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Trans = %c Diag = %c Loops=%d Inc_x=%d\n", from, + to, step, uplo, trans, diag, loops, inc_x); + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL) { + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(n = from; n <= to; n += step) { + timeg=0; + + fprintf(stderr, " %6d : ", (int)n); + for(j = 0; j < n; j++) { + for(i = 0; i < n * COMPSIZE; i++) { + a[(long)i + (long)j * (long)n * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + for (i = 0; i < n * COMPSIZE * abs(inc_x); i++) { + x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + for (l = 0; l < loops; l++) { + begin(); + TPMV (&uplo, &trans, &diag, &n, a, x, &inc_x); + end(); + + time1 = getsec(); + timeg += time1; + } + + timeg /= loops; + fprintf(stderr, " %10.2f MFlops %12.9f sec\n", + COMPSIZE * COMPSIZE * 1. * (double)n * (double)n / timeg / 1.e6, timeg); + } + + return 0; +} + +// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__"))); diff --git a/benchmark/tpsv.c b/benchmark/tpsv.c new file mode 100644 index 000000000..ebfa29692 --- /dev/null +++ b/benchmark/tpsv.c @@ -0,0 +1,132 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "bench.h" + +#undef TPSV + +#ifndef COMPLEX + +#ifdef DOUBLE +#define TPSV BLASFUNC(dtpsv) +#else +#define TPSV BLASFUNC(stpsv) +#endif + +#else + +#ifdef DOUBLE +#define TPSV BLASFUNC(ztpsv) +#else +#define TPSV BLASFUNC(ctpsv) +#endif + +#endif + +int main(int argc, char *argv[]) +{ + + FLOAT *a, *x; + char *p; + + char uplo ='U'; + char trans='N'; + char diag ='U'; + + int loops = 1; + int l; + blasint inc_x=1; + + if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p; + if ((p = getenv("OPENBLAS_TRANS"))) trans=*p; + if ((p = getenv("OPENBLAS_DIAG"))) diag=*p; + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + + blasint n, i, j; + + int from = 1; + int to = 200; + int step = 1; + + double time1, timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Trans = %c Diag = %c Loops=%d Inc_x=%d\n", from, + to, step, uplo, trans, diag, loops, inc_x); + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL) { + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef __linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(n = from; n <= to; n += step) { + timeg=0; + + fprintf(stderr, " %6d : ", (int)n); + for(j = 0; j < n; j++) { + for(i = 0; i < n * COMPSIZE; i++) { + a[(long)i + (long)j * (long)n * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + for (i = 0; i < n * COMPSIZE * abs(inc_x); i++) { + x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + + for (l = 0; l < loops; l++) { + begin(); + TPSV (&uplo, &trans, &diag, &n, a, x, &inc_x); + end(); + + time1 = getsec(); + timeg += time1; + } + + timeg /= loops; + fprintf(stderr, " %10.2f MFlops %12.9f sec\n", + COMPSIZE * COMPSIZE * 1. 
* (double)n * (double)n / timeg / 1.e6, timeg); + } + + return 0; +} + +// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__"))); diff --git a/benchmark/trmm.c b/benchmark/trmm.c index 54c7972db..3ab9fc255 100644 --- a/benchmark/trmm.c +++ b/benchmark/trmm.c @@ -25,12 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -#include -#include -#ifdef __CYGWIN32__ -#include -#endif -#include "common.h" +#include "bench.h" #undef TRMM @@ -53,71 +48,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif -#if defined(__WIN32__) || defined(__WIN64__) - -#ifndef DELTA_EPOCH_IN_MICROSECS -#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL -#endif - -int gettimeofday(struct timeval *tv, void *tz){ - - FILETIME ft; - unsigned __int64 tmpres = 0; - static int tzflag; - - if (NULL != tv) - { - GetSystemTimeAsFileTime(&ft); - - tmpres |= ft.dwHighDateTime; - tmpres <<= 32; - tmpres |= ft.dwLowDateTime; - - /*converting file time to unix epoch*/ - tmpres /= 10; /*convert into microseconds*/ - tmpres -= DELTA_EPOCH_IN_MICROSECS; - tv->tv_sec = (long)(tmpres / 1000000UL); - tv->tv_usec = (long)(tmpres % 1000000UL); - } - - return 0; -} - -#endif - -#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 - -static void *huge_malloc(BLASLONG size){ - int shmid; - void *address; - -#ifndef SHM_HUGETLB -#define SHM_HUGETLB 04000 -#endif - - if ((shmid =shmget(IPC_PRIVATE, - (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), - SHM_HUGETLB | IPC_CREAT |0600)) < 0) { - printf( "Memory allocation failed(shmget).\n"); - exit(1); - } - - address = shmat(shmid, NULL, SHM_RND); - - if ((BLASLONG)address == -1){ - printf( "Memory allocation failed(shmat).\n"); - exit(1); - } - - shmctl(shmid, IPC_RMID, 0); - - return address; -} - -#define malloc huge_malloc - -#endif - int main(int argc, char *argv[]){ FLOAT *a, *b; @@ -141,7 +71,6 @@ int main(int argc, char *argv[]){ int to = 200; int step = 1; - struct timeval start, stop; double time1; argc--;argv++; @@ -162,7 +91,7 @@ int main(int argc, char *argv[]){ -#ifdef linux +#ifdef __linux srandom(getpid()); #endif @@ -175,20 +104,18 @@ int main(int argc, char *argv[]){ for(j = 0; j < m; j++){ for(i = 0; i < m * COMPSIZE; i++){ - a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; - b[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + b[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; } } - gettimeofday( &start, (struct timezone *)0); + begin(); TRMM (&side, &uplo, &trans, &diag, &m, &m, alpha, a, &m, b, &m); - gettimeofday( &stop, (struct timezone *)0); - - time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6; + end(); - gettimeofday( &start, (struct timezone *)0); + time1 = getsec(); fprintf(stderr, " %10.2f MFlops %10.6f sec\n", diff --git a/benchmark/trmv.c b/benchmark/trmv.c new file mode 100644 index 000000000..0e8088b54 --- /dev/null +++ b/benchmark/trmv.c @@ -0,0 +1,132 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. 
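The new tpmv.c and tpsv.c above differ from trmv.c, which begins here, in storage only: the packed ('P') routines take the triangle compressed column by column into a linear array, which is why their calls pass no leading dimension, and an m-by-m triangle occupies m*(m+1)/2 elements instead of m*m. For a column-major upper triangle the standard mapping is:

/* Packed upper triangle, column-major: column j holds j+1 entries, so
 * element (i,j) with i <= j lives at offset i + j*(j+1)/2.
 * A 3x3 triangle packs as (0,0)(0,1)(1,1)(0,2)(1,2)(2,2). */
static long tp_index_upper(long i, long j) {
	return i + j * (j + 1) / 2;
}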
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "bench.h"
+
+#undef TRMV
+
+#ifndef COMPLEX
+
+#ifdef DOUBLE
+#define TRMV BLASFUNC(dtrmv)
+#else
+#define TRMV BLASFUNC(strmv)
+#endif
+
+#else
+
+#ifdef DOUBLE
+#define TRMV BLASFUNC(ztrmv)
+#else
+#define TRMV BLASFUNC(ctrmv)
+#endif
+
+#endif
+
+int main(int argc, char *argv[])
+{
+
+  FLOAT *a, *x;
+  char *p;
+
+  char uplo ='U';
+  char trans='N';
+  char diag ='U';
+
+  int loops = 1;
+  int l;
+  blasint inc_x=1;
+
+  if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p;
+  if ((p = getenv("OPENBLAS_TRANS"))) trans=*p;
+  if ((p = getenv("OPENBLAS_DIAG"))) diag=*p;
+  if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p);
+  if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p);
+
+  blasint n, i, j;
+
+  int from = 1;
+  int to = 200;
+  int step = 1;
+
+  double time1, timeg;
+
+  argc--;argv++;
+
+  if (argc > 0) { from = atol(*argv); argc--; argv++;}
+  if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;}
+  if (argc > 0) { step = atol(*argv); argc--; argv++;}
+
+  fprintf(stderr, "From : %3d To : %3d Step = %3d Uplo = %c Trans = %c Diag = %c Loops=%d Inc_x=%d\n", from,
+          to, step, uplo, trans, diag, loops, inc_x);
+
+  if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL) {
+    fprintf(stderr,"Out of Memory!!\n");exit(1);
+  }
+
+  if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){
+    fprintf(stderr,"Out of Memory!!\n");exit(1);
+  }
+
+#ifdef __linux
+  srandom(getpid());
+#endif
+
+  fprintf(stderr, "   SIZE       Flops\n");
+
+  for(n = from; n <= to; n += step) {
+    timeg=0;
+
+    fprintf(stderr, " %6d : ", (int)n);
+    for(j = 0; j < n; j++) {
+      for(i = 0; i < n * COMPSIZE; i++) {
+        a[(long)i + (long)j * (long)n * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+      }
+    }
+
+    for (i = 0; i < n * COMPSIZE * abs(inc_x); i++) {
+      x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+    }
+
+    for (l = 0; l < loops; l++) {
+      begin();
+      TRMV (&uplo, &trans, &diag, &n, a, &n, x, &inc_x);
+      end();
+
+      time1 = getsec();
+      timeg += time1;
+    }
+
+    timeg /= loops;
+    fprintf(stderr, " %10.2f MFlops %12.9f sec\n",
+            COMPSIZE * COMPSIZE * 1. * (double)n * (double)n / timeg / 1.e6, timeg);
+  }
+
+  return 0;
+}
+
+// void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__")));
diff --git a/benchmark/trsm.c b/benchmark/trsm.c
index 9eae3380c..d2ebd7f54 100644
--- a/benchmark/trsm.c
+++ b/benchmark/trsm.c
@@ -25,12 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
+#include "bench.h"
 
 #undef TRSM
 
@@ -53,71 +48,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #endif
 
-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
-
-static void *huge_malloc(BLASLONG size){
-  int shmid;
-  void *address;
-
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-
-  if ((shmid =shmget(IPC_PRIVATE,
-                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
-                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
-    printf( "Memory allocation failed(shmget).\n");
-    exit(1);
-  }
-
-  address = shmat(shmid, NULL, SHM_RND);
-
-  if ((BLASLONG)address == -1){
-    printf( "Memory allocation failed(shmat).\n");
-    exit(1);
-  }
-
-  shmctl(shmid, IPC_RMID, 0);
-
-  return address;
-}
-
-#define malloc huge_malloc
-
-#endif
-
 int main(int argc, char *argv[]){
 
   FLOAT *a, *b;
@@ -151,7 +81,6 @@ int main(int argc, char *argv[]){
   int to = 200;
   int step = 1;
 
-  struct timeval start, stop;
  double time1;
 
   argc--;argv++;
@@ -172,7 +101,7 @@
 
 
 
-#ifdef linux
+#ifdef __linux
  srandom(getpid());
 #endif
 
@@ -191,18 +120,18 @@ int main(int argc, char *argv[]){
 
     for(j = 0; j < m; j++){
       for(i = 0; i < m * COMPSIZE; i++){
-	a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
-	b[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+	a[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+	b[(long)i + (long)j * (long)m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
       }
     }
 
-    gettimeofday( &start, (struct timezone *)0);
+    begin();
 
     TRSM (&side, &uplo, &trans, &diag, &m, &m, alpha, a, &m, b, &m);
 
-    gettimeofday( &stop, (struct timezone *)0);
+    end();
 
-    time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
+    time1 = getsec();
 
     timeg += time1;
  }
diff --git a/benchmark/trsv.c b/benchmark/trsv.c
new file mode 100644
index 000000000..66ac3a3c7
--- /dev/null
+++ b/benchmark/trsv.c
@@ -0,0 +1,143 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "bench.h"
+
+#undef GEMV
+#undef TRSV
+
+#ifndef COMPLEX
+
+#ifdef DOUBLE
+#define TRSV BLASFUNC(dtrsv)
+#else
+#define TRSV BLASFUNC(strsv)
+#endif
+
+#else
+
+#ifdef DOUBLE
+#define TRSV BLASFUNC(ztrsv)
+#else
+#define TRSV BLASFUNC(ctrsv)
+#endif
+
+#endif
+
+int main(int argc, char *argv[]){
+
+  FLOAT *a, *x;
+  blasint n = 0, i, j;
+  blasint inc_x=1;
+  int loops = 1;
+  int l;
+  char *p;
+
+  int from = 1;
+  int to = 200;
+  int step = 1;
+
+  time_t seconds = 0;
+
+  double time1,timeg;
+  long long nanos = 0;
+
+  argc--;argv++;
+
+  if (argc > 0) { from = atol(*argv); argc--; argv++;}
+  if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;}
+  if (argc > 0) { step = atol(*argv); argc--; argv++;}
+
+  char uplo ='L';
+  char transa = 'N';
+  char diag ='U';
+
+  if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p);
+  if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p);
+  if ((p = getenv("OPENBLAS_TRANSA"))) transa=*p;
+  if ((p = getenv("OPENBLAS_DIAG"))) diag=*p;
+  if ((p = getenv("OPENBLAS_UPLO"))) uplo=*p;
+
+  fprintf(stderr, "From : %3d To : %3d Step = %3d Transa = '%c' Inc_x = %d uplo=%c diag=%c loop = %d\n", from, to, step,transa,inc_x,
+          uplo,diag,loops);
+
+
+#ifdef __linux
+  srandom(getpid());
+#endif
+
+  fprintf(stderr, "   SIZE       Flops\n");
+  fprintf(stderr, "============================================\n");
+
+  for(n = from; n <= to; n += step)
+  {
+    timeg=0;
+    if (( a = (FLOAT *)malloc(sizeof(FLOAT) * n * n * COMPSIZE)) == NULL){
+      fprintf(stderr,"Out of Memory!!\n");exit(1);
+    }
+
+    if (( x = (FLOAT *)malloc(sizeof(FLOAT) * n * abs(inc_x) * COMPSIZE)) == NULL){
+      fprintf(stderr,"Out of Memory!!\n");exit(1);
+    }
+
+    for(j = 0; j < n; j++){
+      for(i = 0; i < n * COMPSIZE; i++){
+        a[i + j * n * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+      }
+    }
+
+    for(i = 0; i < n * COMPSIZE * abs(inc_x); i++){
+      x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
+    }
+
+    for(l =0;l< loops;l++){
+
+      begin();
+      TRSV(&uplo,&transa,&diag,&n,a,&n,x,&inc_x);
+      end();
+      time1 = getsec();
+      timeg += time1;
+    }
+
+    timeg /= loops;
+    long long muls = n*(n+1)/2.0;
+    long long adds = (n - 1.0)*n/2.0;
+
+    fprintf(stderr, "%10d %10.2f MFlops %10.6f sec\n", n,(muls+adds) / timeg * 1.e-6, timeg);
+    if(a != NULL){
+      free(a);
+    }
+
+    if( x != NULL){
+      free(x);
+    }
+
+  }
+
+  return 0;
+}
+
diff --git a/benchmark/zdot-intel.c b/benchmark/zdot-intel.c
index bb2c40f38..06cdde13a 100644
--- a/benchmark/zdot-intel.c
+++ b/benchmark/zdot-intel.c
@@ -25,90 +25,18 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#define RETURN_BY_STACK 1
-#include "common.h"
+#include "bench.h"
 
+#define RETURN_BY_STACK 1
 #undef DOT
-
 #ifdef DOUBLE
 #define DOT BLASFUNC(zdotu)
 #else
 #define DOT BLASFUNC(cdotu)
 #endif
-
-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
-
-static void *huge_malloc(BLASLONG size){
-  int shmid;
-  void *address;
-
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-
-  if ((shmid =shmget(IPC_PRIVATE,
-                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
-                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
-    printf( "Memory allocation failed(shmget).\n");
-    exit(1);
-  }
-
-  address = shmat(shmid, NULL, SHM_RND);
-
-  if ((BLASLONG)address == -1){
-    printf( "Memory allocation failed(shmat).\n");
-    exit(1);
-  }
-
-  shmctl(shmid, IPC_RMID, 0);
-
-  return address;
-}
-
-#define malloc huge_malloc
-
-#endif
-
 int main(int argc, char *argv[]){
 
   FLOAT *x, *y;
@@ -123,7 +51,6 @@ int main(int argc, char *argv[]){
   int to = 200;
   int step = 1;
 
-  struct timeval start, stop;
  double time1,timeg;
 
   argc--;argv++;
@@ -146,7 +73,7 @@
    fprintf(stderr,"Out of Memory!!\n");exit(1);
  }
 
-#ifdef linux
+#ifdef __linux
  srandom(getpid());
 #endif
 
@@ -170,13 +97,13 @@
    for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){
      y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
    }
 
-    gettimeofday( &start, (struct timezone *)0);
+    begin();
 
    DOT (&result, &m, x, &inc_x, y, &inc_y );
 
-    gettimeofday( &stop, (struct timezone *)0);
+    end();
 
-    time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
+    time1 = getsec();
 
    timeg += time1;
 
diff --git a/benchmark/zdot.c b/benchmark/zdot.c
index ed9d4d2e8..23b3efcad 100644
--- a/benchmark/zdot.c
+++ b/benchmark/zdot.c
@@ -25,13 +25,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
-#include <stdio.h>
-#include <stdlib.h>
-#ifdef __CYGWIN32__
-#include <sys/time.h>
-#endif
-#include "common.h"
-
+#include "bench.h"
 
 #undef DOT
 
@@ -42,72 +36,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define DOT BLASFUNC(cdotu)
 #endif
 
-
-#if defined(__WIN32__) || defined(__WIN64__)
-
-#ifndef DELTA_EPOCH_IN_MICROSECS
-#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
-#endif
-
-int gettimeofday(struct timeval *tv, void *tz){
-
-  FILETIME ft;
-  unsigned __int64 tmpres = 0;
-  static int tzflag;
-
-  if (NULL != tv)
-  {
-    GetSystemTimeAsFileTime(&ft);
-
-    tmpres |= ft.dwHighDateTime;
-    tmpres <<= 32;
-    tmpres |= ft.dwLowDateTime;
-
-    /*converting file time to unix epoch*/
-    tmpres /= 10;  /*convert into microseconds*/
-    tmpres -= DELTA_EPOCH_IN_MICROSECS;
-    tv->tv_sec = (long)(tmpres / 1000000UL);
-    tv->tv_usec = (long)(tmpres % 1000000UL);
-  }
-
-  return 0;
-}
-
-#endif
-
-#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0
-
-static void *huge_malloc(BLASLONG size){
-  int shmid;
-  void *address;
-
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-
-  if ((shmid =shmget(IPC_PRIVATE,
-                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
-                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
-    printf( "Memory allocation failed(shmget).\n");
-    exit(1);
-  }
-
-  address = shmat(shmid, NULL, SHM_RND);
-
-  if ((BLASLONG)address == -1){
-    printf( "Memory allocation failed(shmat).\n");
-    exit(1);
-  }
-
-  shmctl(shmid, IPC_RMID, 0);
-
-  return address;
-}
-
-#define malloc huge_malloc
-
-#endif
-
 int main(int argc, char *argv[]){
 
   FLOAT *x, *y;
@@ -122,7 +50,6 @@ int main(int argc, char *argv[]){
   int to = 200;
   int step = 1;
 
-  struct timeval start, stop;
  double time1,timeg;
 
   argc--;argv++;
@@ -145,7 +72,7 @@
    fprintf(stderr,"Out of Memory!!\n");exit(1);
  }
 
-#ifdef linux
+#ifdef __linux
  srandom(getpid());
 #endif
 
@@ -169,13 +96,15 @@
    for(i = 0; i < m * COMPSIZE * abs(inc_y); i++){
      y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
    }
 
-    gettimeofday( &start, (struct timezone *)0);
-
+    begin();
+#ifdef RETURN_BY_STACK
+    DOT (&result , &m, x, &inc_x, y, &inc_y );
+#else
    result = DOT (&m, x, &inc_x, y, &inc_y );
+#endif
+    end();
 
-    gettimeofday( &stop, (struct timezone *)0);
-
-    time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;
+    time1 = getsec();
 
    timeg += time1;
 
diff --git a/c_check b/c_check
index d93b756d5..9c8b1abac 100644
--- a/c_check
+++ b/c_check
@@ -5,9 +5,11 @@
 # Checking cross compile
 $hostos   = `uname -s | sed -e s/\-.*//`; chop($hostos);
-$hostarch = `uname -m | sed -e s/i.86/x86/`;chop($hostarch);
+$hostarch = `uname -m | sed -e s/i.86/x86/`;
+$hostarch = `uname -p` if ($hostos eq "AIX" || $hostos eq "SunOS");
+chop($hostarch);
 $hostarch = "x86_64" if ($hostarch eq "amd64");
-$hostarch = "arm" if ($hostarch =~ /^arm.*/);
+$hostarch = "arm" if ($hostarch ne "arm64" && $hostarch =~ /^arm.*/);
 $hostarch = "arm64" if ($hostarch eq "aarch64");
 $hostarch = "power" if ($hostarch =~ /^(powerpc|ppc).*/);
 $hostarch = "zarch" if ($hostarch eq "s390x");
@@ -18,11 +20,12 @@ $binary = $ENV{"BINARY"};
 
 $makefile = shift(@ARGV);
 $config   = shift(@ARGV);
 
-$compiler_name = join(" ", @ARGV);
+$compiler_name = shift(@ARGV);
+$flags = join(" ", @ARGV);
 
 # First, we need to know the target OS and compiler name
 
-$data = `$compiler_name -E ctest.c`;
+$data = `$compiler_name $flags -E ctest.c`;
 
 if ($?) {
    printf STDERR "C Compiler ($compiler_name) is something wrong.\n";
@@ -90,6 +93,7 @@ $architecture = ia64 if ($data =~ /ARCH_IA64/);
 $architecture = arm if ($data =~ /ARCH_ARM/);
 $architecture = arm64 if ($data =~ /ARCH_ARM64/);
 $architecture = zarch if ($data =~ /ARCH_ZARCH/);
+$architecture = riscv64 if ($data =~ /ARCH_RISCV64/);
 
 $defined = 0;
 
@@ -134,6 +138,11 @@ if (($architecture eq "x86") && ($os ne Darwin) && ($os ne SunOS)) {
  $binary =32;
 }
 
+if ($architecture eq "riscv64") {
+  $defined = 1;
+  $binary = 64;
+}
+
 if ($compiler eq "PGI") {
  $compiler_name .= " -tp p7" if ($binary eq "32");
  $compiler_name .= " -tp p7-64" if ($binary eq "64");
@@ -175,7 +184,7 @@ if ($defined == 0) {
 
 # Do again
 
-$data = `$compiler_name -E ctest.c`;
+$data = `$compiler_name $flags -E ctest.c`;
 
 if ($?) {
    printf STDERR "C Compiler ($compiler_name) is something wrong.\n";
@@ -188,14 +197,14 @@ if (($architecture eq "mips") || ($architecture eq "mips64")) {
    if ($@){
        warn "could not load PERL module File::Temp, so could not check MSA capatibility";
    } else {
-        $tmpf = new File::Temp( UNLINK => 1 );
+        $tmpf = new File::Temp( SUFFIX => '.c' , UNLINK => 1 );
        $code = '"addvi.b $w0, $w1, 1"';
-        $msa_flags = "-mmsa -mfp64 -msched-weight -mload-store-pairs";
+        $msa_flags = "-mmsa -mfp64 -mload-store-pairs";
        print $tmpf "#include <msa.h>\n\n";
        print $tmpf "void main(void){ __asm__ volatile($code); }\n";
-        $args = "$msa_flags -o $tmpf.o -x c $tmpf";
-        my @cmd = ("$compiler_name $args");
+        $args = "$msa_flags -o $tmpf.o $tmpf";
+        my @cmd = ("$compiler_name $flags $args >/dev/null 2>/dev/null");
        system(@cmd) == 0;
        if ($? != 0) {
            $have_msa = 0;
@@ -229,22 +238,56 @@ if (($architecture eq "x86") || ($architecture eq "x86_64")) {
        $no_avx512 = 0;
    } else {
 #       $tmpf = new File::Temp( UNLINK => 1 );
-        ($fh,$tmpf) = tempfile( UNLINK => 1 );
+        ($fh,$tmpf) = tempfile( SUFFIX => '.c' , UNLINK => 1 );
        $code = '"vbroadcastss -4 * 4(%rsi), %zmm2"';
        print $tmpf "#include <immintrin.h>\n\nint main(void){ __asm__ volatile($code); }\n";
-        $args = " -march=skylake-avx512 -c -o $tmpf.o -x c $tmpf";
-        my @cmd = ("$compiler_name $args >/dev/null 2>/dev/null");
+        $args = " -march=skylake-avx512 -c -o $tmpf.o $tmpf";
+        if ($compiler eq "PGI") {
+            $args = " -tp skylake -c -o $tmpf.o $tmpf";
+        }
+        my @cmd = ("$compiler_name $flags $args >/dev/null 2>/dev/null");
        system(@cmd) == 0;
        if ($? != 0) {
            $no_avx512 = 1;
        } else {
            $no_avx512 = 0;
        }
-        unlink("tmpf.o");
+        unlink("$tmpf.o");
+    }
+}
+
+$c11_atomics = 0;
+if ($data =~ /HAVE_C11/) {
+    eval "use File::Temp qw(tempfile)";
+    if ($@){
+        warn "could not load PERL module File::Temp, so could not check compiler compatibility with C11";
+        $c11_atomics = 0;
+    } else {
+        ($fh,$tmpf) = tempfile( SUFFIX => '.c' , UNLINK => 1 );
+        print $tmpf "#include <stdatomic.h>\nint main(void){}\n";
+        $args = " -c -o $tmpf.o $tmpf";
+        my @cmd = ("$compiler_name $flags $args >/dev/null 2>/dev/null");
+        system(@cmd) == 0;
+        if ($? != 0) {
+            $c11_atomics = 0;
+        } else {
+            $c11_atomics = 1;
+        }
+        unlink("$tmpf.o");
    }
 }
 
-$data = `$compiler_name -S ctest1.c && grep globl ctest1.s | head -n 1 && rm -f ctest1.s`;
+if ($compiler eq "GCC" &&( ($architecture eq "x86") || ($architecture eq "x86_64"))) {
+    $no_avx2 = 0;
+    $oldgcc = 0;
+    $data = `$compiler_name -dumpversion`;
+    if ($data <= 4.6) {
+        $no_avx2 = 1;
+        $oldgcc = 1;
+    }
+}
+
+$data = `$compiler_name $flags -S ctest1.c && grep globl ctest1.s | head -n 1 && rm -f ctest1.s`;
 $data =~ /globl\s([_\.]*)(.*)/;
@@ -267,7 +310,7 @@ $linker_l = "";
 $linker_a = "";
 
 {
-  $link = `$compiler_name -c ctest2.c -o ctest2.o 2>&1 && $compiler_name $openmp -v ctest2.o -o ctest2 2>&1 && rm -f ctest2.o ctest2 ctest2.exe`;
+  $link = `$compiler_name $flags -c ctest2.c -o ctest2.o 2>&1 && $compiler_name $flags $openmp -v ctest2.o -o ctest2 2>&1 && rm -f ctest2.o ctest2 ctest2.exe`;
 
  $link =~ s/\-Y\sP\,/\-Y/g;
@@ -305,6 +348,8 @@ $linker_a = "";
        && ($flags !~ /kernel32/)
        && ($flags !~ /advapi32/)
        && ($flags !~ /shell32/)
+       && ($flags !~ /omp/)
+       && ($flags !~ /[0-9]+/)
        ) {
      $linker_l .= $flags . " "
    }
@@ -333,6 +378,8 @@ print MAKEFILE "CEXTRALIB=$linker_L $linker_l $linker_a\n";
 print MAKEFILE "HAVE_MSA=1\n" if $have_msa eq 1;
 print MAKEFILE "MSA_FLAGS=$msa_flags\n" if $have_msa eq 1;
 print MAKEFILE "NO_AVX512=1\n" if $no_avx512 eq 1;
+print MAKEFILE "NO_AVX2=1\n" if $no_avx2 eq 1;
+print MAKEFILE "OLDGCC=1\n" if $oldgcc eq 1;
 
 $os =~ tr/[a-z]/[A-Z]/;
 $architecture =~ tr/[a-z]/[A-Z]/;
@@ -345,6 +392,8 @@ print CONFFILE "#define __32BIT__\t1\n" if $binformat eq bin32;
 print CONFFILE "#define __64BIT__\t1\n" if $binformat eq bin64;
 print CONFFILE "#define FUNDERSCORE\t$need_fu\n" if $need_fu ne "";
 print CONFFILE "#define HAVE_MSA\t1\n" if $have_msa eq 1;
+print CONFFILE "#define HAVE_C11\t1\n" if $c11_atomics eq 1;
+
 
 if ($os eq "LINUX") {
diff --git a/cblas.h b/cblas.h
index 1a87074d6..f0220eb99 100644
--- a/cblas.h
+++ b/cblas.h
@@ -25,6 +25,11 @@ char* openblas_get_config(void);
 /*Get the CPU corename on runtime.*/
 char* openblas_get_corename(void);
 
+#ifdef OPENBLAS_OS_LINUX
+/* Sets thread affinity for OpenBLAS threads. `thread_idx` is in [0, openblas_get_num_threads()-1].
*/ +int openblas_setaffinity(int thread_idx, size_t cpusetsize, cpu_set_t* cpu_set); +#endif + /* Get the parallelization type which is used by OpenBLAS */ int openblas_get_parallel(void); /* OpenBLAS is compiled for sequential use */ @@ -120,9 +125,14 @@ void cblas_zswap(OPENBLAS_CONST blasint n, void *x, OPENBLAS_CONST blasint incx, void cblas_srot(OPENBLAS_CONST blasint N, float *X, OPENBLAS_CONST blasint incX, float *Y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST float c, OPENBLAS_CONST float s); void cblas_drot(OPENBLAS_CONST blasint N, double *X, OPENBLAS_CONST blasint incX, double *Y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST double c, OPENBLAS_CONST double s); +void cblas_csrot(OPENBLAS_CONST blasint n, OPENBLAS_CONST void *x, OPENBLAS_CONST blasint incx, void *y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST float c, OPENBLAS_CONST float s); +void cblas_zdrot(OPENBLAS_CONST blasint n, OPENBLAS_CONST void *x, OPENBLAS_CONST blasint incx, void *y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST double c, OPENBLAS_CONST double s); void cblas_srotg(float *a, float *b, float *c, float *s); void cblas_drotg(double *a, double *b, double *c, double *s); +void cblas_crotg(void *a, void *b, float *c, void *s); +void cblas_zrotg(void *a, void *b, double *c, void *s); + void cblas_srotm(OPENBLAS_CONST blasint N, float *X, OPENBLAS_CONST blasint incX, float *Y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST float *P); void cblas_drotm(OPENBLAS_CONST blasint N, double *X, OPENBLAS_CONST blasint incX, double *Y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST double *P); @@ -377,6 +387,18 @@ void cblas_cgeadd(OPENBLAS_CONST enum CBLAS_ORDER CORDER,OPENBLAS_CONST blasint void cblas_zgeadd(OPENBLAS_CONST enum CBLAS_ORDER CORDER,OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST double *calpha, double *a, OPENBLAS_CONST blasint clda, OPENBLAS_CONST double *cbeta, double *c, OPENBLAS_CONST blasint cldc); +/*** BFLOAT16 and INT8 extensions ***/ +/* convert float array to BFLOAT16 array by rounding */ +void cblas_sbstobf16(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *in, OPENBLAS_CONST blasint incin, bfloat16 *out, OPENBLAS_CONST blasint incout); +/* convert double array to BFLOAT16 array by rounding */ +void cblas_sbdtobf16(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *in, OPENBLAS_CONST blasint incin, bfloat16 *out, OPENBLAS_CONST blasint incout); +/* convert BFLOAT16 array to float array */ +void cblas_sbf16tos(OPENBLAS_CONST blasint n, OPENBLAS_CONST bfloat16 *in, OPENBLAS_CONST blasint incin, float *out, OPENBLAS_CONST blasint incout); +/* convert BFLOAT16 array to double array */ +void cblas_dbf16tod(OPENBLAS_CONST blasint n, OPENBLAS_CONST bfloat16 *in, OPENBLAS_CONST blasint incin, double *out, OPENBLAS_CONST blasint incout); +/* dot production of BFLOAT16 input arrays, and output as float */ +float cblas_sbdot(OPENBLAS_CONST blasint n, OPENBLAS_CONST bfloat16 *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST bfloat16 *y, OPENBLAS_CONST blasint incy); +void cblas_sbgemv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE trans, OPENBLAS_CONST blasint m, OPENBLAS_CONST blasint n, OPENBLAS_CONST float alpha, OPENBLAS_CONST bfloat16 *a, OPENBLAS_CONST blasint lda, OPENBLAS_CONST bfloat16 *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float beta, float *y, OPENBLAS_CONST blasint incy); #ifdef __cplusplus } diff --git a/cmake/arch.cmake b/cmake/arch.cmake index 470ea2a8f..4451f9eaa 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -1,4 +1,3 @@ -## ## 
Author: Hank Anderson ## Description: Ported from portion of OpenBLAS/Makefile.system ## Sets various variables based on architecture. @@ -45,7 +44,15 @@ endif () if (DYNAMIC_ARCH) if (ARM64) - set(DYNAMIC_CORE ARMV8 CORTEXA53 CORTEXA57 CORTEXA72 CORTEXA73 FALKOR THUNDERX THUNDERX2T99) + set(DYNAMIC_CORE ARMV8 CORTEXA53 CORTEXA57 CORTEXA72 CORTEXA73 FALKOR THUNDERX THUNDERX2T99 TSV110 EMAG8180 NEOVERSEN1 THUNDERX3T110) + if (DYNAMIC_LIST) + set(DYNAMIC_CORE ARMV8 ${DYNAMIC_LIST}) + endif () + endif () + + if (POWER) + set(DYNAMIC_CORE POWER6 POWER8 POWER9 POWER10) + set(CCOMMON_OPT "${CCOMMON_OPT} -DHAVE_P10_SUPPORT") endif () if (X86) @@ -72,15 +79,21 @@ if (DYNAMIC_ARCH) set(DYNAMIC_CORE ${DYNAMIC_CORE} HASWELL ZEN) endif () if (NOT NO_AVX512) - set(DYNAMIC_CORE ${DYNAMIC_CORE} SKYLAKEX) + set(DYNAMIC_CORE ${DYNAMIC_CORE} SKYLAKEX COOPERLAKE) + string(REGEX REPLACE "-march=native" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") endif () if (DYNAMIC_LIST) - set(DYNAMIC_CORE PRESCOTT ${DYNAMIC_LIST}) + set(DYNAMIC_CORE PRESCOTT ${DYNAMIC_LIST}) endif () endif () + if (EXISTS ${PROJECT_SOURCE_DIR}/config_kernel.h) + message (FATAL_ERROR "Your build directory contains a file config_kernel.h, probably from a previous compilation with make. This will conflict with the cmake compilation and cause strange compiler errors - please remove the file before trying again") + endif () + if (NOT DYNAMIC_CORE) - unset(DYNAMIC_ARCH) + message (STATUS "DYNAMIC_ARCH is not supported on this architecture, removing from options") + unset(DYNAMIC_ARCH CACHE) endif () endif () diff --git a/cmake/cc.cmake b/cmake/cc.cmake index 98f9298f8..76952152b 100644 --- a/cmake/cc.cmake +++ b/cmake/cc.cmake @@ -3,7 +3,7 @@ ## Description: Ported from portion of OpenBLAS/Makefile.system ## Sets C related variables. 
-if (${CMAKE_C_COMPILER} STREQUAL "GNU" OR ${CMAKE_C_COMPILER} STREQUAL "LSB" OR ${CMAKE_C_COMPILER} STREQUAL "Clang") +if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU" OR ${CMAKE_C_COMPILER_ID} STREQUAL "LSB" OR ${CMAKE_C_COMPILER_ID} MATCHES "Clang") set(CCOMMON_OPT "${CCOMMON_OPT} -Wall") set(COMMON_PROF "${COMMON_PROF} -fno-inline") @@ -43,7 +43,7 @@ if (${CMAKE_C_COMPILER} STREQUAL "GNU" OR ${CMAKE_C_COMPILER} STREQUAL "LSB" OR endif () endif () -if (${CMAKE_C_COMPILER} STREQUAL "PGI") +if (${CMAKE_C_COMPILER_ID} STREQUAL "PGI") if (BINARY64) set(CCOMMON_OPT "${CCOMMON_OPT} -tp p7-64") else () @@ -51,7 +51,7 @@ if (${CMAKE_C_COMPILER} STREQUAL "PGI") endif () endif () -if (${CMAKE_C_COMPILER} STREQUAL "PATHSCALE") +if (${CMAKE_C_COMPILER_ID} STREQUAL "PATHSCALE") if (BINARY64) set(CCOMMON_OPT "${CCOMMON_OPT} -m64") else () @@ -59,7 +59,7 @@ if (${CMAKE_C_COMPILER} STREQUAL "PATHSCALE") endif () endif () -if (${CMAKE_C_COMPILER} STREQUAL "OPEN64") +if (${CMAKE_C_COMPILER_ID} STREQUAL "OPEN64") if (MIPS64) @@ -87,7 +87,7 @@ if (${CMAKE_C_COMPILER} STREQUAL "OPEN64") endif () endif () -if (${CMAKE_C_COMPILER} STREQUAL "SUN") +if (${CMAKE_C_COMPILER_ID} STREQUAL "SUN") set(CCOMMON_OPT "${CCOMMON_OPT} -w") if (X86) set(CCOMMON_OPT "${CCOMMON_OPT} -m32") @@ -96,3 +96,50 @@ if (${CMAKE_C_COMPILER} STREQUAL "SUN") endif () endif () +if (${CORE} STREQUAL SKYLAKEX) + if (NOT DYNAMIC_ARCH) + if (NOT NO_AVX512) + set (CCOMMON_OPT "${CCOMMON_OPT} -march=skylake-avx512") + endif () + endif () +endif () + +if (${CORE} STREQUAL COOPERLAKE) + if (NOT DYNAMIC_ARCH) + if (NOT NO_AVX512) + execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) + if (${GCC_VERSION} VERSION_GREATER 10.1 OR ${GCC_VERSION} VERSION_EQUAL 10.1) + set (CCOMMON_OPT "${CCOMMON_OPT} -march=cooperlake") + else () + set (CCOMMON_OPT "${CCOMMON_OPT} -march=skylake-avx512") + endif() + endif () + endif () +endif () + +if (NOT DYNAMIC_ARCH) + if (HAVE_AVX2) + set (CCOMMON_OPT "${CCOMMON_OPT} -mavx2") + endif () + if (HAVE_AVX) + set (CCOMMON_OPT "${CCOMMON_OPT} -mavx") + endif () + if (HAVE_FMA3) + set (CCOMMON_OPT "${CCOMMON_OPT} -mfma") + endif () + if (HAVE_SSE) + set (CCOMMON_OPT "${CCOMMON_OPT} -msse") + endif () + if (HAVE_SSE2) + set (CCOMMON_OPT "${CCOMMON_OPT} -msse2") + endif () + if (HAVE_SSE3) + set (CCOMMON_OPT "${CCOMMON_OPT} -msse3") + endif () + if (HAVE_SSSE3) + set (CCOMMON_OPT "${CCOMMON_OPT} -mssse3") + endif () + if (HAVE_SSE4_1) + set (CCOMMON_OPT "${CCOMMON_OPT} -msse4.1") + endif () +endif() diff --git a/cmake/f_check.cmake b/cmake/f_check.cmake index f877fc3e1..0f5d0e15d 100644 --- a/cmake/f_check.cmake +++ b/cmake/f_check.cmake @@ -21,7 +21,15 @@ # NEED2UNDERSCORES if (NOT NO_LAPACK) - enable_language(Fortran) + include(CheckLanguage) + check_language(Fortran) + if(CMAKE_Fortran_COMPILER) + enable_language(Fortran) + else() + message(STATUS "No Fortran compiler found, can build only BLAS but not LAPACK") + set (NOFORTRAN 1) + set (NO_LAPACK 1) + endif() else() include(CMakeForceCompiler) CMAKE_FORCE_Fortran_COMPILER(gfortran GNU) diff --git a/cmake/fc.cmake b/cmake/fc.cmake index adec28a91..fc1f9bb22 100644 --- a/cmake/fc.cmake +++ b/cmake/fc.cmake @@ -16,6 +16,7 @@ if (${F_COMPILER} STREQUAL "FLANG") if (USE_OPENMP) set(FCOMMON_OPT "${FCOMMON_OPT} -fopenmp") endif () + set(FCOMMON_OPT "${FCOMMON_OPT} -Mrecursive -Kieee") endif () if (${F_COMPILER} STREQUAL "G77") @@ -44,7 +45,10 @@ endif () if (${F_COMPILER} STREQUAL "GFORTRAN") set(CCOMMON_OPT "${CCOMMON_OPT} -DF_INTERFACE_GFORT") + # 
ensure reentrancy of lapack codes set(FCOMMON_OPT "${FCOMMON_OPT} -Wall -frecursive") + # work around ABI violation in passing string arguments from C + set(FCOMMON_OPT "${FCOMMON_OPT} -fno-optimize-sibling-calls") #Don't include -lgfortran, when NO_LAPACK=1 or lsbcc if (NOT NO_LAPACK) set(EXTRALIB "{EXTRALIB} -lgfortran") @@ -78,6 +82,7 @@ if (${F_COMPILER} STREQUAL "INTEL") if (INTERFACE64) set(FCOMMON_OPT "${FCOMMON_OPT} -i8") endif () + set(FCOMMON_OPT "${FCOMMON_OPT} -recursive") if (USE_OPENMP) set(FCOMMON_OPT "${FCOMMON_OPT} -openmp") endif () @@ -117,6 +122,7 @@ if (${F_COMPILER} STREQUAL "PGI") else () set(FCOMMON_OPT "${FCOMMON_OPT} -tp p7") endif () + set(FCOMMON_OPT "${FCOMMON_OPT} -Mrecursive") if (USE_OPENMP) set(FCOMMON_OPT "${FCOMMON_OPT} -mp") endif () diff --git a/cmake/kernel.cmake b/cmake/kernel.cmake index 9b238f004..0c102bae5 100644 --- a/cmake/kernel.cmake +++ b/cmake/kernel.cmake @@ -113,11 +113,33 @@ macro(SetDefaultL1) set(ZSUMKERNEL zsum.S) set(QSUMKERNEL sum.S) set(XSUMKERNEL zsum.S) +if (BUILD_BFLOAT16) + set(SHAMINKERNEL ../arm/amin.c) + set(SHAMAXKERNEL ../arm/amax.c) + set(SHMAXKERNEL ../arm/max.c) + set(SHMINKERNEL ../arm/min.c) + set(ISHAMAXKERNEL ../arm/iamax.c) + set(ISHAMINKERNEL ../arm/iamin.c) + set(ISHMAXKERNEL ../arm/imax.c) + set(ISHMINKERNEL ../arm/imin.c) + set(SHASUMKERNEL ../arm/asum.c) + set(SHAXPYKERNEL ../arm/axpy.c) + set(SHAXPBYKERNEL ../arm/axpby.c) + set(SHCOPYKERNEL ../arm/copy.c) + set(SBDOTKERNEL ../x86_64/sbdot.c) + set(SHROTKERNEL ../arm/rot.c) + set(SHSCALKERNEL ../arm/scal.c) + set(SHNRM2KERNEL ../arm/nrm2.c) + set(SHSUMKERNEL ../arm/sum.c) + set(SHSWAPKERNEL ../arm/swap.c) + set(TOBF16KERNEL ../x86_64/tobf16.c) + set(BF16TOKERNEL ../x86_64/bf16to.c) +endif () endmacro () macro(SetDefaultL2) - set(SGEMVNKERNEL gemv_n.S) - set(SGEMVTKERNEL gemv_t.S) + set(SGEMVNKERNEL ../arm/gemv_n.c) + set(SGEMVTKERNEL ../arm/gemv_t.c) set(DGEMVNKERNEL gemv_n.S) set(DGEMVTKERNEL gemv_t.S) set(CGEMVNKERNEL zgemv_n.S) @@ -161,6 +183,11 @@ macro(SetDefaultL2) set(XHEMV_L_KERNEL ../generic/zhemv_k.c) set(XHEMV_V_KERNEL ../generic/zhemv_k.c) set(XHEMV_M_KERNEL ../generic/zhemv_k.c) +if (BUILD_BFLOAT16) + set(SBGEMVNKERNEL ../x86_64/sbgemv_n.c) + set(SBGEMVTKERNEL ../x86_64/sbgemv_t.c) + set(SHGERKERNEL ../generic/ger.c) +endif () endmacro () macro(SetDefaultL3) @@ -168,4 +195,18 @@ macro(SetDefaultL3) set(DGEADD_KERNEL ../generic/geadd.c) set(CGEADD_KERNEL ../generic/zgeadd.c) set(ZGEADD_KERNEL ../generic/zgeadd.c) +if (BUILD_BFLOAT16) + set(SHGEADD_KERNEL ../generic/geadd.c) + set(SBGEMMKERNEL ../generic/gemmkernel_2x2.c) + set(SBGEMM_BETA ../generic/gemm_beta.c) + set(SBGEMMINCOPY ../generic/gemm_ncopy_2.c) + set(SBGEMMITCOPY ../generic/gemm_tcopy_2.c) + set(SBGEMMONCOPY ../generic/gemm_ncopy_2.c) + set(SBGEMMOTCOPY ../generic/gemm_tcopy_2.c) + set(SBGEMMINCOPYOBJ sbgemm_incopy.o) + set(SBGEMMITCOPYOBJ sbgemm_itcopy.o) + set(SBGEMMONCOPYOBJ sbgemm_oncopy.o) + set(SBGEMMOTCOPYOBJ sbgemm_otcopy.o) +endif () + endmacro () diff --git a/cmake/lapack.cmake b/cmake/lapack.cmake index d1d2cdd3b..73f2592ef 100644 --- a/cmake/lapack.cmake +++ b/cmake/lapack.cmake @@ -1,11 +1,12 @@ # Sources for compiling lapack-netlib. Can't use CMakeLists.txt because lapack-netlib already has its own cmake files. 
set(ALLAUX ilaenv.f ilaenv2stage.f ieeeck.f lsamen.f iparmq.f iparam2stage.F - ilaprec.f ilatrans.f ilauplo.f iladiag.f chla_transtype.f + ilaprec.f ilatrans.f ilauplo.f iladiag.f chla_transtype.f dlaset.f ../INSTALL/ilaver.f xerbla_array.f ../INSTALL/slamch.f) set(SCLAUX + scombssq.f sbdsvdx.f sstevx.f sstein.f sbdsdc.f sbdsqr.f sdisna.f slabad.f slacpy.f sladiv.f slae2.f slaebz.f slaed0.f slaed1.f slaed2.f slaed3.f slaed4.f slaed5.f slaed6.f @@ -25,6 +26,7 @@ set(SCLAUX set(DZLAUX dbdsdc.f + dbdsvdx.f dstevx.f dstein.f dbdsqr.f ddisna.f dlabad.f dlacpy.f dladiv.f dlae2.f dlaebz.f dlaed0.f dlaed1.f dlaed2.f dlaed3.f dlaed4.f dlaed5.f dlaed6.f dlaed7.f dlaed8.f dlaed9.f dlaeda.f dlaev2.f dlagtf.f @@ -35,14 +37,14 @@ set(DZLAUX dlartg.f dlaruv.f dlas2.f dlascl.f dlasd0.f dlasd1.f dlasd2.f dlasd3.f dlasd4.f dlasd5.f dlasd6.f dlasd7.f dlasd8.f dlasda.f dlasdq.f dlasdt.f - dlaset.f dlasq1.f dlasq2.f dlasq3.f dlasq4.f dlasq5.f dlasq6.f + dlasq1.f dlasq2.f dlasq3.f dlasq4.f dlasq5.f dlasq6.f dlasr.f dlasrt.f dlassq.f dlasv2.f dpttrf.f dstebz.f dstedc.f dsteqr.f dsterf.f dlaisnan.f disnan.f dlartgp.f dlartgs.f ../INSTALL/dlamch.f ../INSTALL/dsecnd_${TIMER}.f) set(SLASRC - sbdsvdx.f sgbbrd.f sgbcon.f sgbequ.f sgbrfs.f sgbsv.f + sgbbrd.f sgbcon.f sgbequ.f sgbrfs.f sgbsv.f sgbsvx.f sgbtf2.f sgbtrf.f sgbtrs.f sgebak.f sgebal.f sgebd2.f sgebrd.f sgecon.f sgeequ.f sgees.f sgeesx.f sgeev.f sgeevx.f sgehd2.f sgehrd.f sgelq2.f sgelqf.f @@ -83,8 +85,8 @@ set(SLASRC ssbev.f ssbevd.f ssbevx.f ssbgst.f ssbgv.f ssbgvd.f ssbgvx.f ssbtrd.f sspcon.f sspev.f sspevd.f sspevx.f sspgst.f sspgv.f sspgvd.f sspgvx.f ssprfs.f sspsv.f sspsvx.f ssptrd.f - ssptrf.f ssptri.f ssptrs.f sstegr.f sstein.f sstev.f sstevd.f sstevr.f - sstevx.f ssycon.f ssyev.f ssyevd.f ssyevr.f ssyevx.f ssygs2.f + ssptrf.f ssptri.f ssptrs.f sstegr.f sstev.f sstevd.f sstevr.f + ssycon.f ssyev.f ssyevd.f ssyevr.f ssyevx.f ssygs2.f ssygst.f ssygv.f ssygvd.f ssygvx.f ssyrfs.f ssysv.f ssysvx.f ssytd2.f ssytf2.f ssytrd.f ssytrf.f ssytri.f ssytri2.f ssytri2x.f ssyswapr.f ssytrs.f ssytrs2.f @@ -115,7 +117,9 @@ set(SLASRC stplqt.f stplqt2.f stpmlqt.f ssytrd_2stage.f ssytrd_sy2sb.f ssytrd_sb2st.F ssb2st_kernels.f ssyevd_2stage.f ssyev_2stage.f ssyevx_2stage.f ssyevr_2stage.f - ssbev_2stage.f ssbevx_2stage.f ssbevd_2stage.f ssygv_2stage.f) + ssbev_2stage.f ssbevx_2stage.f ssbevd_2stage.f ssygv_2stage.f + sgesvdq.f slaorhr_col_getrfnp.f + slaorhr_col_getrfnp2.f sorgtsqr.f sorhr_col.f ) set(SXLASRC sgesvxx.f sgerfsx.f sla_gerfsx_extended.f sla_geamv.f sla_gercond.f sla_gerpvgrw.f ssysvxx.f ssyrfsx.f @@ -210,7 +214,9 @@ set(CLASRC ctplqt.f ctplqt2.f ctpmlqt.f chetrd_2stage.f chetrd_he2hb.f chetrd_hb2st.F chb2st_kernels.f cheevd_2stage.f cheev_2stage.f cheevx_2stage.f cheevr_2stage.f - chbev_2stage.f chbevx_2stage.f chbevd_2stage.f chegv_2stage.f) + chbev_2stage.f chbevx_2stage.f chbevd_2stage.f chegv_2stage.f + cgesvdq.f claunhr_col_getrfnp.f claunhr_col_getrfnp2.f + cungtsqr.f cunhr_col.f ) set(CXLASRC cgesvxx.f cgerfsx.f cla_gerfsx_extended.f cla_geamv.f cla_gercond_c.f cla_gercond_x.f cla_gerpvgrw.f @@ -225,7 +231,7 @@ set(CXLASRC cgesvxx.f cgerfsx.f cla_gerfsx_extended.f cla_geamv.f cla_lin_berr.f clarscl2.f clascl2.f cla_wwaddw.f) set(DLASRC - dbdsvdx.f dgbbrd.f dgbcon.f dgbequ.f dgbrfs.f dgbsv.f + dgbbrd.f dgbcon.f dgbequ.f dgbrfs.f dgbsv.f dgbsvx.f dgbtf2.f dgbtrf.f dgbtrs.f dgebak.f dgebal.f dgebd2.f dgebrd.f dgecon.f dgeequ.f dgees.f dgeesx.f dgeev.f dgeevx.f dgehd2.f dgehrd.f dgelq2.f dgelqf.f @@ -266,8 +272,8 @@ set(DLASRC dsbev.f dsbevd.f dsbevx.f 
dsbgst.f dsbgv.f dsbgvd.f dsbgvx.f dsbtrd.f dspcon.f dspev.f dspevd.f dspevx.f dspgst.f dspgv.f dspgvd.f dspgvx.f dsprfs.f dspsv.f dspsvx.f dsptrd.f - dsptrf.f dsptri.f dsptrs.f dstegr.f dstein.f dstev.f dstevd.f dstevr.f - dstevx.f dsycon.f dsyev.f dsyevd.f dsyevr.f + dsptrf.f dsptri.f dsptrs.f dstegr.f dstev.f dstevd.f dstevr.f + dsycon.f dsyev.f dsyevd.f dsyevr.f dsyevx.f dsygs2.f dsygst.f dsygv.f dsygvd.f dsygvx.f dsyrfs.f dsysv.f dsysvx.f dsytd2.f dsytf2.f dsytrd.f dsytrf.f dsytri.f dsytrs.f dsytrs2.f @@ -299,7 +305,9 @@ set(DLASRC dtplqt.f dtplqt2.f dtpmlqt.f dsytrd_2stage.f dsytrd_sy2sb.f dsytrd_sb2st.F dsb2st_kernels.f dsyevd_2stage.f dsyev_2stage.f dsyevx_2stage.f dsyevr_2stage.f - dsbev_2stage.f dsbevx_2stage.f dsbevd_2stage.f dsygv_2stage.f) + dsbev_2stage.f dsbevx_2stage.f dsbevd_2stage.f dsygv_2stage.f + dcombssq.f dgesvdq.f dlaorhr_col_getrfnp.f + dlaorhr_col_getrfnp2.f dorgtsqr.f dorhr_col.f ) set(DXLASRC dgesvxx.f dgerfsx.f dla_gerfsx_extended.f dla_geamv.f dla_gercond.f dla_gerpvgrw.f dsysvxx.f dsyrfsx.f @@ -398,7 +406,9 @@ set(ZLASRC zgelq.f zlaswlq.f zlamswlq.f zgemlq.f zhetrd_2stage.f zhetrd_he2hb.f zhetrd_hb2st.F zhb2st_kernels.f zheevd_2stage.f zheev_2stage.f zheevx_2stage.f zheevr_2stage.f - zhbev_2stage.f zhbevx_2stage.f zhbevd_2stage.f zhegv_2stage.f) + zhbev_2stage.f zhbevx_2stage.f zhbevd_2stage.f zhegv_2stage.f + zgesvdq.f zlaunhr_col_getrfnp.f zlaunhr_col_getrfnp2.f + zungtsqr.f zunhr_col.f) set(ZXLASRC zgesvxx.f zgerfsx.f zla_gerfsx_extended.f zla_geamv.f zla_gercond_c.f zla_gercond_x.f zla_gerpvgrw.f zsysvxx.f zsyrfsx.f @@ -466,12 +476,16 @@ endif() if(BUILD_COMPLEX) set(LA_REL_SRC ${LA_REL_SRC} ${CLASRC} ${ZCLASRC} ${ALLAUX} ${SCLAUX}) SET(LA_GEN_SRC ${LA_GEN_SRC} ${CMATGEN} ${SCATGEN}) - message(STATUS "Building Complex Precision") + message(STATUS "Building Single Precision Complex") endif() if(BUILD_COMPLEX16) set(LA_REL_SRC ${LA_REL_SRC} ${ZLASRC} ${ZCLASRC} ${ALLAUX} ${DZLAUX}) SET(LA_GEN_SRC ${LA_GEN_SRC} ${ZMATGEN} ${DZATGEN}) - message(STATUS "Building Double Complex Precision") +# for zlange/zlanhe + if (NOT BUILD_DOUBLE) + set (LA_REL_SRC ${LA_REL_SRC} dcombssq.f) + endif () + message(STATUS "Building Double Precision Complex") endif() # add lapack-netlib folder to the sources diff --git a/cmake/lapacke.cmake b/cmake/lapacke.cmake index 0fc88b882..f10905c4d 100644 --- a/cmake/lapacke.cmake +++ b/cmake/lapacke.cmake @@ -715,6 +715,8 @@ set(DSRC lapacke_dgesv_work.c lapacke_dgesvd.c lapacke_dgesvd_work.c + lapacke_dgesvdq.c + lapacke_dgesvdq_work.c lapacke_dgesvdx.c lapacke_dgesvdx_work.c lapacke_dgesvj.c @@ -1287,6 +1289,8 @@ set(SSRC lapacke_sgesv_work.c lapacke_sgesvd.c lapacke_sgesvd_work.c + lapacke_sgesvdq.c + lapacke_sgesvdq_work.c lapacke_sgesvdx.c lapacke_sgesvdx_work.c lapacke_sgesvj.c @@ -1853,6 +1857,8 @@ set(ZSRC lapacke_zgesv_work.c lapacke_zgesvd.c lapacke_zgesvd_work.c + lapacke_zgesvdq.c + lapacke_zgesvdq_work.c lapacke_zgesvdx.c lapacke_zgesvdx_work.c lapacke_zgesvj.c diff --git a/cmake/openblas.pc.in b/cmake/openblas.pc.in index df4b2ab06..0bd49f996 100644 --- a/cmake/openblas.pc.in +++ b/cmake/openblas.pc.in @@ -7,5 +7,5 @@ Name: OpenBLAS Description: OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version Version: @OPENBLAS_VERSION@ URL: https://github.com/xianyi/OpenBLAS -Libs: -L${libdir} -lopenblas${libsuffix} +Libs: @OpenMP_C_FLAGS@ -L${libdir} -lopenblas${libsuffix} Cflags: -I${includedir} diff --git a/cmake/os.cmake b/cmake/os.cmake index 2d25e7aaa..e24059dd5 100644 --- a/cmake/os.cmake +++ 
b/cmake/os.cmake @@ -8,7 +8,7 @@ if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") set(NO_EXPRECISION 1) endif () -if (${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD|OpenBSD|NetBSD|DragonFly") +if (${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD|OpenBSD|NetBSD|DragonFly|Darwin") set(EXTRALIB "${EXTRALIB} -lm") set(NO_EXPRECISION 1) endif () @@ -84,6 +84,14 @@ if (X86) set(NO_EXPRECISION 1) endif () +if (DYNAMIC_ARCH) +if (TARGET) +if (${TARGET} STREQUAL "GENERIC") + set(NO_EXPRECISION 1) +endif () +endif () +endif () + if (UTEST_CHECK) set(CCOMMON_OPT "${CCOMMON_OPT} -DUTEST_CHECK") set(SANITY_CHECK 1) diff --git a/cmake/prebuild.cmake b/cmake/prebuild.cmake index a67c44bf5..da7686c33 100644 --- a/cmake/prebuild.cmake +++ b/cmake/prebuild.cmake @@ -16,6 +16,8 @@ # HAVE_SSE2 # HAVE_SSE3 # MAKE +# SBGEMM_UNROLL_M +# SBGEMM_UNROLL_N # SGEMM_UNROLL_M # SGEMM_UNROLL_N # DGEMM_UNROLL_M @@ -59,6 +61,9 @@ set(FU "") if (APPLE OR (MSVC AND NOT ${CMAKE_C_COMPILER_ID} MATCHES "Clang")) set(FU "_") endif() +if(MINGW AND NOT MINGW64) + set(FU "_") +endif() set(COMPILER_ID ${CMAKE_C_COMPILER_ID}) if (${COMPILER_ID} STREQUAL "GNU") @@ -82,6 +87,11 @@ endif () # f_check if (NOT NOFORTRAN) include("${PROJECT_SOURCE_DIR}/cmake/f_check.cmake") +else () + file(APPEND ${TARGET_CONF_TEMP} + "#define BUNDERSCORE _\n" + "#define NEEDBUNDERSCORE 1\n") + set(BU "_") endif () # Cannot run getarch on target if we are cross-compiling @@ -97,8 +107,39 @@ if (DEFINED CORE AND CMAKE_CROSSCOMPILING AND NOT (${HOST_OS} STREQUAL "WINDOWSS # Perhaps this should be inside a different file as it grows larger file(APPEND ${TARGET_CONF_TEMP} "#define ${TCORE}\n" + "#define CORE_${TCORE}\n" "#define CHAR_CORENAME \"${TCORE}\"\n") - if ("${TCORE}" STREQUAL "ARMV7") + if ("${TCORE}" STREQUAL "CORE2") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_DATA_SIZE\t32768\n" + "#define L1_DATA_LINESIZE\t64\n" + "#define L2_SIZE\t1048576\n" + "#define L2_LINESIZE\t64\n" + "#define DTB_DEFAULT_ENTRIES\t256\n" + "#define DTB_SIZE\t4096\n" + "#define HAVE_CMOV\n" + "#define HAVE_MMX\n" + "#define HAVE_SSE\n" + "#define HAVE_SSE2\n" + "#define HAVE_SSE3\n" + "#define HAVE_SSSE3\n" + "#define SLOCAL_BUFFER_SIZE\t16384\n" + "#define DLOCAL_BUFFER_SIZE\t16384\n" + "#define CLOCAL_BUFFER_SIZE\t16384\n" + "#define ZLOCAL_BUFFER_SIZE\t16384\n") + set(SGEMM_UNROLL_M 8) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 4) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 4) + set(CGEMM_UNROLL_N 2) + set(ZGEMM_UNROLL_M 2) + set(ZGEMM_UNROLL_N 2) + set(CGEMM3M_UNROLL_M 8) + set(CGEMM3M_UNROLL_N 4) + set(ZGEMM3M_UNROLL_M 4) + set(ZGEMM3M_UNROLL_N 4) + elseif ("${TCORE}" STREQUAL "ARMV7") file(APPEND ${TARGET_CONF_TEMP} "#define L1_DATA_SIZE\t65536\n" "#define L1_DATA_LINESIZE\t32\n" @@ -113,6 +154,10 @@ if (DEFINED CORE AND CMAKE_CROSSCOMPILING AND NOT (${HOST_OS} STREQUAL "WINDOWSS set(SGEMM_UNROLL_N 4) set(DGEMM_UNROLL_M 4) set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 2) + set(CGEMM_UNROLL_N 2) + set(ZGEMM_UNROLL_M 2) + set(ZGEMM_UNROLL_N 2) elseif ("${TCORE}" STREQUAL "ARMV8") file(APPEND ${TARGET_CONF_TEMP} "#define L1_DATA_SIZE\t32768\n" @@ -150,8 +195,13 @@ if (DEFINED CORE AND CMAKE_CROSSCOMPILING AND NOT (${HOST_OS} STREQUAL "WINDOWSS "#define HAVE_VFP\n" "#define HAVE_NEON\n" "#define ARMV8\n") +if ("${TCORE}" STREQUAL "CORTEXA57") set(SGEMM_UNROLL_M 16) set(SGEMM_UNROLL_N 4) +else () + set(SGEMM_UNROLL_M 8) + set(SGEMM_UNROLL_N 8) +endif () set(DGEMM_UNROLL_M 8) set(DGEMM_UNROLL_N 4) set(CGEMM_UNROLL_M 8) @@ -186,6 +236,33 @@ if (DEFINED CORE AND CMAKE_CROSSCOMPILING AND NOT 
(${HOST_OS} STREQUAL "WINDOWSS set(ZGEMM_UNROLL_M 4) set(ZGEMM_UNROLL_N 4) set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "NEOVERSEN1") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_CODE_SIZE\t65536\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t4\n" + "#define L1_DATA_SIZE\t65536\n" + "#define L1_DATA_LINESIZE\t64\n" + "#define L1_DATA_ASSOCIATIVE\t2\n" + "#define L2_SIZE\t1048576\n\n" + "#define L2_LINESIZE\t64\n" + "#define L2_ASSOCIATIVE\t16\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n" + "#define HAVE_VFPV4\n" + "#define HAVE_VFPV3\n" + "#define HAVE_VFP\n" + "#define HAVE_NEON\n" + "#define ARMV8\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) elseif ("${TCORE}" STREQUAL "FALKOR") file(APPEND ${TARGET_CONF_TEMP} "#define L1_CODE_SIZE\t65536\n" @@ -266,7 +343,159 @@ if (DEFINED CORE AND CMAKE_CROSSCOMPILING AND NOT (${HOST_OS} STREQUAL "WINDOWSS set(ZGEMM_UNROLL_M 4) set(ZGEMM_UNROLL_N 4) set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "THUNDERX3T110") + file(APPEND ${TARGET_CONF_TEMP} + "#define THUNDERX3T110\n" + "#define L1_CODE_SIZE\t65536\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t8\n" + "#define L1_DATA_SIZE\t65536\n" + "#define L1_DATA_LINESIZE\t64\n" + "#define L1_DATA_ASSOCIATIVE\t8\n" + "#define L2_SIZE\t524288\n" + "#define L2_LINESIZE\t64\n" + "#define L2_ASSOCIATIVE\t8\n" + "#define L3_SIZE\t94371840\n" + "#define L3_LINESIZE\t64\n" + "#define L3_ASSOCIATIVE\t32\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n" + "#define ARMV8\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "TSV110") + file(APPEND ${TARGET_CONF_TEMP} + "#define ARMV8\n" + "#define L1_CODE_SIZE\t65536\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t4\n" + "#define L1_DATA_SIZE\t65536\n" + "#define L1_DATA_LINESIZE\t64\n" + "#define L1_DATA_ASSOCIATIVE\t4\n" + "#define L2_SIZE\t524288\n" + "#define L2_LINESIZE\t64\n" + "#define L2_ASSOCIATIVE\t8\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "EMAG8180") + file(APPEND ${TARGET_CONF_TEMP} + "#define ARMV8\n" + "#define L1_CODE_SIZE\t32768\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t4\n" + "#define L1_DATA_SIZE\t32768\n" + "#define L1_DATA_LINESIZE\t64\n" + "#define L1_DATA_ASSOCIATIVE\t4\n" + "#define L2_SIZE\t5262144\n" + "#define L2_LINESIZE\t64\n" + "#define L2_ASSOCIATIVE\t8\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) +elseif ("${TCORE}" STREQUAL "VORTEX") + file(APPEND ${TARGET_CONF_TEMP} + "#define ARMV8\n" + "#define L1_CODE_SIZE\t32768\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t4\n" + "#define L1_DATA_SIZE\t32768\n" + "#define 
L1_DATA_LINESIZE\t64\n" + "#define L1_DATA_ASSOCIATIVE\t4\n" + "#define L2_SIZE\t5262144\n" + "#define L2_LINESIZE\t64\n" + "#define L2_ASSOCIATIVE\t8\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "POWER6") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_DATA_SIZE 32768\n" + "#define L1_DATA_LINESIZE 128\n" + "#define L2_SIZE 524288\n" + "#define L2_LINESIZE 128 \n" + "#define DTB_DEFAULT_ENTRIES 128\n" + "#define DTB_SIZE 4096\n" + "#define L2_ASSOCIATIVE 8\n") + set(SGEMM_UNROLL_M 4) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 4) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 2) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 2) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 8) + elseif ("${TCORE}" STREQUAL "POWER8") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_DATA_SIZE 32768\n" + "#define L1_DATA_LINESIZE 128\n" + "#define L2_SIZE 524288\n" + "#define L2_LINESIZE 128 \n" + "#define DTB_DEFAULT_ENTRIES 128\n" + "#define DTB_SIZE 4096\n" + "#define L2_ASSOCIATIVE 8\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 8) + set(DGEMM_UNROLL_M 16) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 8) + set(ZGEMM_UNROLL_N 2) + set(SYMV_P 8) + elseif ("${TCORE}" STREQUAL "POWER9" OR "${TCORE}" STREQUAL "POWER10") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_DATA_SIZE 32768\n" + "#define L1_DATA_LINESIZE 128\n" + "#define L2_SIZE 524288\n" + "#define L2_LINESIZE 128 \n" + "#define DTB_DEFAULT_ENTRIES 128\n" + "#define DTB_SIZE 4096\n" + "#define L2_ASSOCIATIVE 8\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 8) + set(DGEMM_UNROLL_M 16) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 8) + set(ZGEMM_UNROLL_N 2) + set(SYMV_P 8) endif() + set(SBGEMM_UNROLL_M 8) + set(SBGEMM_UNROLL_N 4) # Or should this actually be NUM_CORES? 
if (${NUM_THREADS} GREATER 0) @@ -301,6 +530,9 @@ else(NOT CMAKE_CROSSCOMPILING) set(GETARCH_FLAGS ${GETARCH_FLAGS} -DFORCE_GENERIC) else() list(APPEND GETARCH_SRC ${PROJECT_SOURCE_DIR}/cpuid.S) + if (DEFINED TARGET_CORE) + set(GETARCH_FLAGS ${GETARCH_FLAGS} -DFORCE_${TARGET_CORE}) + endif () endif () if ("${CMAKE_SYSTEM_NAME}" STREQUAL "WindowsStore") @@ -315,7 +547,7 @@ else(NOT CMAKE_CROSSCOMPILING) if (NOT "${CMAKE_SYSTEM_NAME}" STREQUAL "WindowsStore") try_compile(GETARCH_RESULT ${GETARCH_DIR} SOURCES ${GETARCH_SRC} - COMPILE_DEFINITIONS ${EXFLAGS} ${GETARCH_FLAGS} -I${GETARCH_DIR} -I"${PROJECT_SOURCE_DIR}" -I"${PROJECT_BINARY_DIR}" + COMPILE_DEFINITIONS ${EXFLAGS} ${GETARCH_FLAGS} -I"${GETARCH_DIR}" -I"${PROJECT_SOURCE_DIR}" -I"${PROJECT_BINARY_DIR}" OUTPUT_VARIABLE GETARCH_LOG COPY_FILE ${PROJECT_BINARY_DIR}/${GETARCH_BIN} ) @@ -324,6 +556,21 @@ else(NOT CMAKE_CROSSCOMPILING) MESSAGE(FATAL_ERROR "Compiling getarch failed ${GETARCH_LOG}") endif () endif () + unset (HAVE_AVX2) + unset (HAVE_AVX) + unset (HAVE_FMA3) + unset (HAVE_MMX) + unset (HAVE_SSE) + unset (HAVE_SSE2) + unset (HAVE_SSE3) + unset (HAVE_SSSE3) + unset (HAVE_SSE4A) + unset (HAVE_SSE4_1) + unset (HAVE_SSE4_2) + unset (HAVE_NEON) + unset (HAVE_VFP) + unset (HAVE_VFPV3) + unset (HAVE_VFPV4) message(STATUS "Running getarch") # use the cmake binary w/ the -E param to run a shell command in a cross-platform way @@ -343,7 +590,7 @@ execute_process(COMMAND "${PROJECT_BINARY_DIR}/${GETARCH_BIN}" 1 OUTPUT_VARIABLE if (NOT "${CMAKE_SYSTEM_NAME}" STREQUAL "WindowsStore") try_compile(GETARCH2_RESULT ${GETARCH2_DIR} SOURCES ${PROJECT_SOURCE_DIR}/getarch_2nd.c - COMPILE_DEFINITIONS ${EXFLAGS} ${GETARCH_FLAGS} ${GETARCH2_FLAGS} -I${GETARCH2_DIR} -I"${PROJECT_SOURCE_DIR}" -I"${PROJECT_BINARY_DIR}" + COMPILE_DEFINITIONS ${EXFLAGS} ${GETARCH_FLAGS} ${GETARCH2_FLAGS} -I"${GETARCH2_DIR}" -I"${PROJECT_SOURCE_DIR}" -I"${PROJECT_BINARY_DIR}" OUTPUT_VARIABLE GETARCH2_LOG COPY_FILE ${PROJECT_BINARY_DIR}/${GETARCH2_BIN} ) diff --git a/cmake/system.cmake b/cmake/system.cmake index d0f560872..66e95c6d3 100644 --- a/cmake/system.cmake +++ b/cmake/system.cmake @@ -33,7 +33,7 @@ endif () if (DEFINED BINARY AND DEFINED TARGET AND BINARY EQUAL 32) message(STATUS "Compiling a ${BINARY}-bit binary.") set(NO_AVX 1) - if (${TARGET} STREQUAL "HASWELL" OR ${TARGET} STREQUAL "SANDYBRIDGE" OR ${TARGET} STREQUAL "SKYLAKEX") + if (${TARGET} STREQUAL "HASWELL" OR ${TARGET} STREQUAL "SANDYBRIDGE" OR ${TARGET} STREQUAL "SKYLAKEX" OR ${TARGET} STREQUAL "COOPERLAKE") set(TARGET "NEHALEM") endif () if (${TARGET} STREQUAL "BULLDOZER" OR ${TARGET} STREQUAL "PILEDRIVER" OR ${TARGET} STREQUAL "ZEN") @@ -44,27 +44,25 @@ if (DEFINED BINARY AND DEFINED TARGET AND BINARY EQUAL 32) endif () endif () -if (DEFINED TARGET) - if (${TARGET} STREQUAL "SKYLAKEX" AND NOT NO_AVX512) - set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512") - endif() - if (${TARGET} STREQUAL "HASWELL" AND NOT NO_AVX2) - if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU") - execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) - if (${GCC_VERSION} VERSION_GREATER 4.7 OR ${GCC_VERSION} VERSION_EQUAL 4.7) - set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2") - endif() - elseif (${CMAKE_C_COMPILER_ID} STREQUAL "CLANG") - set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2") - endif() - endif() -endif() if (DEFINED TARGET) + message(STATUS "-- -- -- -- -- -- -- -- -- -- -- -- --") message(STATUS "Targeting the ${TARGET} architecture.") set(GETARCH_FLAGS 
"-DFORCE_${TARGET}") endif () +# On x86_64 build getarch with march=native. This is required to detect AVX512 support in getarch. +if (X86_64 AND NOT ${CMAKE_C_COMPILER_ID} STREQUAL "PGI") + set(GETARCH_FLAGS "${GETARCH_FLAGS} -march=native") +endif () + +# On x86 no AVX support is available +if (X86 OR X86_64) +if ((DEFINED BINARY AND BINARY EQUAL 32) OR ("$CMAKE_SIZEOF_VOID_P}" EQUAL "4")) + set(GETARCH_FLAGS "${GETARCH_FLAGS} -DNO_AVX -DNO_AVX2 -DNO_AVX512") +endif () +endif () + if (INTERFACE64) message(STATUS "Using 64-bit integers.") set(GETARCH_FLAGS "${GETARCH_FLAGS} -DUSE64BITINT") @@ -86,6 +84,11 @@ if (NO_AVX2) set(GETARCH_FLAGS "${GETARCH_FLAGS} -DNO_AVX2") endif () +if (NO_AVX512) + message(STATUS "Disabling Advanced Vector Extensions 512 (AVX512).") + set(GETARCH_FLAGS "${GETARCH_FLAGS} -DNO_AVX512") +endif () + if (CMAKE_BUILD_TYPE STREQUAL "Debug") set(GETARCH_FLAGS "${GETARCH_FLAGS} ${CMAKE_C_FLAGS_DEBUG}") endif () @@ -136,10 +139,73 @@ endif () if (USE_THREAD) message(STATUS "Multi-threading enabled with ${NUM_THREADS} threads.") +else() + if (${USE_LOCKING}) + set(CCOMMON_OPT "${CCOMMON_OPT} -DUSE_LOCKING") + endif () endif () include("${PROJECT_SOURCE_DIR}/cmake/prebuild.cmake") - +if (DEFINED TARGET) + if (${TARGET} STREQUAL COOPERLAKE AND NOT NO_AVX512) +# if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU") + execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) + if (${GCC_VERSION} VERSION_GREATER 10.1 OR ${GCC_VERSION} VERSION_EQUAL 10.1) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=cooperlake") + else() + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512") + endif() +# elseif (${CMAKE_C_COMPILER_ID} STREQUAL "CLANG") +# set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2") +# endif() + endif() + if (${TARGET} STREQUAL SKYLAKEX AND NOT NO_AVX512) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512") + endif() + if (${TARGET} STREQUAL HASWELL AND NOT NO_AVX2) + if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU") + execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) + if (${GCC_VERSION} VERSION_GREATER 4.7 OR ${GCC_VERSION} VERSION_EQUAL 4.7) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2") + endif() + elseif (${CMAKE_C_COMPILER_ID} STREQUAL "CLANG") + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2") + endif() + endif() + if (DEFINED HAVE_AVX) + if (NOT NO_AVX) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx") + endif() + endif() + if (DEFINED HAVE_AVX2) + if (NOT NO_AVX2) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2") + endif() + endif() + if (DEFINED HAVE_FMA3) + if (NOT NO_AVX2) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mfma") + endif() + endif() + if (DEFINED HAVE_SSE) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -msse") + endif() + if (DEFINED HAVE_SSE2) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -msse2") + endif() + if (DEFINED HAVE_SSE3) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -msse3") + endif() + if (DEFINED HAVE_SSSE3) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mssse3") + endif() + if (DEFINED HAVE_SSE4_1) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -msse4.1") + endif() +endif() +if (DEFINED BINARY) + message(STATUS "Compiling a ${BINARY}-bit binary.") +endif () if (NOT DEFINED NEED_PIC) set(NEED_PIC 1) endif () @@ -156,6 +222,9 @@ include("${PROJECT_SOURCE_DIR}/cmake/cc.cmake") if (NOT NOFORTRAN) # Fortran Compiler dependent settings include("${PROJECT_SOURCE_DIR}/cmake/fc.cmake") +else 
() +set(NO_LAPACK 1) +set(NO_LAPACKE 1) endif () if (BINARY64) @@ -181,9 +250,14 @@ if (NEED_PIC) endif () if (DYNAMIC_ARCH) - set(CCOMMON_OPT "${CCOMMON_OPT} -DDYNAMIC_ARCH") - if (DYNAMIC_OLDER) - set(CCOMMON_OPT "${CCOMMON_OPT} -DDYNAMIC_OLDER") + if (X86 OR X86_64 OR ARM64 OR PPC) + set(CCOMMON_OPT "${CCOMMON_OPT} -DDYNAMIC_ARCH") + if (DYNAMIC_OLDER) + set(CCOMMON_OPT "${CCOMMON_OPT} -DDYNAMIC_OLDER") + endif () + else () + unset (DYNAMIC_ARCH) + message (STATUS "DYNAMIC_ARCH is not supported on the target architecture, removing") endif () endif () @@ -263,10 +337,30 @@ set(CCOMMON_OPT "${CCOMMON_OPT} -DMAX_CPU_NUMBER=${NUM_THREADS}") set(CCOMMON_OPT "${CCOMMON_OPT} -DMAX_PARALLEL_NUMBER=${NUM_PARALLEL}") +if (BUFFERSIZE) +set(CCOMMON_OPT "${CCOMMON_OPT} -DBUFFERSIZE=${BUFFERSIZE}") +endif () + if (USE_SIMPLE_THREADED_LEVEL3) set(CCOMMON_OPT "${CCOMMON_OPT} -DUSE_SIMPLE_THREADED_LEVEL3") endif () +if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") +if (DEFINED MAX_STACK_ALLOC) +if (NOT ${MAX_STACK_ALLOC} EQUAL 0) +set(CCOMMON_OPT "${CCOMMON_OPT} -DMAX_STACK_ALLOC=${MAX_STACK_ALLOC}") +endif () +else () +set(CCOMMON_OPT "${CCOMMON_OPT} -DMAX_STACK_ALLOC=2048") +endif () +endif () +if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") +if (DEFINED BLAS3_MEM_ALLOC_THRESHOLD) +if (NOT ${BLAS3_MEM_ALLOC_THRESHOLD} EQUAL 32) +set(CCOMMON_OPT "${CCOMMON_OPT} -DBLAS3_MEM_ALLOC_THRESHOLD=${BLAS3_MEM_ALLOC_THRESHOLD}") +endif() +endif() +endif() if (DEFINED LIBNAMESUFFIX) set(LIBPREFIX "libopenblas_${LIBNAMESUFFIX}") else () @@ -336,6 +430,25 @@ set(REVISION "-r${OpenBLAS_VERSION}") set(MAJOR_VERSION ${OpenBLAS_MAJOR_VERSION}) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CCOMMON_OPT}") + +if (NOT BUILD_SINGLE AND NOT BUILD_DOUBLE AND NOT BUILD_COMPLEX AND NOT BUILD_COMPLEX16) + set (BUILD_SINGLE ON) + set (BUILD_DOUBLE ON) + set (BUILD_COMPLEX ON) + set (BUILD_COMPLEX16 ON) +endif() +if (BUILD_SINGLE) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DBUILD_SINGLE") +endif() +if (BUILD_DOUBLE) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DBUILD_DOUBLE") +endif() +if (BUILD_COMPLEX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DBUILD_COMPLEX") +endif() +if (BUILD_COMPLEX16) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DBUILD_COMPLEX16") +endif() if(NOT MSVC) set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${CCOMMON_OPT}") endif() @@ -377,6 +490,14 @@ if (${CMAKE_C_COMPILER} STREQUAL "LSB" OR ${CMAKE_SYSTEM_NAME} STREQUAL "Windows set(LAPACK_CFLAGS "${LAPACK_CFLAGS} -DLAPACK_COMPLEX_STRUCTURE") endif () +if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release") +if ("${F_COMPILER}" STREQUAL "FLANG") +if (${CMAKE_Fortran_COMPILER_VERSION} VERSION_LESS_EQUAL 3) + set(CMAKE_Fortran_FLAGS_RELEASE "${CMAKE_Fortran_FLAGS_RELEASE} -fno-unroll-loops") +endif () +endif () +endif () + if (NOT DEFINED SUFFIX) set(SUFFIX o) endif () @@ -500,6 +621,8 @@ endif () #export FUNCTION_PROFILE #export TARGET_CORE # +#export SBGEMM_UNROLL_M +#export SBGEMM_UNROLL_N #export SGEMM_UNROLL_M #export SGEMM_UNROLL_N #export DGEMM_UNROLL_M diff --git a/cmake/system_check.cmake b/cmake/system_check.cmake index 94d3ba643..fdc79c8ce 100644 --- a/cmake/system_check.cmake +++ b/cmake/system_check.cmake @@ -15,7 +15,7 @@ if (${HOST_OS} STREQUAL "LINUX") EXECUTE_PROCESS( COMMAND uname -o COMMAND tr -d '\n' OUTPUT_VARIABLE OPERATING_SYSTEM) if(${OPERATING_SYSTEM} MATCHES "Android") set(HOST_OS ANDROID) - endif(${OPERATING_SYSTEM} MATCHES "Android") + endif() endif() @@ -39,21 +39,45 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc.*|power.*|Power.*") elseif(CMAKE_SYSTEM_PROCESSOR MATCHES 
"mips64.*") set(MIPS64 1) elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*") - if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") - set(X86_64 1) + if (NOT BINARY) + if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") + set(X86_64 1) + else() + set(X86 1) + endif() else() - set(X86 1) + if (${BINARY} EQUAL "64") + set(X86_64 1) + else () + set(X86 1) + endif() endif() elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*") set(X86 1) -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)") - set(ARM 1) -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)") +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)") if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") set(ARM64 1) else() set(ARM 1) endif() +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)") + set(ARM 1) +elseif (${CMAKE_CROSSCOMPILING}) + if (${TARGET} STREQUAL "CORE2") + if (NOT BINARY) + set(X86 1) + elseif (${BINARY} EQUAL "64") + set(X86_64 1) + else () + set(X86 1) + endif() + elseif (${TARGET} STREQUAL "ARMV7") + set(ARM 1) + else() + set(ARM64 1) + endif () +else () + message(WARNING "Target ARCH could not be determined, got \"${CMAKE_SYSTEM_PROCESSOR}\"") endif() if (X86_64) @@ -85,11 +109,18 @@ else() endif() if (X86_64 OR X86) - file(WRITE ${PROJECT_BINARY_DIR}/avx512.tmp "#include \n\nint main(void){ __asm__ volatile(\"vbroadcastss -4 * 4(%rsi), %zmm2\"); }") -execute_process(COMMAND ${CMAKE_C_COMPILER} -march=skylake-avx512 -c -v -o ${PROJECT_BINARY_DIR}/avx512.o -x c ${PROJECT_BINARY_DIR}/avx512.tmp OUTPUT_QUIET ERROR_QUIET RESULT_VARIABLE NO_AVX512) +if (NOT NO_AVX512) + file(WRITE ${PROJECT_BINARY_DIR}/avx512.c "#include \n\nint main(void){ __asm__ volatile(\"vbroadcastss -4 * 4(%rsi), %zmm2\"); }") +execute_process(COMMAND ${CMAKE_C_COMPILER} -march=skylake-avx512 -c -v -o ${PROJECT_BINARY_DIR}/avx512.o ${PROJECT_BINARY_DIR}/avx512.c OUTPUT_QUIET ERROR_QUIET RESULT_VARIABLE NO_AVX512) if (NO_AVX512 EQUAL 1) set (CCOMMON_OPT "${CCOMMON_OPT} -DNO_AVX512") endif() - file(REMOVE "avx512.tmp" "avx512.o") + file(REMOVE "avx512.c" "avx512.o") +endif() endif() +include(CheckIncludeFile) +CHECK_INCLUDE_FILE("stdatomic.h" HAVE_C11) +if (HAVE_C11 EQUAL 1) +set (CCOMMON_OPT "${CCOMMON_OPT} -DHAVE_C11") +endif() diff --git a/cmake/utils.cmake b/cmake/utils.cmake index fd93f8a70..29b5a067b 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -15,12 +15,36 @@ endfunction () # Reads a Makefile into CMake vars. 
macro(ParseMakefileVars MAKEFILE_IN) message(STATUS "Reading vars from ${MAKEFILE_IN}...") + set (IfElse 0) + set (ElseSeen 0) file(STRINGS ${MAKEFILE_IN} makefile_contents) foreach (makefile_line ${makefile_contents}) +#message(STATUS "parsing ${makefile_line}") + if (${IfElse} GREATER 0) + string(REGEX MATCH "endif[ \t]*" line_match "${makefile_line}") + if (NOT "${line_match}" STREQUAL "") +# message(STATUS "ENDIF ${makefile_line}") + set (IfElse 0) + set (ElseSeen 0) + continue () + endif () + string(REGEX MATCH "else[ \t]*" line_match "${makefile_line}") + if (NOT "${line_match}" STREQUAL "") +# message(STATUS "ELSE ${makefile_line}") + set (ElseSeen 1) + continue () + endif() + if ( (${IfElse} EQUAL 2 AND ${ElseSeen} EQUAL 0) OR ( ${IfElse} EQUAL 1 AND ${ElseSeen} EQUAL 1)) +# message(STATUS "skipping ${makefile_line}") + continue () + endif () + endif () string(REGEX MATCH "([0-9_a-zA-Z]+)[ \t]*=[ \t]*(.+)$" line_match "${makefile_line}") if (NOT "${line_match}" STREQUAL "") +#message(STATUS "match on ${line_match}") set(var_name ${CMAKE_MATCH_1}) - set(var_value ${CMAKE_MATCH_2}) +# set(var_value ${CMAKE_MATCH_2}) + string(STRIP ${CMAKE_MATCH_2} var_value) # check for Makefile variables in the string, e.g. $(TSUFFIX) string(REGEX MATCHALL "\\$\\(([0-9_a-zA-Z]+)\\)" make_var_matches ${var_value}) foreach (make_var ${make_var_matches}) @@ -33,7 +57,34 @@ macro(ParseMakefileVars MAKEFILE_IN) else () string(REGEX MATCH "include \\$\\(KERNELDIR\\)/(.+)$" line_match "${makefile_line}") if (NOT "${line_match}" STREQUAL "") +#message(STATUS "match on include ${line_match}") ParseMakefileVars(${KERNELDIR}/${CMAKE_MATCH_1}) + else () +# message(STATUS "unmatched line ${line_match}") + string(REGEX MATCH "ifeq \\(\\$\\(([_A-Z]+)\\),[ \t]*([0-9_A-Z]+)\\)" line_match "${makefile_line}") + if (NOT "${line_match}" STREQUAL "") +# message(STATUS "IFEQ: ${line_match} first: ${CMAKE_MATCH_1} second: ${CMAKE_MATCH_2}") + if (DEFINED ${${CMAKE_MATCH_1}} AND ${${CMAKE_MATCH_1}} STREQUAL ${CMAKE_MATCH_2}) +# message (STATUS "condition is true") + set (IfElse 1) + else () + set (IfElse 2) + endif () + else () + string(REGEX MATCH "ifneq \\(\\$\\(([_A-Z]+)\\),[ \t]*([0-9_A-Z]+)\\)" line_match "${makefile_line}") + if (NOT "${line_match}" STREQUAL "") +# message(STATUS "IFNEQ: ${line_match} first: ${CMAKE_MATCH_1} second: ${CMAKE_MATCH_2}") + if ( ${CMAKE_MATCH_1} STREQUAL C_COMPILER) + set (CMAKE_MATCH_1 CMAKE_C_COMPILER) + endif () + if (NOT ( ${${CMAKE_MATCH_1}} STREQUAL ${CMAKE_MATCH_2})) +# message (STATUS "condition is true") + set (IfElse 1) + else () + set (IfElse 2) + endif () + endif () + endif () endif () endif () endforeach () @@ -163,6 +214,7 @@ function(GenerateNamedObjects sources_in) if (complex_only) list(REMOVE_ITEM float_list "SINGLE") list(REMOVE_ITEM float_list "DOUBLE") + list(REMOVE_ITEM float_list "BFLOAT16") elseif (real_only) list(REMOVE_ITEM float_list "COMPLEX") list(REMOVE_ITEM float_list "ZCOMPLEX") @@ -176,6 +228,9 @@ function(GenerateNamedObjects sources_in) if (NOT no_float_type) string(SUBSTRING ${float_type} 0 1 float_char) string(TOLOWER ${float_char} float_char) + if (${float_type} STREQUAL "BFLOAT16") + set (float_char "sb") + endif () endif () if (NOT name_in) @@ -210,6 +265,9 @@ function(GenerateNamedObjects sources_in) if (${float_type} STREQUAL "DOUBLE" OR ${float_type} STREQUAL "ZCOMPLEX") list(APPEND obj_defines "DOUBLE") endif () + if (${float_type} STREQUAL "BFLOAT16") + list(APPEND obj_defines "BFLOAT16") + endif () if (${float_type} STREQUAL "COMPLEX" OR 
${float_type} STREQUAL "ZCOMPLEX") list(APPEND obj_defines "COMPLEX") if (mangle_complex_sources) diff --git a/common.h b/common.h index 0ac74bb20..2825407cb 100644 --- a/common.h +++ b/common.h @@ -131,7 +131,7 @@ extern "C" { #include <stdlib.h> #include <stdio.h> #include <string.h> -#ifdef SMP +#if defined(SMP) || defined(USE_LOCKING) #include <pthread.h> #endif #endif @@ -200,7 +200,7 @@ extern "C" { #error "You can't specify both LOCK operation!" #endif -#ifdef SMP +#if defined(SMP) || defined(USE_LOCKING) #define USE_PTHREAD_LOCK #undef USE_PTHREAD_SPINLOCK #endif @@ -257,6 +257,12 @@ typedef long BLASLONG; typedef unsigned long BLASULONG; #endif +#ifndef bfloat16 +#include <stdint.h> +typedef uint16_t bfloat16; +#define BFLOAT16CONVERSION 1 +#endif + #ifdef USE64BITINT typedef BLASLONG blasint; #if defined(OS_WINDOWS) && defined(__64BIT__) @@ -297,6 +303,13 @@ typedef int blasint; #define SIZE 8 #define BASE_SHIFT 3 #define ZBASE_SHIFT 4 +#elif defined(BFLOAT16) +#define IFLOAT bfloat16 +#define XFLOAT IFLOAT +#define FLOAT float +#define SIZE 2 +#define BASE_SHIFT 1 +#define ZBASE_SHIFT 2 #else #define FLOAT float #define SIZE 4 @@ -308,6 +321,10 @@ typedef int blasint; #define XFLOAT FLOAT #endif +#ifndef IFLOAT +#define IFLOAT FLOAT +#endif + #ifndef COMPLEX #define COMPSIZE 1 #else @@ -335,7 +352,7 @@ typedef int blasint; #endif #if defined(ARMV7) || defined(ARMV6) || defined(ARMV8) || defined(ARMV5) -#define YIELDING asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop; \n"); +#define YIELDING __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop; \n"); #endif #ifdef BULLDOZER @@ -344,13 +361,8 @@ typedef int blasint; #endif #endif -#ifdef POWER8 -#ifndef YIELDING -#define YIELDING __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop;\n"); -#endif -#endif -#ifdef POWER9 +#if defined(POWER8) || defined(POWER9) || defined(POWER10) #ifndef YIELDING #define YIELDING __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop;\n"); #endif @@ -390,7 +402,7 @@ please https://github.com/xianyi/OpenBLAS/issues/246 #endif #ifndef BLAS3_MEM_ALLOC_THRESHOLD -#define BLAS3_MEM_ALLOC_THRESHOLD 160 +#define BLAS3_MEM_ALLOC_THRESHOLD 32 #endif #ifdef QUAD_PRECISION @@ -425,6 +437,11 @@ please https://github.com/xianyi/OpenBLAS/issues/246 #include "common_mips.h" #endif + +#ifdef ARCH_RISCV64 +#include "common_riscv64.h" +#endif + #ifdef ARCH_MIPS64 #include "common_mips64.h" #endif @@ -657,6 +674,8 @@ void gotoblas_dynamic_init(void); void gotoblas_dynamic_quit(void); void gotoblas_profile_init(void); void gotoblas_profile_quit(void); + +int support_avx512(void); #ifdef USE_OPENMP @@ -668,7 +687,7 @@ __declspec(dllimport) int __cdecl omp_in_parallel(void); __declspec(dllimport) int __cdecl omp_get_num_procs(void); #endif -#if (__STDC_VERSION__ >= 201112L) +#ifdef HAVE_C11 #if defined(C_GCC) && ( __GNUC__ < 7) // workaround for GCC bug 65467 #ifndef _Atomic diff --git a/common_alpha.h b/common_alpha.h index 9739c941d..f1ea8ff94 100644 --- a/common_alpha.h +++ b/common_alpha.h @@ -43,6 +43,7 @@ #define MB asm("mb") #define WMB asm("wmb") +#define RMB asm("rmb") static void __inline blas_lock(unsigned long *address){ #ifndef __DECC diff --git a/common_arm.h b/common_arm.h index 27fa76b76..682315de5 100644 --- a/common_arm.h +++ b/common_arm.h @@ -37,11 +37,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
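/* Annotation on the RMB additions in common_alpha.h above and the ARM/IA64
 * headers below: each port now defines a read-side barrier to go with the
 * existing full (MB) and write (WMB) barriers. A minimal sketch of the
 * intended producer/consumer pairing; `ready`, `data` and the two functions
 * are illustrative, not part of OpenBLAS:
 */
static volatile int ready;
static int data;

static void produce(int v) { data = v; WMB; ready = 1; }            /* publish data, then set flag */
static int  consume(void)  { while (!ready) { } RMB; return data; } /* see flag, then read data    */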
#define MB #define WMB +#define RMB #else #define MB __asm__ __volatile__ ("dmb ish" : : : "memory") #define WMB __asm__ __volatile__ ("dmb ishst" : : : "memory") +#define RMB __asm__ __volatile__ ("dmb ish" : : : "memory") #endif @@ -121,7 +123,7 @@ REALNAME: #endif #define HUGE_PAGESIZE ( 4 << 20) -#define BUFFER_SIZE (16 << 20) +#define BUFFER_SIZE (32 << 20) #define BASE_ADDRESS (START_ADDRESS - BUFFER_SIZE * MAX_CPU_NUMBER) diff --git a/common_arm64.h b/common_arm64.h index c6ef2fb5d..2270ffba7 100644 --- a/common_arm64.h +++ b/common_arm64.h @@ -35,11 +35,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define MB __asm__ __volatile__ ("dmb ish" : : : "memory") #define WMB __asm__ __volatile__ ("dmb ishst" : : : "memory") - +#define RMB __asm__ __volatile__ ("dmb ishld" : : : "memory") #define INLINE inline -#ifdef F_INTERFACE_FLANG +#if defined( F_INTERFACE_FLANG) || defined(F_INTERFACE_PGI) #define RETURN_BY_STACK #else #define RETURN_BY_COMPLEX @@ -53,16 +53,16 @@ static void __inline blas_lock(volatile BLASULONG *address){ BLASULONG ret; do { - while (*address) {YIELDING;}; - __asm__ __volatile__( "mov x4, #1 \n\t" + "sevl \n\t" "1: \n\t" + "wfe \n\t" + "2: \n\t" "ldaxr x2, [%1] \n\t" "cbnz x2, 1b \n\t" - "2: \n\t" "stxr w3, x4, [%1] \n\t" - "cbnz w3, 1b \n\t" + "cbnz w3, 2b \n\t" "mov %0, #0 \n\t" : "=r"(ret), "=r"(address) : "1"(address) @@ -78,7 +78,20 @@ static void __inline blas_lock(volatile BLASULONG *address){ #define BLAS_LOCK_DEFINED +#if !defined(OS_DARWIN) && !defined (OS_ANDROID) +static __inline BLASULONG rpcc(void){ + BLASULONG ret = 0; + blasint shift; + + __asm__ __volatile__ ("isb; mrs %0,cntvct_el0":"=r"(ret)); + __asm__ __volatile__ ("mrs %0,cntfrq_el0; clz %w0, %w0":"=&r"(shift)); + return ret << shift; +} + +#define RPCC_DEFINED +#define RPCC64BIT +#endif static inline int blas_quickdivide(blasint x, blasint y){ return x / y; @@ -103,12 +116,16 @@ static inline int blas_quickdivide(blasint x, blasint y){ #if defined(ASSEMBLER) && !defined(NEEDPARAM) -#define PROLOGUE \ - .text ;\ - .align 4 ;\ - .global REALNAME ;\ - .type REALNAME, %function ;\ +.macro PROLOGUE + .text ; + .p2align 2 ; + .global REALNAME ; +#ifndef __APPLE__ + .type REALNAME, %function ; +#endif REALNAME: +.endm + #define EPILOGUE @@ -124,13 +141,12 @@ REALNAME: #endif #define HUGE_PAGESIZE ( 4 << 20) -#if defined(CORTEXA57) -#define BUFFER_SIZE (20 << 20) +#ifndef BUFFERSIZE +#define BUFFER_SIZE (32 << 20) #else -#define BUFFER_SIZE (16 << 20) +#define BUFFER_SIZE (32 << BUFFERSIZE) #endif - #define BASE_ADDRESS (START_ADDRESS - BUFFER_SIZE * MAX_CPU_NUMBER) #ifndef MAP_ANONYMOUS #define MAP_ANONYMOUS MAP_ANON #endif diff --git a/common_ia64.h b/common_ia64.h index 72b75fc4e..59aefbd6d 100644 --- a/common_ia64.h +++ b/common_ia64.h @@ -47,6 +47,7 @@ #define MB #define WMB +#define RMB #ifdef __ECC #include <ia64intrin.h> diff --git a/common_interface.h b/common_interface.h index c350ac8ec..b9ebb2772 100644 --- a/common_interface.h +++ b/common_interface.h @@ -54,6 +54,11 @@ double BLASFUNC(dsdot) (blasint *, float *, blasint *, float *, blasint *); double BLASFUNC(ddot) (blasint *, double *, blasint *, double *, blasint *); xdouble BLASFUNC(qdot) (blasint *, xdouble *, blasint *, xdouble *, blasint *); +float BLASFUNC(sbdot) (blasint *, bfloat16 *, blasint *, bfloat16 *, blasint *); +void BLASFUNC(sbstobf16) (blasint *, float *, blasint *, bfloat16 *, blasint *); +void BLASFUNC(sbdtobf16) (blasint *, double *, blasint *, bfloat16 *, blasint *); +void BLASFUNC(sbf16tos) (blasint *, bfloat16 *, blasint *, float *,
blasint *); +void BLASFUNC(dbf16tod) (blasint *, bfloat16 *, blasint *, double *, blasint *); #ifdef RETURN_BY_STRUCT typedef struct { @@ -245,6 +250,8 @@ void BLASFUNC(xgeru)(blasint *, blasint *, xdouble *, xdouble *, blasint *, void BLASFUNC(xgerc)(blasint *, blasint *, xdouble *, xdouble *, blasint *, xdouble *, blasint *, xdouble *, blasint *); +void BLASFUNC(sbgemv)(char *, blasint *, blasint *, float *, bfloat16 *, blasint *, + bfloat16 *, blasint *, float *, float *, blasint *); void BLASFUNC(sgemv)(char *, blasint *, blasint *, float *, float *, blasint *, float *, blasint *, float *, float *, blasint *); void BLASFUNC(dgemv)(char *, blasint *, blasint *, double *, double *, blasint *, @@ -469,6 +476,8 @@ void BLASFUNC(xhbmv)(char *, blasint *, blasint *, xdouble *, xdouble *, blasint /* Level 3 routines */ +void BLASFUNC(sbgemm)(char *, char *, blasint *, blasint *, blasint *, float *, + bfloat16 *, blasint *, bfloat16 *, blasint *, float *, float *, blasint *); void BLASFUNC(sgemm)(char *, char *, blasint *, blasint *, blasint *, float *, float *, blasint *, float *, blasint *, float *, float *, blasint *); void BLASFUNC(dgemm)(char *, char *, blasint *, blasint *, blasint *, double *, diff --git a/common_lapack.h b/common_lapack.h index f6d1956fc..f9c36646a 100644 --- a/common_lapack.h +++ b/common_lapack.h @@ -293,4 +293,150 @@ blasint zlarf_R(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLO blasint xlarf_L(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); blasint xlarf_R(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint strtrs_UNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_UNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_UTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_UTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_LNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_LNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_LTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_LTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint dtrtrs_UNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_UNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_UTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_UTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_LNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_LNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_LTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_LTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint qtrtrs_UNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_UNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_UTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_UTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint 
qtrtrs_LNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_LNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_LTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_LTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint ctrtrs_UNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_URU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_URN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UCU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UCN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LRU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LRN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LCU_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LCN_single(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ztrtrs_UNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_URU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_URN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UCU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UCN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LRU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LRN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LCU_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LCN_single(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint xtrtrs_UNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint 
xtrtrs_UNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_UTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_UTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_URU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_URN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_UCU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_UCN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LNU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LNN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LTU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LTN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LRU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LRN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LCU_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LCN_single(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); + +blasint strtrs_UNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_UNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_UTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_UTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_LNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_LNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_LTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint strtrs_LTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint dtrtrs_UNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_UNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_UTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_UTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_LNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_LNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_LTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint dtrtrs_LTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint qtrtrs_UNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_UNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_UTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_UTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_LNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_LNN_parallel(blas_arg_t 
*, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_LTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint qtrtrs_LTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint ctrtrs_UNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_URU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_URN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UCU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_UCN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LRU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LRN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LCU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ctrtrs_LCN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); +blasint ztrtrs_UNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_URU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_URN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UCU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_UCN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LRU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LRN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LCU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint ztrtrs_LCN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, double *, double *, BLASLONG); +blasint xtrtrs_UNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_UNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, 
BLASLONG); +blasint xtrtrs_UTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_UTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_URU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_URN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_UCU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_UCN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LNU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LNN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LTU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LTN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LRU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LRN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LCU_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); +blasint xtrtrs_LCN_parallel(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); + #endif diff --git a/common_level1.h b/common_level1.h index 74cafb6db..d2ed47e56 100644 --- a/common_level1.h +++ b/common_level1.h @@ -46,6 +46,12 @@ float sdot_k(BLASLONG, float *, BLASLONG, float *, BLASLONG); double dsdot_k(BLASLONG, float *, BLASLONG, float *, BLASLONG); double ddot_k(BLASLONG, double *, BLASLONG, double *, BLASLONG); xdouble qdot_k(BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG); +float sbdot_k(BLASLONG, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG); + +void sbstobf16_k(BLASLONG, float *, BLASLONG, bfloat16 *, BLASLONG); +void sbdtobf16_k(BLASLONG, double *, BLASLONG, bfloat16 *, BLASLONG); +void sbf16tos_k (BLASLONG, bfloat16 *, BLASLONG, float *, BLASLONG); +void dbf16tod_k (BLASLONG, bfloat16 *, BLASLONG, double *, BLASLONG); openblas_complex_float cdotc_k (BLASLONG, float *, BLASLONG, float *, BLASLONG); openblas_complex_float cdotu_k (BLASLONG, float *, BLASLONG, float *, BLASLONG); diff --git a/common_level2.h b/common_level2.h index 640d4a073..9a5ebb4d9 100644 --- a/common_level2.h +++ b/common_level2.h @@ -44,6 +44,10 @@ extern "C" { #endif +int sbgemv_n(BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, float, float *, BLASLONG); +int sbgemv_t(BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, float, float *, BLASLONG); +int sbgemv_thread_n(BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, float, float *, BLASLONG, int); +int sbgemv_thread_t(BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, float, float *, BLASLONG, int); int sger_k (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); int dger_k (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, double *); int qger_k (BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *); diff --git a/common_level3.h b/common_level3.h index 6fa902be8..c4f9435a9 100644 --- a/common_level3.h +++ b/common_level3.h @@ -47,14 +47,16 @@ __global__ void cuda_dgemm_kernel(int, int, int, double *, double *, 
double *); extern "C" { #endif -extern void sgemm_kernel_direct(BLASLONG M, BLASLONG N, BLASLONG K, +void sgemm_direct(BLASLONG M, BLASLONG N, BLASLONG K, float * A, BLASLONG strideA, float * B, BLASLONG strideB, float * R, BLASLONG strideR); -extern int sgemm_kernel_direct_performant(BLASLONG M, BLASLONG N, BLASLONG K); +int sgemm_direct_performant(BLASLONG M, BLASLONG N, BLASLONG K); +int sbgemm_beta(BLASLONG, BLASLONG, BLASLONG, float, + bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, float *, BLASLONG); int sgemm_beta(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); int dgemm_beta(BLASLONG, BLASLONG, BLASLONG, double, @@ -76,6 +78,10 @@ int xgemm_beta(BLASLONG, BLASLONG, BLASLONG, xdouble *, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG); #endif +int sbgemm_incopy(BLASLONG m, BLASLONG n, bfloat16 *a, BLASLONG lda, bfloat16 *b); +int sbgemm_itcopy(BLASLONG m, BLASLONG n, bfloat16 *a, BLASLONG lda, bfloat16 *b); +int sbgemm_oncopy(BLASLONG m, BLASLONG n, bfloat16 *a, BLASLONG lda, bfloat16 *b); +int sbgemm_otcopy(BLASLONG m, BLASLONG n, bfloat16 *a, BLASLONG lda, bfloat16 *b); int sgemm_incopy(BLASLONG m, BLASLONG n, float *a, BLASLONG lda, float *b); int sgemm_itcopy(BLASLONG m, BLASLONG n, float *a, BLASLONG lda, float *b); int sgemm_oncopy(BLASLONG m, BLASLONG n, float *a, BLASLONG lda, float *b); @@ -499,6 +505,7 @@ int xher2k_kernel_UC(BLASLONG m, BLASLONG n, BLASLONG k, xdouble alpha_r, xdoubl int xher2k_kernel_LN(BLASLONG m, BLASLONG n, BLASLONG k, xdouble alpha_r, xdouble alpha_i, xdouble *a, xdouble *b, xdouble *c, BLASLONG ldc, BLASLONG offset, int flag); int xher2k_kernel_LC(BLASLONG m, BLASLONG n, BLASLONG k, xdouble alpha_r, xdouble alpha_i, xdouble *a, xdouble *b, xdouble *c, BLASLONG ldc, BLASLONG offset, int flag); +int sbgemm_kernel(BLASLONG, BLASLONG, BLASLONG, float, bfloat16 *, bfloat16 *, float *, BLASLONG); int sgemm_kernel(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG); int dgemm_kernel(BLASLONG, BLASLONG, BLASLONG, double, double *, double *, double *, BLASLONG); @@ -527,6 +534,11 @@ int cgemm3m_kernel(BLASLONG, BLASLONG, BLASLONG, float, float, float *, float int zgemm3m_kernel(BLASLONG, BLASLONG, BLASLONG, double, double, double *, double *, double *, BLASLONG); int xgemm3m_kernel(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble, xdouble *, xdouble *, xdouble *, BLASLONG); +int sbgemm_nn(blas_arg_t *, BLASLONG *, BLASLONG *, bfloat16 *, bfloat16 *, BLASLONG); +int sbgemm_nt(blas_arg_t *, BLASLONG *, BLASLONG *, bfloat16 *, bfloat16 *, BLASLONG); +int sbgemm_tn(blas_arg_t *, BLASLONG *, BLASLONG *, bfloat16 *, bfloat16 *, BLASLONG); +int sbgemm_tt(blas_arg_t *, BLASLONG *, BLASLONG *, bfloat16 *, bfloat16 *, BLASLONG); + int sgemm_nn(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); int sgemm_nt(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); int sgemm_tn(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); @@ -619,6 +631,11 @@ int xgemm_cr(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLON int xgemm_cc(blas_arg_t *, BLASLONG *, BLASLONG *, xdouble *, xdouble *, BLASLONG); #endif +int sbgemm_thread_nn(blas_arg_t *, BLASLONG *, BLASLONG *, bfloat16 *, bfloat16 *, BLASLONG); +int sbgemm_thread_nt(blas_arg_t *, BLASLONG *, BLASLONG *, bfloat16 *, bfloat16 *, BLASLONG); +int sbgemm_thread_tn(blas_arg_t *, BLASLONG *, BLASLONG *, bfloat16 *, bfloat16 *, BLASLONG); +int sbgemm_thread_tt(blas_arg_t *, BLASLONG *, BLASLONG 
*, bfloat16 *, bfloat16 *, BLASLONG); + int sgemm_thread_nn(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); int sgemm_thread_nt(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); int sgemm_thread_tn(blas_arg_t *, BLASLONG *, BLASLONG *, float *, float *, BLASLONG); diff --git a/common_linux.h b/common_linux.h index 35f3fb658..5a1c4e150 100644 --- a/common_linux.h +++ b/common_linux.h @@ -75,18 +75,10 @@ static inline int my_mbind(void *addr, unsigned long len, int mode, // https://lsbbugs.linuxfoundation.org/show_bug.cgi?id=3482 return 0; #else -#if defined (LOONGSON3B) -#if defined (__64BIT__) - return syscall(SYS_mbind, addr, len, mode, nodemask, maxnode, flags); -#else - return 0; //NULL Implementation on Loongson 3B 32bit. -#endif -#else //Fixed randomly SEGFAULT when nodemask==NULL with above Linux 2.6.34 // unsigned long null_nodemask=0; return syscall(SYS_mbind, addr, len, mode, nodemask, maxnode, flags); #endif -#endif } static inline int my_set_mempolicy(int mode, const unsigned long *addr, unsigned long flag) { diff --git a/common_macro.h b/common_macro.h index d2503aa65..c6ea1bfd9 100644 --- a/common_macro.h +++ b/common_macro.h @@ -39,6 +39,7 @@ #ifndef COMMON_MACRO #define COMMON_MACRO +#include "common_sb.h" #include "common_s.h" #include "common_d.h" #include "common_q.h" @@ -641,7 +642,297 @@ #define IMATCOPY_K_CT DIMATCOPY_K_CT #define IMATCOPY_K_RT DIMATCOPY_K_RT -#define GEADD_K DGEADD_K +#define GEADD_K DGEADD_K + +#elif defined(BFLOAT16) + +#define D_TO_BF16_K SBDTOBF16_K +#define D_BF16_TO_K DBF16TOD_K +#define S_TO_BF16_K SBSTOBF16_K +#define S_BF16_TO_K SBF16TOS_K +#define SBGEMV_N SBGEMV_N_K +#define SBGEMV_T SBGEMV_T_K + +#define AMAX_K SAMAX_K +#define AMIN_K SAMIN_K +#define MAX_K SMAX_K +#define MIN_K SMIN_K +#define IAMAX_K ISAMAX_K +#define IAMIN_K ISAMIN_K +#define IMAX_K ISMAX_K +#define IMIN_K ISMIN_K +#define ASUM_K SASUM_K +#define DOTU_K SDOTU_K +#define DOTC_K SDOTC_K +#define BF16_DOT_K SBDOT_K +#define AXPYU_K SAXPYU_K +#define AXPYC_K SAXPYC_K +#define AXPBY_K SAXPBY_K +#define SCAL_K SSCAL_K +#define GEMV_N SGEMV_N +#define GEMV_T SGEMV_T +#define SYMV_U SSYMV_U +#define SYMV_L SSYMV_L +#define GERU_K SGERU_K +#define GERC_K SGERC_K +#define GERV_K SGERV_K +#define GERD_K SGERD_K +#define SUM_K SSUM_K +#define SWAP_K SSWAP_K +#define ROT_K SROT_K +#define COPY_K SCOPY_K +#define NRM2_K SNRM2_K +#define SYMV_THREAD_U SSYMV_THREAD_U +#define SYMV_THREAD_L SSYMV_THREAD_L +#define GEMM_BETA SBGEMM_BETA +#define GEMM_KERNEL_N SBGEMM_KERNEL +#define GEMM_KERNEL_L SBGEMM_KERNEL +#define GEMM_KERNEL_R SBGEMM_KERNEL +#define GEMM_KERNEL_B SBGEMM_KERNEL + +#define GEMM_NN SBGEMM_NN +#define GEMM_CN SBGEMM_TN +#define GEMM_TN SBGEMM_TN +#define GEMM_NC SBGEMM_NT +#define GEMM_NT SBGEMM_NT +#define GEMM_CC SBGEMM_TT +#define GEMM_CT SBGEMM_TT +#define GEMM_TC SBGEMM_TT +#define GEMM_TT SBGEMM_TT +#define GEMM_NR SBGEMM_NN +#define GEMM_TR SBGEMM_TN +#define GEMM_CR SBGEMM_TN +#define GEMM_RN SBGEMM_NN +#define GEMM_RT SBGEMM_NT +#define GEMM_RC SBGEMM_NT +#define GEMM_RR SBGEMM_NN +#define GEMM_ONCOPY SBGEMM_ONCOPY +#define GEMM_OTCOPY SBGEMM_OTCOPY +#define GEMM_INCOPY SBGEMM_INCOPY +#define GEMM_ITCOPY SBGEMM_ITCOPY +#define SYMM_THREAD_LU SSYMM_THREAD_LU +#define SYMM_THREAD_LL SSYMM_THREAD_LL +#define SYMM_THREAD_RU SSYMM_THREAD_RU +#define SYMM_THREAD_RL SSYMM_THREAD_RL +#define SYMM_LU SSYMM_LU +#define SYMM_LL SSYMM_LL +#define SYMM_RU SSYMM_RU +#define SYMM_RL SSYMM_RL + + +#define HEMM_THREAD_LU SHEMM_THREAD_LU 
+#define HEMM_THREAD_LL SHEMM_THREAD_LL +#define HEMM_THREAD_RU SHEMM_THREAD_RU +#define HEMM_THREAD_RL SHEMM_THREAD_RL + +#define GEMM_THREAD_NN SBGEMM_THREAD_NN +#define GEMM_THREAD_CN SBGEMM_THREAD_TN +#define GEMM_THREAD_TN SBGEMM_THREAD_TN +#define GEMM_THREAD_NC SBGEMM_THREAD_NT +#define GEMM_THREAD_NT SBGEMM_THREAD_NT +#define GEMM_THREAD_CC SBGEMM_THREAD_TT +#define GEMM_THREAD_CT SBGEMM_THREAD_TT +#define GEMM_THREAD_TC SBGEMM_THREAD_TT +#define GEMM_THREAD_TT SBGEMM_THREAD_TT +#define GEMM_THREAD_NR SBGEMM_THREAD_NN +#define GEMM_THREAD_TR SBGEMM_THREAD_TN +#define GEMM_THREAD_CR SBGEMM_THREAD_TN +#define GEMM_THREAD_RN SBGEMM_THREAD_NN +#define GEMM_THREAD_RT SBGEMM_THREAD_NT +#define GEMM_THREAD_RC SBGEMM_THREAD_NT +#define GEMM_THREAD_RR SBGEMM_THREAD_NN + +#ifdef UNIT + +#define TRMM_OUNCOPY STRMM_OUNUCOPY +#define TRMM_OUTCOPY STRMM_OUTUCOPY +#define TRMM_OLNCOPY STRMM_OLNUCOPY +#define TRMM_OLTCOPY STRMM_OLTUCOPY +#define TRSM_OUNCOPY STRSM_OUNUCOPY +#define TRSM_OUTCOPY STRSM_OUTUCOPY +#define TRSM_OLNCOPY STRSM_OLNUCOPY +#define TRSM_OLTCOPY STRSM_OLTUCOPY + +#define TRMM_IUNCOPY STRMM_IUNUCOPY +#define TRMM_IUTCOPY STRMM_IUTUCOPY +#define TRMM_ILNCOPY STRMM_ILNUCOPY +#define TRMM_ILTCOPY STRMM_ILTUCOPY +#define TRSM_IUNCOPY STRSM_IUNUCOPY +#define TRSM_IUTCOPY STRSM_IUTUCOPY +#define TRSM_ILNCOPY STRSM_ILNUCOPY +#define TRSM_ILTCOPY STRSM_ILTUCOPY + +#else + +#define TRMM_OUNCOPY STRMM_OUNNCOPY +#define TRMM_OUTCOPY STRMM_OUTNCOPY +#define TRMM_OLNCOPY STRMM_OLNNCOPY +#define TRMM_OLTCOPY STRMM_OLTNCOPY +#define TRSM_OUNCOPY STRSM_OUNNCOPY +#define TRSM_OUTCOPY STRSM_OUTNCOPY +#define TRSM_OLNCOPY STRSM_OLNNCOPY +#define TRSM_OLTCOPY STRSM_OLTNCOPY + +#define TRMM_IUNCOPY STRMM_IUNNCOPY +#define TRMM_IUTCOPY STRMM_IUTNCOPY +#define TRMM_ILNCOPY STRMM_ILNNCOPY +#define TRMM_ILTCOPY STRMM_ILTNCOPY +#define TRSM_IUNCOPY STRSM_IUNNCOPY +#define TRSM_IUTCOPY STRSM_IUTNCOPY +#define TRSM_ILNCOPY STRSM_ILNNCOPY +#define TRSM_ILTCOPY STRSM_ILTNCOPY + +#define TRMM_KERNEL_LN STRMM_KERNEL_LN +#define TRMM_KERNEL_LT STRMM_KERNEL_LT +#define TRMM_KERNEL_LR STRMM_KERNEL_LN +#define TRMM_KERNEL_LC STRMM_KERNEL_LT +#define TRMM_KERNEL_RN STRMM_KERNEL_RN +#define TRMM_KERNEL_RT STRMM_KERNEL_RT +#define TRMM_KERNEL_RR STRMM_KERNEL_RN +#define TRMM_KERNEL_RC STRMM_KERNEL_RT + +#define TRSM_KERNEL_LN STRSM_KERNEL_LN +#define TRSM_KERNEL_LT STRSM_KERNEL_LT +#define TRSM_KERNEL_LR STRSM_KERNEL_LN +#define TRSM_KERNEL_LC STRSM_KERNEL_LT +#define TRSM_KERNEL_RN STRSM_KERNEL_RN +#define TRSM_KERNEL_RT STRSM_KERNEL_RT +#define TRSM_KERNEL_RR STRSM_KERNEL_RN +#define TRSM_KERNEL_RC STRSM_KERNEL_RT + +#define SYMM_IUTCOPY SSYMM_IUTCOPY +#define SYMM_ILTCOPY SSYMM_ILTCOPY +#define SYMM_OUTCOPY SSYMM_OUTCOPY +#define SYMM_OLTCOPY SSYMM_OLTCOPY +#define TRMM_LNUU STRMM_LNUU +#define TRMM_LNUN STRMM_LNUN +#define TRMM_LNLU STRMM_LNLU +#define TRMM_LNLN STRMM_LNLN +#define TRMM_LTUU STRMM_LTUU +#define TRMM_LTUN STRMM_LTUN +#define TRMM_LTLU STRMM_LTLU +#define TRMM_LTLN STRMM_LTLN +#define TRMM_LRUU STRMM_LNUU +#define TRMM_LRUN STRMM_LNUN +#define TRMM_LRLU STRMM_LNLU +#define TRMM_LRLN STRMM_LNLN +#define TRMM_LCUU STRMM_LTUU +#define TRMM_LCUN STRMM_LTUN +#define TRMM_LCLU STRMM_LTLU +#define TRMM_LCLN STRMM_LTLN +#define TRMM_RNUU STRMM_RNUU +#define TRMM_RNUN STRMM_RNUN +#define TRMM_RNLU STRMM_RNLU +#define TRMM_RNLN STRMM_RNLN +#define TRMM_RTUU STRMM_RTUU +#define TRMM_RTUN STRMM_RTUN +#define TRMM_RTLU STRMM_RTLU +#define TRMM_RTLN STRMM_RTLN +#define TRMM_RRUU STRMM_RNUU +#define TRMM_RRUN 
STRMM_RNUN +#define TRMM_RRLU STRMM_RNLU +#define TRMM_RRLN STRMM_RNLN +#define TRMM_RCUU STRMM_RTUU +#define TRMM_RCUN STRMM_RTUN +#define TRMM_RCLU STRMM_RTLU +#define TRMM_RCLN STRMM_RTLN + +#define TRSM_LNUU STRSM_LNUU +#define TRSM_LNUN STRSM_LNUN +#define TRSM_LNLU STRSM_LNLU +#define TRSM_LNLN STRSM_LNLN +#define TRSM_LTUU STRSM_LTUU +#define TRSM_LTUN STRSM_LTUN +#define TRSM_LTLU STRSM_LTLU +#define TRSM_LTLN STRSM_LTLN +#define TRSM_LRUU STRSM_LNUU +#define TRSM_LRUN STRSM_LNUN +#define TRSM_LRLU STRSM_LNLU +#define TRSM_LRLN STRSM_LNLN +#define TRSM_LCUU STRSM_LTUU +#define TRSM_LCUN STRSM_LTUN +#define TRSM_LCLU STRSM_LTLU +#define TRSM_LCLN STRSM_LTLN +#define TRSM_RNUU STRSM_RNUU +#define TRSM_RNUN STRSM_RNUN +#define TRSM_RNLU STRSM_RNLU +#define TRSM_RNLN STRSM_RNLN +#define TRSM_RTUU STRSM_RTUU +#define TRSM_RTUN STRSM_RTUN +#define TRSM_RTLU STRSM_RTLU +#define TRSM_RTLN STRSM_RTLN +#define TRSM_RRUU STRSM_RNUU +#define TRSM_RRUN STRSM_RNUN +#define TRSM_RRLU STRSM_RNLU +#define TRSM_RRLN STRSM_RNLN +#define TRSM_RCUU STRSM_RTUU +#define TRSM_RCUN STRSM_RTUN +#define TRSM_RCLU STRSM_RTLU +#define TRSM_RCLN STRSM_RTLN +#define SYRK_UN SSYRK_UN +#define SYRK_UT SSYRK_UT +#define SYRK_LN SSYRK_LN +#define SYRK_LT SSYRK_LT +#define SYRK_UR SSYRK_UN +#define SYRK_UC SSYRK_UT +#define SYRK_LR SSYRK_LN +#define SYRK_LC SSYRK_LT + +#define SYRK_KERNEL_U SSYRK_KERNEL_U +#define SYRK_KERNEL_L SSYRK_KERNEL_L + +#define HERK_UN SSYRK_UN +#define HERK_LN SSYRK_LN +#define HERK_UC SSYRK_UT +#define HERK_LC SSYRK_LT + +#define HER2K_UN SSYR2K_UN +#define HER2K_LN SSYR2K_LN +#define HER2K_UC SSYR2K_UT +#define HER2K_LC SSYR2K_LT + +#define SYR2K_UN SSYR2K_UN +#define SYR2K_UT SSYR2K_UT +#define SYR2K_LN SSYR2K_LN +#define SYR2K_LT SSYR2K_LT +#define SYR2K_UR SSYR2K_UN +#define SYR2K_UC SSYR2K_UT +#define SYR2K_LR SSYR2K_LN +#define SYR2K_LC SSYR2K_LT + +#define SYR2K_KERNEL_U SSYR2K_KERNEL_U +#define SYR2K_KERNEL_L SSYR2K_KERNEL_L +#define SYRK_THREAD_UN SSYRK_THREAD_UN +#define SYRK_THREAD_UT SSYRK_THREAD_UT +#define SYRK_THREAD_LN SSYRK_THREAD_LN +#define SYRK_THREAD_LT SSYRK_THREAD_LT +#define SYRK_THREAD_UR SSYRK_THREAD_UR +#define SYRK_THREAD_UC SSYRK_THREAD_UC +#define SYRK_THREAD_LR SSYRK_THREAD_LN +#define SYRK_THREAD_LC SSYRK_THREAD_LT + +#define HERK_THREAD_UN SSYRK_THREAD_UN +#define HERK_THREAD_UT SSYRK_THREAD_UT +#define HERK_THREAD_LN SSYRK_THREAD_LN +#define HERK_THREAD_LT SSYRK_THREAD_LT +#define HERK_THREAD_UR SSYRK_THREAD_UR +#define HERK_THREAD_UC SSYRK_THREAD_UC +#define HERK_THREAD_LR SSYRK_THREAD_LN +#define HERK_THREAD_LC SSYRK_THREAD_LT + +#define OMATCOPY_K_CN SOMATCOPY_K_CN +#define OMATCOPY_K_RN SOMATCOPY_K_RN +#define OMATCOPY_K_CT SOMATCOPY_K_CT +#define OMATCOPY_K_RT SOMATCOPY_K_RT +#define IMATCOPY_K_CN SIMATCOPY_K_CN +#define IMATCOPY_K_RN SIMATCOPY_K_RN +#define IMATCOPY_K_CT SIMATCOPY_K_CT +#define IMATCOPY_K_RT SIMATCOPY_K_RT + +#define GEADD_K SGEADD_K + +#endif + #else #define AMAX_K SAMAX_K @@ -673,14 +964,14 @@ #define GEMV_S SGEMV_S #define GEMV_D SGEMV_D + +#define SYMV_U SSYMV_U +#define SYMV_L SSYMV_L #define GERU_K SGERU_K #define GERC_K SGERC_K #define GERV_K SGERV_K #define GERD_K SGERD_K -#define SYMV_U SSYMV_U -#define SYMV_L SSYMV_L - #define SYMV_THREAD_U SSYMV_THREAD_U #define SYMV_THREAD_L SSYMV_THREAD_L @@ -944,7 +1235,7 @@ #define IMATCOPY_K_CT SIMATCOPY_K_CT #define IMATCOPY_K_RT SIMATCOPY_K_RT -#define GEADD_K SGEADD_K +#define GEADD_K SGEADD_K #endif #else #ifdef XDOUBLE @@ -1770,7 +2061,7 @@ #define IMATCOPY_K_CTC ZIMATCOPY_K_CTC 
#define IMATCOPY_K_RTC ZIMATCOPY_K_RTC -#define GEADD_K ZGEADD_K +#define GEADD_K ZGEADD_K #else @@ -2193,7 +2484,7 @@ #define IMATCOPY_K_CTC CIMATCOPY_K_CTC #define IMATCOPY_K_RTC CIMATCOPY_K_RTC -#define GEADD_K CGEADD_K +#define GEADD_K CGEADD_K #endif #endif @@ -2202,6 +2493,9 @@ #if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64) extern BLASLONG gemm_offset_a; extern BLASLONG gemm_offset_b; +extern BLASLONG sbgemm_p; +extern BLASLONG sbgemm_q; +extern BLASLONG sbgemm_r; extern BLASLONG sgemm_p; extern BLASLONG sgemm_q; extern BLASLONG sgemm_r; @@ -2806,3 +3100,160 @@ typedef struct { #endif #endif + +#ifndef COMPLEX +#ifdef XDOUBLE +#define TRTRS_UNU_SINGLE qtrtrs_UNU_single +#define TRTRS_UNN_SINGLE qtrtrs_UNN_single +#define TRTRS_UTU_SINGLE qtrtrs_UTU_single +#define TRTRS_UTN_SINGLE qtrtrs_UTN_single +#define TRTRS_LNU_SINGLE qtrtrs_LNU_single +#define TRTRS_LNN_SINGLE qtrtrs_LNN_single +#define TRTRS_LTU_SINGLE qtrtrs_LTU_single +#define TRTRS_LTN_SINGLE qtrtrs_LTN_single +#define TRTRS_UNU_PARALLEL qtrtrs_UNU_parallel +#define TRTRS_UNN_PARALLEL qtrtrs_UNN_parallel +#define TRTRS_UTU_PARALLEL qtrtrs_UTU_parallel +#define TRTRS_UTN_PARALLEL qtrtrs_UTN_parallel +#define TRTRS_LNU_PARALLEL qtrtrs_LNU_parallel +#define TRTRS_LNN_PARALLEL qtrtrs_LNN_parallel +#define TRTRS_LTU_PARALLEL qtrtrs_LTU_parallel +#define TRTRS_LTN_PARALLEL qtrtrs_LTN_parallel + +#elif defined(DOUBLE) +#define TRTRS_UNU_SINGLE dtrtrs_UNU_single +#define TRTRS_UNN_SINGLE dtrtrs_UNN_single +#define TRTRS_UTU_SINGLE dtrtrs_UTU_single +#define TRTRS_UTN_SINGLE dtrtrs_UTN_single +#define TRTRS_LNU_SINGLE dtrtrs_LNU_single +#define TRTRS_LNN_SINGLE dtrtrs_LNN_single +#define TRTRS_LTU_SINGLE dtrtrs_LTU_single +#define TRTRS_LTN_SINGLE dtrtrs_LTN_single +#define TRTRS_UNU_PARALLEL dtrtrs_UNU_parallel +#define TRTRS_UNN_PARALLEL dtrtrs_UNN_parallel +#define TRTRS_UTU_PARALLEL dtrtrs_UTU_parallel +#define TRTRS_UTN_PARALLEL dtrtrs_UTN_parallel +#define TRTRS_LNU_PARALLEL dtrtrs_LNU_parallel +#define TRTRS_LNN_PARALLEL dtrtrs_LNN_parallel +#define TRTRS_LTU_PARALLEL dtrtrs_LTU_parallel +#define TRTRS_LTN_PARALLEL dtrtrs_LTN_parallel +#else +#define TRTRS_UNU_SINGLE strtrs_UNU_single +#define TRTRS_UNN_SINGLE strtrs_UNN_single +#define TRTRS_UTU_SINGLE strtrs_UTU_single +#define TRTRS_UTN_SINGLE strtrs_UTN_single +#define TRTRS_LNU_SINGLE strtrs_LNU_single +#define TRTRS_LNN_SINGLE strtrs_LNN_single +#define TRTRS_LTU_SINGLE strtrs_LTU_single +#define TRTRS_LTN_SINGLE strtrs_LTN_single +#define TRTRS_UNU_PARALLEL strtrs_UNU_parallel +#define TRTRS_UNN_PARALLEL strtrs_UNN_parallel +#define TRTRS_UTU_PARALLEL strtrs_UTU_parallel +#define TRTRS_UTN_PARALLEL strtrs_UTN_parallel +#define TRTRS_LNU_PARALLEL strtrs_LNU_parallel +#define TRTRS_LNN_PARALLEL strtrs_LNN_parallel +#define TRTRS_LTU_PARALLEL strtrs_LTU_parallel +#define TRTRS_LTN_PARALLEL strtrs_LTN_parallel +#endif +#else +#ifdef XDOUBLE +#define TRTRS_UNU_SINGLE xtrtrs_UNU_single +#define TRTRS_UNN_SINGLE xtrtrs_UNN_single +#define TRTRS_UTU_SINGLE xtrtrs_UTU_single +#define TRTRS_UTN_SINGLE xtrtrs_UTN_single +#define TRTRS_URU_SINGLE xtrtrs_URU_single +#define TRTRS_URN_SINGLE xtrtrs_URN_single +#define TRTRS_UCU_SINGLE xtrtrs_UCU_single +#define TRTRS_UCN_SINGLE xtrtrs_UCN_single +#define TRTRS_LNU_SINGLE xtrtrs_LNU_single +#define TRTRS_LNN_SINGLE xtrtrs_LNN_single +#define TRTRS_LTU_SINGLE xtrtrs_LTU_single +#define TRTRS_LTN_SINGLE xtrtrs_LTN_single +#define TRTRS_LRU_SINGLE xtrtrs_LRU_single 
+#define TRTRS_LRN_SINGLE xtrtrs_LRN_single +#define TRTRS_LCU_SINGLE xtrtrs_LCU_single +#define TRTRS_LCN_SINGLE xtrtrs_LCN_single +#define TRTRS_UNU_PARALLEL xtrtrs_UNU_parallel +#define TRTRS_UNN_PARALLEL xtrtrs_UNN_parallel +#define TRTRS_UTU_PARALLEL xtrtrs_UTU_parallel +#define TRTRS_UTN_PARALLEL xtrtrs_UTN_parallel +#define TRTRS_URU_PARALLEL xtrtrs_URU_parallel +#define TRTRS_URN_PARALLEL xtrtrs_URN_parallel +#define TRTRS_UCU_PARALLEL xtrtrs_UCU_parallel +#define TRTRS_UCN_PARALLEL xtrtrs_UCN_parallel +#define TRTRS_LNU_PARALLEL xtrtrs_LNU_parallel +#define TRTRS_LNN_PARALLEL xtrtrs_LNN_parallel +#define TRTRS_LTU_PARALLEL xtrtrs_LTU_parallel +#define TRTRS_LTN_PARALLEL xtrtrs_LTN_parallel +#define TRTRS_LRU_PARALLEL xtrtrs_LRU_parallel +#define TRTRS_LRN_PARALLEL xtrtrs_LRN_parallel +#define TRTRS_LCU_PARALLEL xtrtrs_LCU_parallel +#define TRTRS_LCN_PARALLEL xtrtrs_LCN_parallel +#elif defined(DOUBLE) +#define TRTRS_UNU_SINGLE ztrtrs_UNU_single +#define TRTRS_UNN_SINGLE ztrtrs_UNN_single +#define TRTRS_UTU_SINGLE ztrtrs_UTU_single +#define TRTRS_UTN_SINGLE ztrtrs_UTN_single +#define TRTRS_URU_SINGLE ztrtrs_URU_single +#define TRTRS_URN_SINGLE ztrtrs_URN_single +#define TRTRS_UCU_SINGLE ztrtrs_UCU_single +#define TRTRS_UCN_SINGLE ztrtrs_UCN_single +#define TRTRS_LNU_SINGLE ztrtrs_LNU_single +#define TRTRS_LNN_SINGLE ztrtrs_LNN_single +#define TRTRS_LTU_SINGLE ztrtrs_LTU_single +#define TRTRS_LTN_SINGLE ztrtrs_LTN_single +#define TRTRS_LRU_SINGLE ztrtrs_LRU_single +#define TRTRS_LRN_SINGLE ztrtrs_LRN_single +#define TRTRS_LCU_SINGLE ztrtrs_LCU_single +#define TRTRS_LCN_SINGLE ztrtrs_LCN_single +#define TRTRS_UNU_PARALLEL ztrtrs_UNU_parallel +#define TRTRS_UNN_PARALLEL ztrtrs_UNN_parallel +#define TRTRS_UTU_PARALLEL ztrtrs_UTU_parallel +#define TRTRS_UTN_PARALLEL ztrtrs_UTN_parallel +#define TRTRS_URU_PARALLEL ztrtrs_URU_parallel +#define TRTRS_URN_PARALLEL ztrtrs_URN_parallel +#define TRTRS_UCU_PARALLEL ztrtrs_UCU_parallel +#define TRTRS_UCN_PARALLEL ztrtrs_UCN_parallel +#define TRTRS_LNU_PARALLEL ztrtrs_LNU_parallel +#define TRTRS_LNN_PARALLEL ztrtrs_LNN_parallel +#define TRTRS_LTU_PARALLEL ztrtrs_LTU_parallel +#define TRTRS_LTN_PARALLEL ztrtrs_LTN_parallel +#define TRTRS_LRU_PARALLEL ztrtrs_LRU_parallel +#define TRTRS_LRN_PARALLEL ztrtrs_LRN_parallel +#define TRTRS_LCU_PARALLEL ztrtrs_LCU_parallel +#define TRTRS_LCN_PARALLEL ztrtrs_LCN_parallel +#else +#define TRTRS_UNU_SINGLE ctrtrs_UNU_single +#define TRTRS_UNN_SINGLE ctrtrs_UNN_single +#define TRTRS_UTU_SINGLE ctrtrs_UTU_single +#define TRTRS_UTN_SINGLE ctrtrs_UTN_single +#define TRTRS_URU_SINGLE ctrtrs_URU_single +#define TRTRS_URN_SINGLE ctrtrs_URN_single +#define TRTRS_UCU_SINGLE ctrtrs_UCU_single +#define TRTRS_UCN_SINGLE ctrtrs_UCN_single +#define TRTRS_LNU_SINGLE ctrtrs_LNU_single +#define TRTRS_LNN_SINGLE ctrtrs_LNN_single +#define TRTRS_LTU_SINGLE ctrtrs_LTU_single +#define TRTRS_LTN_SINGLE ctrtrs_LTN_single +#define TRTRS_LRU_SINGLE ctrtrs_LRU_single +#define TRTRS_LRN_SINGLE ctrtrs_LRN_single +#define TRTRS_LCU_SINGLE ctrtrs_LCU_single +#define TRTRS_LCN_SINGLE ctrtrs_LCN_single +#define TRTRS_UNU_PARALLEL ctrtrs_UNU_parallel +#define TRTRS_UNN_PARALLEL ctrtrs_UNN_parallel +#define TRTRS_UTU_PARALLEL ctrtrs_UTU_parallel +#define TRTRS_UTN_PARALLEL ctrtrs_UTN_parallel +#define TRTRS_URU_PARALLEL ctrtrs_URU_parallel +#define TRTRS_URN_PARALLEL ctrtrs_URN_parallel +#define TRTRS_UCU_PARALLEL ctrtrs_UCU_parallel +#define TRTRS_UCN_PARALLEL ctrtrs_UCN_parallel +#define TRTRS_LNU_PARALLEL ctrtrs_LNU_parallel +#define 
TRTRS_LNN_PARALLEL ctrtrs_LNN_parallel +#define TRTRS_LTU_PARALLEL ctrtrs_LTU_parallel +#define TRTRS_LTN_PARALLEL ctrtrs_LTN_parallel +#define TRTRS_LRU_PARALLEL ctrtrs_LRU_parallel +#define TRTRS_LRN_PARALLEL ctrtrs_LRN_parallel +#define TRTRS_LCU_PARALLEL ctrtrs_LCU_parallel +#define TRTRS_LCN_PARALLEL ctrtrs_LCN_parallel +#endif +#endif diff --git a/common_mips.h b/common_mips.h index 35bff5083..7dc3ba246 100644 --- a/common_mips.h +++ b/common_mips.h @@ -35,6 +35,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define MB __sync_synchronize() #define WMB __sync_synchronize() +#define RMB __sync_synchronize() #define INLINE inline @@ -42,6 +43,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef ASSEMBLER +#if !defined(MIPS24K) static inline unsigned int rpcc(void){ unsigned long ret; @@ -52,6 +54,7 @@ static inline unsigned int rpcc(void){ return ret; } #define RPCC_DEFINED +#endif static inline int blas_quickdivide(blasint x, blasint y){ return x / y; @@ -91,7 +94,7 @@ REALNAME: #endif #define HUGE_PAGESIZE ( 4 << 20) -#define BUFFER_SIZE (16 << 20) +#define BUFFER_SIZE (16 << 21) #define BASE_ADDRESS (START_ADDRESS - BUFFER_SIZE * MAX_CPU_NUMBER) diff --git a/common_mips64.h b/common_mips64.h index 1163413dc..287459e7d 100644 --- a/common_mips64.h +++ b/common_mips64.h @@ -73,6 +73,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define MB __sync_synchronize() #define WMB __sync_synchronize() +#define RMB __sync_synchronize() #define INLINE inline @@ -226,14 +227,9 @@ REALNAME: ;\ #define SEEK_ADDRESS -#define BUFFER_SIZE ( 32 << 20) +#define BUFFER_SIZE ( 32 << 21) -#if defined(LOONGSON3A) -#define PAGESIZE (16UL << 10) -#define FIXED_PAGESIZE (16UL << 10) -#endif - -#if defined(LOONGSON3B) +#if defined(LOONGSON3R3) || defined(LOONGSON3R4) #define PAGESIZE (16UL << 10) #define FIXED_PAGESIZE (16UL << 10) #endif @@ -249,7 +245,7 @@ REALNAME: ;\ #define MAP_ANONYMOUS MAP_ANON #endif -#if defined(LOONGSON3A) || defined(LOONGSON3B) +#if defined(LOONGSON3R3) || defined(LOONGSON3R4) #define PREFETCHD_(x) ld $0, x #define PREFETCHD(x) PREFETCHD_(x) #else diff --git a/common_param.h b/common_param.h index 574d5e176..3e3ae06f8 100644 --- a/common_param.h +++ b/common_param.h @@ -47,26 +47,133 @@ typedef struct { int dtb_entries; int offsetA, offsetB, align; +#ifdef BUILD_BFLOAT16 + int sbgemm_p, sbgemm_q, sbgemm_r; + int sbgemm_unroll_m, sbgemm_unroll_n, sbgemm_unroll_mn; + + void (*sbstobf16_k) (BLASLONG, float *, BLASLONG, bfloat16 *, BLASLONG); + void (*sbdtobf16_k) (BLASLONG, double *, BLASLONG, bfloat16 *, BLASLONG); + void (*sbf16tos_k) (BLASLONG, bfloat16 *, BLASLONG, float *, BLASLONG); + void (*dbf16tod_k) (BLASLONG, bfloat16 *, BLASLONG, double *, BLASLONG); + + float (*sbamax_k) (BLASLONG, float *, BLASLONG); + float (*sbamin_k) (BLASLONG, float *, BLASLONG); + float (*sbmax_k) (BLASLONG, float *, BLASLONG); + float (*sbmin_k) (BLASLONG, float *, BLASLONG); +BLASLONG (*isbamax_k)(BLASLONG, float *, BLASLONG); +BLASLONG (*isbamin_k)(BLASLONG, float *, BLASLONG); +BLASLONG (*isbmax_k) (BLASLONG, float *, BLASLONG); +BLASLONG (*isbmin_k) (BLASLONG, float *, BLASLONG); + + float (*sbnrm2_k) (BLASLONG, float *, BLASLONG); + float (*sbasum_k) (BLASLONG, float *, BLASLONG); + float (*sbsum_k) (BLASLONG, float *, BLASLONG); + int (*sbcopy_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG); + float (*sbdot_k) (BLASLONG, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG); + double (*dsbdot_k) (BLASLONG, 
float *, BLASLONG, float *, BLASLONG); + + int (*sbrot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG, float, float); + + int (*sbaxpy_k) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); + int (*sbscal_k) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); + int (*sbswap_k) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); + + int (*sbgemv_n) (BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, float, float *, BLASLONG); + int (*sbgemv_t) (BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, float, float *, BLASLONG); + int (*sbger_k) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); + + int (*sbsymv_L) (BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); + int (*sbsymv_U) (BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); + + int (*sbgemm_kernel )(BLASLONG, BLASLONG, BLASLONG, float, bfloat16 *, bfloat16 *, float *, BLASLONG); + int (*sbgemm_beta )(BLASLONG, BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, float *, BLASLONG); + + int (*sbgemm_incopy )(BLASLONG, BLASLONG, bfloat16 *, BLASLONG, bfloat16 *); + int (*sbgemm_itcopy )(BLASLONG, BLASLONG, bfloat16 *, BLASLONG, bfloat16 *); + int (*sbgemm_oncopy )(BLASLONG, BLASLONG, bfloat16 *, BLASLONG, bfloat16 *); + int (*sbgemm_otcopy )(BLASLONG, BLASLONG, bfloat16 *, BLASLONG, bfloat16 *); + + int (*sbtrsm_kernel_LN)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); + int (*sbtrsm_kernel_LT)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); + int (*sbtrsm_kernel_RN)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); + int (*sbtrsm_kernel_RT)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); + + int (*sbtrsm_iunucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_iunncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_iutucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_iutncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_ilnucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_ilnncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_iltucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_iltncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_ounucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_ounncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_outucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_outncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_olnucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_olnncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_oltucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + int (*sbtrsm_oltncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, float *); + + int (*sbtrmm_kernel_RN)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); + int (*sbtrmm_kernel_RT)(BLASLONG, BLASLONG, 
BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); + int (*sbtrmm_kernel_LN)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); + int (*sbtrmm_kernel_LT)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); + + int (*sbtrmm_iunucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_iunncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_iutucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_iutncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_ilnucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_ilnncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_iltucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_iltncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_ounucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_ounncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_outucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_outncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_olnucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_olnncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_oltucopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbtrmm_oltncopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + + int (*sbsymm_iutcopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbsymm_iltcopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbsymm_outcopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + int (*sbsymm_oltcopy)(BLASLONG, BLASLONG, float *, BLASLONG, BLASLONG, BLASLONG, float *); + + int (*sbneg_tcopy) (BLASLONG, BLASLONG, float *, BLASLONG, float *); + int (*sblaswp_ncopy) (BLASLONG, BLASLONG, BLASLONG, float *, BLASLONG, blasint *, float *); + +#endif + +#if defined(BUILD_SINGLE) || defined(BUILD_COMPLEX) int sgemm_p, sgemm_q, sgemm_r; int sgemm_unroll_m, sgemm_unroll_n, sgemm_unroll_mn; +#endif int exclusive_cache; +#if defined(BUILD_SINGLE) || defined(BUILD_COMPLEX) float (*samax_k) (BLASLONG, float *, BLASLONG); float (*samin_k) (BLASLONG, float *, BLASLONG); float (*smax_k) (BLASLONG, float *, BLASLONG); float (*smin_k) (BLASLONG, float *, BLASLONG); + BLASLONG (*isamax_k)(BLASLONG, float *, BLASLONG); BLASLONG (*isamin_k)(BLASLONG, float *, BLASLONG); BLASLONG (*ismax_k) (BLASLONG, float *, BLASLONG); BLASLONG (*ismin_k) (BLASLONG, float *, BLASLONG); - float (*snrm2_k) (BLASLONG, float *, BLASLONG); float (*sasum_k) (BLASLONG, float *, BLASLONG); +#endif +#ifdef BUILD_SINGLE float (*ssum_k) (BLASLONG, float *, BLASLONG); +#endif +#if defined(BUILD_SINGLE) || defined(BUILD_COMPLEX) int (*scopy_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG); float (*sdot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG); - double (*dsdot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG); + //double (*dsdot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG); int (*srot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG, float, float); @@ 
-76,19 +183,30 @@ BLASLONG (*ismin_k) (BLASLONG, float *, BLASLONG); int (*sgemv_n) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); int (*sgemv_t) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); +#endif +#ifdef BUILD_SINGLE int (*sger_k) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); int (*ssymv_L) (BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); int (*ssymv_U) (BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); +#endif +#if defined(BUILD_SINGLE) || defined(BUILD_COMPLEX) +#ifdef ARCH_X86_64 + void (*sgemm_direct) (BLASLONG, BLASLONG, BLASLONG, float *, BLASLONG , float *, BLASLONG , float * , BLASLONG); + int (*sgemm_direct_performant) (BLASLONG M, BLASLONG N, BLASLONG K); +#endif + int (*sgemm_kernel )(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG); int (*sgemm_beta )(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); + int (*sgemm_incopy )(BLASLONG, BLASLONG, float *, BLASLONG, float *); int (*sgemm_itcopy )(BLASLONG, BLASLONG, float *, BLASLONG, float *); int (*sgemm_oncopy )(BLASLONG, BLASLONG, float *, BLASLONG, float *); int (*sgemm_otcopy )(BLASLONG, BLASLONG, float *, BLASLONG, float *); - +#endif +#ifdef BUILD_SINGLE int (*strsm_kernel_LN)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); int (*strsm_kernel_LT)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); int (*strsm_kernel_RN)(BLASLONG, BLASLONG, BLASLONG, float, float *, float *, float *, BLASLONG, BLASLONG); @@ -140,10 +258,14 @@ BLASLONG (*ismin_k) (BLASLONG, float *, BLASLONG); int (*sneg_tcopy) (BLASLONG, BLASLONG, float *, BLASLONG, float *); int (*slaswp_ncopy) (BLASLONG, BLASLONG, BLASLONG, float *, BLASLONG, blasint *, float *); +#endif +#if defined(BUILD_DOUBLE) || defined(BUILD_COMPLEX16) int dgemm_p, dgemm_q, dgemm_r; int dgemm_unroll_m, dgemm_unroll_n, dgemm_unroll_mn; +#endif +#if defined(BUILD_DOUBLE) || defined(BUILD_COMPLEX16) double (*damax_k) (BLASLONG, double *, BLASLONG); double (*damin_k) (BLASLONG, double *, BLASLONG); double (*dmax_k) (BLASLONG, double *, BLASLONG); @@ -152,25 +274,37 @@ BLASLONG (*idamax_k)(BLASLONG, double *, BLASLONG); BLASLONG (*idamin_k)(BLASLONG, double *, BLASLONG); BLASLONG (*idmax_k) (BLASLONG, double *, BLASLONG); BLASLONG (*idmin_k) (BLASLONG, double *, BLASLONG); +#endif +#if defined(BUILD_DOUBLE) || defined(BUILD_COMPLEX16) double (*dnrm2_k) (BLASLONG, double *, BLASLONG); double (*dasum_k) (BLASLONG, double *, BLASLONG); +#endif +#ifdef BUILD_DOUBLE double (*dsum_k) (BLASLONG, double *, BLASLONG); +#endif +#if defined(BUILD_DOUBLE) || defined(BUILD_COMPLEX16) int (*dcopy_k) (BLASLONG, double *, BLASLONG, double *, BLASLONG); double (*ddot_k) (BLASLONG, double *, BLASLONG, double *, BLASLONG); +#endif +#if defined (BUILD_SINGLE) || defined(BUILD_DOUBLE) + double (*dsdot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG); +#endif +#if defined(BUILD_DOUBLE) || defined(BUILD_COMPLEX16) int (*drot_k) (BLASLONG, double *, BLASLONG, double *, BLASLONG, double, double); - int (*daxpy_k) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG); int (*dscal_k) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, 
double *, BLASLONG); int (*dswap_k) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG); - int (*dgemv_n) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, double *); int (*dgemv_t) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, double *); +#endif +#ifdef BUILD_DOUBLE int (*dger_k) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, double *); int (*dsymv_L) (BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, double *); int (*dsymv_U) (BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, double *); - +#endif +#if defined(BUILD_DOUBLE) || defined(BUILD_COMPLEX16) int (*dgemm_kernel )(BLASLONG, BLASLONG, BLASLONG, double, double *, double *, double *, BLASLONG); int (*dgemm_beta )(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG); @@ -178,7 +312,8 @@ BLASLONG (*idmin_k) (BLASLONG, double *, BLASLONG); int (*dgemm_itcopy )(BLASLONG, BLASLONG, double *, BLASLONG, double *); int (*dgemm_oncopy )(BLASLONG, BLASLONG, double *, BLASLONG, double *); int (*dgemm_otcopy )(BLASLONG, BLASLONG, double *, BLASLONG, double *); - +#endif +#ifdef BUILD_DOUBLE int (*dtrsm_kernel_LN)(BLASLONG, BLASLONG, BLASLONG, double, double *, double *, double *, BLASLONG, BLASLONG); int (*dtrsm_kernel_LT)(BLASLONG, BLASLONG, BLASLONG, double, double *, double *, double *, BLASLONG, BLASLONG); int (*dtrsm_kernel_RN)(BLASLONG, BLASLONG, BLASLONG, double, double *, double *, double *, BLASLONG, BLASLONG); @@ -230,7 +365,7 @@ BLASLONG (*idmin_k) (BLASLONG, double *, BLASLONG); int (*dneg_tcopy) (BLASLONG, BLASLONG, double *, BLASLONG, double *); int (*dlaswp_ncopy) (BLASLONG, BLASLONG, BLASLONG, double *, BLASLONG, blasint *, double *); - +#endif #ifdef EXPRECISION int qgemm_p, qgemm_q, qgemm_r; @@ -325,6 +460,7 @@ BLASLONG (*iqmin_k) (BLASLONG, xdouble *, BLASLONG); #endif +#ifdef BUILD_COMPLEX int cgemm_p, cgemm_q, cgemm_r; int cgemm_unroll_m, cgemm_unroll_n, cgemm_unroll_mn; @@ -488,7 +624,9 @@ BLASLONG (*icamin_k)(BLASLONG, float *, BLASLONG); int (*cneg_tcopy) (BLASLONG, BLASLONG, float *, BLASLONG, float *); int (*claswp_ncopy) (BLASLONG, BLASLONG, BLASLONG, float *, BLASLONG, blasint *, float *); +#endif +#ifdef BUILD_COMPLEX16 int zgemm_p, zgemm_q, zgemm_r; int zgemm_unroll_m, zgemm_unroll_n, zgemm_unroll_mn; @@ -652,6 +790,7 @@ BLASLONG (*izamin_k)(BLASLONG, double *, BLASLONG); int (*zneg_tcopy) (BLASLONG, BLASLONG, double *, BLASLONG, double *); int (*zlaswp_ncopy) (BLASLONG, BLASLONG, BLASLONG, double *, BLASLONG, blasint *, double *); +#endif #ifdef EXPRECISION @@ -825,22 +964,34 @@ BLASLONG (*ixamin_k)(BLASLONG, xdouble *, BLASLONG); void (*init)(void); int snum_opt, dnum_opt, qnum_opt; - +#ifdef BUILD_SINGLE int (*saxpby_k) (BLASLONG, float, float*, BLASLONG,float, float*, BLASLONG); +#endif +#ifdef BUILD_DOUBLE int (*daxpby_k) (BLASLONG, double, double*, BLASLONG,double, double*, BLASLONG); +#endif +#ifdef BUILD_COMPLEX int (*caxpby_k) (BLASLONG, float, float, float*, BLASLONG,float,float, float*, BLASLONG); +#endif +#ifdef BUILD_COMPLEX16 int (*zaxpby_k) (BLASLONG, double, double, double*, BLASLONG,double,double, double*, BLASLONG); +#endif +#ifdef BUILD_SINGLE int (*somatcopy_k_cn) (BLASLONG, BLASLONG, float, float*, BLASLONG, float*, BLASLONG); int (*somatcopy_k_ct) (BLASLONG, BLASLONG, float, 
float*, BLASLONG, float*, BLASLONG); int (*somatcopy_k_rn) (BLASLONG, BLASLONG, float, float*, BLASLONG, float*, BLASLONG); int (*somatcopy_k_rt) (BLASLONG, BLASLONG, float, float*, BLASLONG, float*, BLASLONG); +#endif +#ifdef BUILD_DOUBLE int (*domatcopy_k_cn) (BLASLONG, BLASLONG, double, double*, BLASLONG, double*, BLASLONG); int (*domatcopy_k_ct) (BLASLONG, BLASLONG, double, double*, BLASLONG, double*, BLASLONG); int (*domatcopy_k_rn) (BLASLONG, BLASLONG, double, double*, BLASLONG, double*, BLASLONG); int (*domatcopy_k_rt) (BLASLONG, BLASLONG, double, double*, BLASLONG, double*, BLASLONG); +#endif +#ifdef BUILD_COMPLEX int (*comatcopy_k_cn) (BLASLONG, BLASLONG, float, float, float*, BLASLONG, float*, BLASLONG); int (*comatcopy_k_ct) (BLASLONG, BLASLONG, float, float, float*, BLASLONG, float*, BLASLONG); int (*comatcopy_k_rn) (BLASLONG, BLASLONG, float, float, float*, BLASLONG, float*, BLASLONG); @@ -850,7 +1001,9 @@ BLASLONG (*ixamin_k)(BLASLONG, xdouble *, BLASLONG); int (*comatcopy_k_ctc) (BLASLONG, BLASLONG, float, float, float*, BLASLONG, float*, BLASLONG); int (*comatcopy_k_rnc) (BLASLONG, BLASLONG, float, float, float*, BLASLONG, float*, BLASLONG); int (*comatcopy_k_rtc) (BLASLONG, BLASLONG, float, float, float*, BLASLONG, float*, BLASLONG); +#endif +#ifdef BUILD_COMPLEX16 int (*zomatcopy_k_cn) (BLASLONG, BLASLONG, double, double, double*, BLASLONG, double*, BLASLONG); int (*zomatcopy_k_ct) (BLASLONG, BLASLONG, double, double, double*, BLASLONG, double*, BLASLONG); int (*zomatcopy_k_rn) (BLASLONG, BLASLONG, double, double, double*, BLASLONG, double*, BLASLONG); @@ -860,17 +1013,23 @@ BLASLONG (*ixamin_k)(BLASLONG, xdouble *, BLASLONG); int (*zomatcopy_k_ctc) (BLASLONG, BLASLONG, double, double, double*, BLASLONG, double*, BLASLONG); int (*zomatcopy_k_rnc) (BLASLONG, BLASLONG, double, double, double*, BLASLONG, double*, BLASLONG); int (*zomatcopy_k_rtc) (BLASLONG, BLASLONG, double, double, double*, BLASLONG, double*, BLASLONG); +#endif +#ifdef BUILD_SINGLE int (*simatcopy_k_cn) (BLASLONG, BLASLONG, float, float*, BLASLONG); int (*simatcopy_k_ct) (BLASLONG, BLASLONG, float, float*, BLASLONG); int (*simatcopy_k_rn) (BLASLONG, BLASLONG, float, float*, BLASLONG); int (*simatcopy_k_rt) (BLASLONG, BLASLONG, float, float*, BLASLONG); +#endif +#ifdef BUILD_DOUBLE int (*dimatcopy_k_cn) (BLASLONG, BLASLONG, double, double*, BLASLONG); int (*dimatcopy_k_ct) (BLASLONG, BLASLONG, double, double*, BLASLONG); int (*dimatcopy_k_rn) (BLASLONG, BLASLONG, double, double*, BLASLONG); int (*dimatcopy_k_rt) (BLASLONG, BLASLONG, double, double*, BLASLONG); +#endif +#ifdef BUILD_COMPLEX int (*cimatcopy_k_cn) (BLASLONG, BLASLONG, float, float, float*, BLASLONG); int (*cimatcopy_k_ct) (BLASLONG, BLASLONG, float, float, float*, BLASLONG); int (*cimatcopy_k_rn) (BLASLONG, BLASLONG, float, float, float*, BLASLONG); @@ -880,7 +1039,9 @@ BLASLONG (*ixamin_k)(BLASLONG, xdouble *, BLASLONG); int (*cimatcopy_k_ctc) (BLASLONG, BLASLONG, float, float, float*, BLASLONG); int (*cimatcopy_k_rnc) (BLASLONG, BLASLONG, float, float, float*, BLASLONG); int (*cimatcopy_k_rtc) (BLASLONG, BLASLONG, float, float, float*, BLASLONG); +#endif +#ifdef BUILD_COMPLEX16 int (*zimatcopy_k_cn) (BLASLONG, BLASLONG, double, double, double*, BLASLONG); int (*zimatcopy_k_ct) (BLASLONG, BLASLONG, double, double, double*, BLASLONG); int (*zimatcopy_k_rn) (BLASLONG, BLASLONG, double, double, double*, BLASLONG); @@ -890,12 +1051,20 @@ BLASLONG (*ixamin_k)(BLASLONG, xdouble *, BLASLONG); int (*zimatcopy_k_ctc) (BLASLONG, BLASLONG, double, 
double, double*, BLASLONG); int (*zimatcopy_k_rnc) (BLASLONG, BLASLONG, double, double, double*, BLASLONG); int (*zimatcopy_k_rtc) (BLASLONG, BLASLONG, double, double, double*, BLASLONG); +#endif +#ifdef BUILD_SINGLE int (*sgeadd_k) (BLASLONG, BLASLONG, float, float *, BLASLONG, float, float *, BLASLONG); +#endif +#ifdef BUILD_DOUBLE int (*dgeadd_k) (BLASLONG, BLASLONG, double, double *, BLASLONG, double, double *, BLASLONG); +#endif +#ifdef BUILD_COMPLEX int (*cgeadd_k) (BLASLONG, BLASLONG, float, float, float *, BLASLONG, float, float, float *, BLASLONG); +#endif +#ifdef BUILD_COMPLEX16 int (*zgeadd_k) (BLASLONG, BLASLONG, double, double, double *, BLASLONG, double, double, double *, BLASLONG); - +#endif } gotoblas_t; extern gotoblas_t *gotoblas; @@ -907,19 +1076,32 @@ extern gotoblas_t *gotoblas; #define HAVE_EX_L2 gotoblas -> exclusive_cache +#ifdef BUILD_BFLOAT16 +#define SBGEMM_P gotoblas -> sbgemm_p +#define SBGEMM_Q gotoblas -> sbgemm_q +#define SBGEMM_R gotoblas -> sbgemm_r +#define SBGEMM_UNROLL_M gotoblas -> sbgemm_unroll_m +#define SBGEMM_UNROLL_N gotoblas -> sbgemm_unroll_n +#define SBGEMM_UNROLL_MN gotoblas -> sbgemm_unroll_mn +#endif + +#if defined (BUILD_SINGLE) #define SGEMM_P gotoblas -> sgemm_p #define SGEMM_Q gotoblas -> sgemm_q #define SGEMM_R gotoblas -> sgemm_r #define SGEMM_UNROLL_M gotoblas -> sgemm_unroll_m #define SGEMM_UNROLL_N gotoblas -> sgemm_unroll_n #define SGEMM_UNROLL_MN gotoblas -> sgemm_unroll_mn +#endif +#if defined (BUILD_DOUBLE) #define DGEMM_P gotoblas -> dgemm_p #define DGEMM_Q gotoblas -> dgemm_q #define DGEMM_R gotoblas -> dgemm_r #define DGEMM_UNROLL_M gotoblas -> dgemm_unroll_m #define DGEMM_UNROLL_N gotoblas -> dgemm_unroll_n #define DGEMM_UNROLL_MN gotoblas -> dgemm_unroll_mn +#endif #define QGEMM_P gotoblas -> qgemm_p #define QGEMM_Q gotoblas -> qgemm_q @@ -928,19 +1110,39 @@ extern gotoblas_t *gotoblas; #define QGEMM_UNROLL_N gotoblas -> qgemm_unroll_n #define QGEMM_UNROLL_MN gotoblas -> qgemm_unroll_mn +#ifdef BUILD_COMPLEX #define CGEMM_P gotoblas -> cgemm_p #define CGEMM_Q gotoblas -> cgemm_q #define CGEMM_R gotoblas -> cgemm_r #define CGEMM_UNROLL_M gotoblas -> cgemm_unroll_m #define CGEMM_UNROLL_N gotoblas -> cgemm_unroll_n #define CGEMM_UNROLL_MN gotoblas -> cgemm_unroll_mn +#ifndef BUILD_SINGLE +#define SGEMM_P gotoblas -> sgemm_p +#define SGEMM_Q gotoblas -> sgemm_q +#define SGEMM_R 1024 +#define SGEMM_UNROLL_M gotoblas -> sgemm_unroll_m +#define SGEMM_UNROLL_N gotoblas -> sgemm_unroll_n +#define SGEMM_UNROLL_MN gotoblas -> sgemm_unroll_mn +#endif +#endif +#ifdef BUILD_COMPLEX16 #define ZGEMM_P gotoblas -> zgemm_p #define ZGEMM_Q gotoblas -> zgemm_q #define ZGEMM_R gotoblas -> zgemm_r #define ZGEMM_UNROLL_M gotoblas -> zgemm_unroll_m #define ZGEMM_UNROLL_N gotoblas -> zgemm_unroll_n #define ZGEMM_UNROLL_MN gotoblas -> zgemm_unroll_mn +#ifndef BUILD_DOUBLE +#define DGEMM_P gotoblas -> dgemm_p +#define DGEMM_Q gotoblas -> dgemm_q +#define DGEMM_R 1024 +#define DGEMM_UNROLL_M gotoblas -> dgemm_unroll_m +#define DGEMM_UNROLL_N gotoblas -> dgemm_unroll_n +#define DGEMM_UNROLL_MN gotoblas -> dgemm_unroll_mn +#endif +#endif #define XGEMM_P gotoblas -> xgemm_p #define XGEMM_Q gotoblas -> xgemm_q @@ -984,6 +1186,19 @@ extern gotoblas_t *gotoblas; #define HAVE_EX_L2 0 #endif +#ifdef BUILD_BFLOAT16 +#define SBGEMM_P SBGEMM_DEFAULT_P +#define SBGEMM_Q SBGEMM_DEFAULT_Q +#define SBGEMM_R SBGEMM_DEFAULT_R +#define SBGEMM_UNROLL_M SBGEMM_DEFAULT_UNROLL_M +#define SBGEMM_UNROLL_N SBGEMM_DEFAULT_UNROLL_N +#ifdef SBGEMM_DEFAULT_UNROLL_MN +#define 
SBGEMM_UNROLL_MN SBGEMM_DEFAULT_UNROLL_MN +#else +#define SBGEMM_UNROLL_MN MAX((SBGEMM_UNROLL_M), (SBGEMM_UNROLL_N)) +#endif +#endif + #define SGEMM_P SGEMM_DEFAULT_P #define SGEMM_Q SGEMM_DEFAULT_Q #define SGEMM_R SGEMM_DEFAULT_R @@ -1119,6 +1334,18 @@ extern gotoblas_t *gotoblas; #define GEMM_DEFAULT_R DGEMM_DEFAULT_R #define GEMM_DEFAULT_UNROLL_M DGEMM_DEFAULT_UNROLL_M #define GEMM_DEFAULT_UNROLL_N DGEMM_DEFAULT_UNROLL_N +#elif defined(BFLOAT16) +#define GEMM_P SBGEMM_P +#define GEMM_Q SBGEMM_Q +#define GEMM_R SBGEMM_R +#define GEMM_UNROLL_M SBGEMM_UNROLL_M +#define GEMM_UNROLL_N SBGEMM_UNROLL_N +#define GEMM_UNROLL_MN SBGEMM_UNROLL_MN +#define GEMM_DEFAULT_P SBGEMM_DEFAULT_P +#define GEMM_DEFAULT_Q SBGEMM_DEFAULT_Q +#define GEMM_DEFAULT_R SBGEMM_DEFAULT_R +#define GEMM_DEFAULT_UNROLL_M SBGEMM_DEFAULT_UNROLL_M +#define GEMM_DEFAULT_UNROLL_N SBGEMM_DEFAULT_UNROLL_N #else #define GEMM_P SGEMM_P #define GEMM_Q SGEMM_Q @@ -1204,28 +1431,32 @@ extern gotoblas_t *gotoblas; #define GEMM_THREAD gemm_thread_n #endif +#ifndef SBGEMM_DEFAULT_R +#define SBGEMM_DEFAULT_R (((BUFFER_SIZE - ((SBGEMM_DEFAULT_P * SBGEMM_DEFAULT_Q * 4 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (SBGEMM_DEFAULT_Q * 4) - 15) & ~15UL) +#endif + #ifndef SGEMM_DEFAULT_R -#define SGEMM_DEFAULT_R (((BUFFER_SIZE - ((SGEMM_DEFAULT_P * SGEMM_DEFAULT_Q * 4 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (SGEMM_DEFAULT_Q * 4) - 15) & ~15) +#define SGEMM_DEFAULT_R (((BUFFER_SIZE - ((SGEMM_DEFAULT_P * SGEMM_DEFAULT_Q * 4 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (SGEMM_DEFAULT_Q * 4) - 15) & ~15UL) #endif #ifndef DGEMM_DEFAULT_R -#define DGEMM_DEFAULT_R (((BUFFER_SIZE - ((DGEMM_DEFAULT_P * DGEMM_DEFAULT_Q * 8 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (DGEMM_DEFAULT_Q * 8) - 15) & ~15) +#define DGEMM_DEFAULT_R (((BUFFER_SIZE - ((DGEMM_DEFAULT_P * DGEMM_DEFAULT_Q * 8 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (DGEMM_DEFAULT_Q * 8) - 15) & ~15UL) #endif #ifndef QGEMM_DEFAULT_R -#define QGEMM_DEFAULT_R (((BUFFER_SIZE - ((QGEMM_DEFAULT_P * QGEMM_DEFAULT_Q * 16 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (QGEMM_DEFAULT_Q * 16) - 15) & ~15) +#define QGEMM_DEFAULT_R (((BUFFER_SIZE - ((QGEMM_DEFAULT_P * QGEMM_DEFAULT_Q * 16 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (QGEMM_DEFAULT_Q * 16) - 15) & ~15UL) #endif #ifndef CGEMM_DEFAULT_R -#define CGEMM_DEFAULT_R (((BUFFER_SIZE - ((CGEMM_DEFAULT_P * CGEMM_DEFAULT_Q * 8 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (CGEMM_DEFAULT_Q * 8) - 15) & ~15) +#define CGEMM_DEFAULT_R (((BUFFER_SIZE - ((CGEMM_DEFAULT_P * CGEMM_DEFAULT_Q * 8 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (CGEMM_DEFAULT_Q * 8) - 15) & ~15UL) #endif #ifndef ZGEMM_DEFAULT_R -#define ZGEMM_DEFAULT_R (((BUFFER_SIZE - ((ZGEMM_DEFAULT_P * ZGEMM_DEFAULT_Q * 16 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (ZGEMM_DEFAULT_Q * 16) - 15) & ~15) +#define ZGEMM_DEFAULT_R (((BUFFER_SIZE - ((ZGEMM_DEFAULT_P * ZGEMM_DEFAULT_Q * 16 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (ZGEMM_DEFAULT_Q * 16) - 15) & ~15UL) #endif #ifndef XGEMM_DEFAULT_R -#define XGEMM_DEFAULT_R (((BUFFER_SIZE - ((XGEMM_DEFAULT_P * XGEMM_DEFAULT_Q * 32 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (XGEMM_DEFAULT_Q * 32) - 15) & ~15) +#define 
XGEMM_DEFAULT_R (((BUFFER_SIZE - ((XGEMM_DEFAULT_P * XGEMM_DEFAULT_Q * 32 + GEMM_DEFAULT_OFFSET_A + GEMM_DEFAULT_ALIGN) & ~GEMM_DEFAULT_ALIGN)) / (XGEMM_DEFAULT_Q * 32) - 15) & ~15UL) #endif #ifndef SNUMOPT diff --git a/common_power.h b/common_power.h index 889205c75..a49197fd7 100644 --- a/common_power.h +++ b/common_power.h @@ -39,12 +39,43 @@ #ifndef COMMON_POWER #define COMMON_POWER -#if defined(POWER8) || defined(POWER9) +#define str(x) #x + +#ifdef OS_AIX +#define XXSPLTD(T,A,z) xxpermdi T, A, A, 0b##z##z +#define XXMRGHD(T,A,B) xxpermdi T, A, B, 0b00 +#define XXMRGLD(T,A,B) xxpermdi T, A, B, 0b11 +#define XXSWAPD(T,A) xxpermdi T, A, A, 0b10 +#define XVMOVDP(T,A) xvcpsgndp T, A, A + +#define XXSPLTD_S(T,A,z) "xxpermdi " str(T) ", " str(A) ", " str(A) ", 0b" str(z ## z) " \n\t" +#define XXMRGHD_S(T,A,B) "xxpermdi " str(T) ", " str(A) ", " str(B) ", 0b00 \n\t" +#define XXMRGLD_S(T,A,B) "xxpermdi " str(T) ", " str(A) ", " str(B) ", 0b11 \n\t" +#define XXSWAPD_S(T,A) "xxpermdi " str(T) ", " str(A) ", " str(A) ", 0b10 \n\t" + +#else +#define XXSPLTD(T,A,z) xxspltd T, A, z +#define XXMRGHD(T,A,B) xxmrghd T, A, B +#define XXMRGLD(T,A,B) xxmrgld T, A, B +#define XXSWAPD(T,A) xxswapd T, A +#define XVMOVDP(T,A) xvmovdp T, A + +#define XXSPLTD_S(T,A,z) "xxspltd " str(T) ", " str(A) ", " str(z)" \n\t" +#define XXMRGHD_S(T,A,B) "xxmrghd " str(T) ", " str(A) ", " str(B)" \n\t" +#define XXMRGLD_S(T,A,B) "xxmrgld " str(T) ", " str(A) ", " str(B)" \n\t" +#define XXSWAPD_S(T,A) "xxswapd " str(T) ", " str(A) " \n\t" + +#endif + + +#if defined(POWER8) || defined(POWER9) || defined(POWER10) #define MB __asm__ __volatile__ ("eieio":::"memory") #define WMB __asm__ __volatile__ ("eieio":::"memory") +#define RMB __asm__ __volatile__ ("eieio":::"memory") #else #define MB __asm__ __volatile__ ("sync") #define WMB __asm__ __volatile__ ("sync") +#define RMB __asm__ __volatile__ ("sync") #endif #define INLINE inline @@ -74,6 +105,7 @@ static void INLINE blas_lock(volatile unsigned long *address){ " bne- 1f\n" " stwcx. 
%2,0, %1\n" " bne- 0b\n" + " isync\n" "1: " : "=&r"(ret) : "r"(address), "r" (val) @@ -241,7 +273,7 @@ static inline int blas_quickdivide(blasint x, blasint y){ #define HAVE_PREFETCH #endif -#if defined(POWER3) || defined(POWER6) || defined(PPCG4) || defined(CELL) || defined(POWER8) || defined(POWER9) || ( defined(PPC970) && defined(OS_DARWIN) ) +#if defined(POWER3) || defined(POWER6) || defined(PPCG4) || defined(CELL) || defined(POWER8) || defined(POWER9) || defined(POWER10) || defined(PPC970) #define DCBT_ARG 0 #else #define DCBT_ARG 8 @@ -263,7 +295,7 @@ static inline int blas_quickdivide(blasint x, blasint y){ #define L1_PREFETCH dcbtst #endif -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) #define L1_DUALFETCH #define L1_PREFETCHSIZE (16 + 128 * 100) #define L1_PREFETCH dcbtst @@ -499,7 +531,7 @@ static inline int blas_quickdivide(blasint x, blasint y){ #if defined(ASSEMBLER) && !defined(NEEDPARAM) -#ifdef OS_LINUX +#if defined(OS_LINUX) || defined(OS_FREEBSD) #ifndef __64BIT__ #define PROLOGUE \ .section .text;\ @@ -784,7 +816,7 @@ Lmcount$lazy_ptr: #define HALT mfspr r0, 1023 -#ifdef OS_LINUX +#if defined(OS_LINUX) || defined(OS_FREEBSD) #if defined(PPC440) || defined(PPC440FP2) #undef MAX_CPU_NUMBER #define MAX_CPU_NUMBER 1 @@ -812,11 +844,15 @@ Lmcount$lazy_ptr: #define BUFFER_SIZE ( 2 << 20) #elif defined(PPC440FP2) #define BUFFER_SIZE ( 16 << 20) -#elif defined(POWER8) || defined(POWER9) -#define BUFFER_SIZE ( 64 << 20) +#elif defined(POWER6) || defined(POWER8) || defined(POWER9) || defined(POWER10) +#define BUFFER_SIZE ( 64 << 22) #else #define BUFFER_SIZE ( 16 << 20) #endif +#ifdef DYNAMIC_ARCH +#undef BUFFER_SIZE +#define BUFFER_SIZE (64 << 22) +#endif #ifndef PAGESIZE #define PAGESIZE ( 4 << 10) @@ -829,7 +865,7 @@ Lmcount$lazy_ptr: #define MAP_ANONYMOUS MAP_ANON #endif -#ifdef OS_LINUX +#if defined(OS_LINUX) || defined(OS_FREEBSD) #ifndef __64BIT__ #define FRAMESLOT(X) (((X) * 4) + 8) #else diff --git a/common_riscv64.h b/common_riscv64.h new file mode 100644 index 000000000..27f385dfd --- /dev/null +++ b/common_riscv64.h @@ -0,0 +1,98 @@ +/***************************************************************************** +Copyright (c) 2011-2014, The OpenBLAS Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**********************************************************************************/ + +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#ifndef COMMON_RISCV64 +#define COMMON_RISCV64 + +#define MB __sync_synchronize() +#define WMB __sync_synchronize() +#define RMB __sync_synchronize() + +#define INLINE inline + +#ifndef ASSEMBLER + + +static inline int blas_quickdivide(blasint x, blasint y){ + return x / y; +} + +#endif + + + +#define BUFFER_SIZE ( 32 << 20) +#define SEEK_ADDRESS + +#if defined(C910V) +#include +#endif + +#endif diff --git a/common_s.h b/common_s.h index 23c432f7c..34903ec49 100644 --- a/common_s.h +++ b/common_s.h @@ -45,6 +45,10 @@ #define SSYMV_THREAD_U ssymv_thread_U #define SSYMV_THREAD_L ssymv_thread_L + +#define SGEMM_DIRECT_PERFORMANT sgemm_direct_performant +#define SGEMM_DIRECT sgemm_direct + #define SGEMM_ONCOPY sgemm_oncopy #define SGEMM_OTCOPY sgemm_otcopy @@ -204,6 +208,14 @@ #define SSYMV_THREAD_U ssymv_thread_U #define SSYMV_THREAD_L ssymv_thread_L +#ifdef ARCH_X86_64 +#define SGEMM_DIRECT_PERFORMANT gotoblas -> sgemm_direct_performant +#define SGEMM_DIRECT gotoblas -> sgemm_direct +#else +#define SGEMM_DIRECT_PERFORMANT sgemm_direct_performant +#define SGEMM_DIRECT sgemm_direct +#endif + #define SGEMM_ONCOPY gotoblas -> sgemm_oncopy #define SGEMM_OTCOPY gotoblas -> sgemm_otcopy #define SGEMM_INCOPY gotoblas -> sgemm_incopy diff --git a/common_sb.h b/common_sb.h new file mode 100644 index 000000000..9976e812e --- /dev/null +++ b/common_sb.h @@ -0,0 +1,81 @@ +#ifndef COMMON_SB_H +#define COMMON_SB_H + +#ifndef DYNAMIC_ARCH + +#define SBDOT_K sbdot_k +#define SBSTOBF16_K sbstobf16_k +#define SBDTOBF16_K sbdtobf16_k +#define SBF16TOS_K sbf16tos_k +#define DBF16TOD_K dbf16tod_k +#define SBGEMV_N_K sbgemv_n +#define SBGEMV_T_K sbgemv_t + +#define SBGEMM_ONCOPY sbgemm_oncopy +#define SBGEMM_OTCOPY sbgemm_otcopy + +#if SBGEMM_DEFAULT_UNROLL_M == SBGEMM_DEFAULT_UNROLL_N +#define SBGEMM_INCOPY sbgemm_oncopy +#define SBGEMM_ITCOPY sbgemm_otcopy +#else +#define SBGEMM_INCOPY sbgemm_incopy +#define SBGEMM_ITCOPY sbgemm_itcopy +#endif +#define SBGEMM_BETA sbgemm_beta +#define SBGEMM_KERNEL sbgemm_kernel + +#else + +#define SBDOT_K gotoblas -> sbdot_k +#define SBSTOBF16_K gotoblas -> sbstobf16_k +#define SBDTOBF16_K gotoblas -> sbdtobf16_k +#define SBF16TOS_K gotoblas -> sbf16tos_k +#define DBF16TOD_K gotoblas -> dbf16tod_k +#define SBGEMV_N_K gotoblas -> sbgemv_n +#define SBGEMV_T_K gotoblas -> sbgemv_t + +#define SBGEMM_ONCOPY gotoblas -> sbgemm_oncopy +#define SBGEMM_OTCOPY gotoblas -> sbgemm_otcopy +#define SBGEMM_INCOPY gotoblas -> sbgemm_incopy +#define SBGEMM_ITCOPY gotoblas -> sbgemm_itcopy +#define SBGEMM_BETA gotoblas -> sbgemm_beta +#define SBGEMM_KERNEL gotoblas -> sbgemm_kernel + +#endif + +#define SBGEMM_NN sbgemm_nn +#define SBGEMM_CN sbgemm_tn +#define SBGEMM_TN sbgemm_tn +#define SBGEMM_NC sbgemm_nt +#define SBGEMM_NT sbgemm_nt +#define SBGEMM_CC sbgemm_tt +#define SBGEMM_CT sbgemm_tt +#define SBGEMM_TC sbgemm_tt +#define SBGEMM_TT sbgemm_tt +#define SBGEMM_NR sbgemm_nn +#define SBGEMM_TR sbgemm_tn +#define SBGEMM_CR sbgemm_tn +#define SBGEMM_RN sbgemm_nn +#define SBGEMM_RT sbgemm_nt +#define SBGEMM_RC sbgemm_nt +#define SBGEMM_RR sbgemm_nn + +#define SBGEMM_THREAD_NN sbgemm_thread_nn +#define SBGEMM_THREAD_CN sbgemm_thread_tn +#define SBGEMM_THREAD_TN sbgemm_thread_tn +#define SBGEMM_THREAD_NC sbgemm_thread_nt +#define SBGEMM_THREAD_NT sbgemm_thread_nt +#define SBGEMM_THREAD_CC sbgemm_thread_tt +#define SBGEMM_THREAD_CT sbgemm_thread_tt +#define SBGEMM_THREAD_TC sbgemm_thread_tt +#define SBGEMM_THREAD_TT 
sbgemm_thread_tt +#define SBGEMM_THREAD_NR sbgemm_thread_nn +#define SBGEMM_THREAD_TR sbgemm_thread_tn +#define SBGEMM_THREAD_CR sbgemm_thread_tn +#define SBGEMM_THREAD_RN sbgemm_thread_nn +#define SBGEMM_THREAD_RT sbgemm_thread_nt +#define SBGEMM_THREAD_RC sbgemm_thread_nt +#define SBGEMM_THREAD_RR sbgemm_thread_nn + +#endif + diff --git a/common_sparc.h b/common_sparc.h index f99972db9..90a24ebf1 100644 --- a/common_sparc.h +++ b/common_sparc.h @@ -41,6 +41,7 @@ #define MB __asm__ __volatile__ ("nop") #define WMB __asm__ __volatile__ ("nop") +#define RMB __asm__ __volatile__ ("nop") #ifndef ASSEMBLER @@ -77,6 +78,12 @@ static __inline unsigned long rpcc(void){ #define __BIG_ENDIAN__ #endif +#ifdef C_SUN +#ifndef __64BIT +#define RETURN_BY_STACK +#endif +#endif + #ifdef DOUBLE #define GET_IMAGE(res) __asm__ __volatile__("fmovd %%f2, %0" : "=f"(res) : : "memory") #else diff --git a/common_thread.h b/common_thread.h index bd964445e..a18df0d78 100644 --- a/common_thread.h +++ b/common_thread.h @@ -59,12 +59,19 @@ extern int blas_omp_linked; #define BLAS_PTHREAD 0x4000U #define BLAS_NODE 0x2000U -#define BLAS_PREC 0x0003U -#define BLAS_SINGLE 0x0000U -#define BLAS_DOUBLE 0x0001U -#define BLAS_XDOUBLE 0x0002U -#define BLAS_REAL 0x0000U -#define BLAS_COMPLEX 0x0004U +#define BLAS_PREC 0x000FU +#define BLAS_INT8 0x0000U +#define BLAS_BFLOAT16 0x0001U +#define BLAS_SINGLE 0x0002U +#define BLAS_DOUBLE 0x0003U +#define BLAS_XDOUBLE 0x0004U +#define BLAS_STOBF16 0x0008U +#define BLAS_DTOBF16 0x0009U +#define BLAS_BF16TOS 0x000AU +#define BLAS_BF16TOD 0x000BU + +#define BLAS_REAL 0x0000U +#define BLAS_COMPLEX 0x1000U #define BLAS_TRANSA 0x0030U /* 2bit */ #define BLAS_TRANSA_N 0x0000U @@ -132,18 +139,18 @@ extern int blas_server_avail; static __inline int num_cpu_avail(int level) { #ifdef USE_OPENMP - int openmp_nthreads=0; + int openmp_nthreads=omp_get_max_threads(); #endif +#ifndef USE_OPENMP if (blas_cpu_number == 1 - +#endif #ifdef USE_OPENMP - || omp_in_parallel() + if (openmp_nthreads == 1 || omp_in_parallel() #endif - ) return 1; + ) return 1; #ifdef USE_OPENMP - openmp_nthreads=omp_get_max_threads(); if (blas_cpu_number != openmp_nthreads) { goto_set_num_threads(openmp_nthreads); } @@ -194,10 +201,6 @@ int trsm_thread(int mode, BLASLONG m, BLASLONG n, int syrk_thread(int mode, blas_arg_t *, BLASLONG *, BLASLONG *, int (*function)(), void *, void *, BLASLONG); -int beta_thread(int mode, BLASLONG m, BLASLONG n, - double alpha_r, double alpha_i, - void *c, BLASLONG ldc, int (*fuction)()); - int getrf_thread(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void *offsetA, BLASLONG lda, void *offsetB, BLASLONG jb, diff --git a/common_x86.h b/common_x86.h index 99adc9f5b..ec928e236 100644 --- a/common_x86.h +++ b/common_x86.h @@ -47,6 +47,7 @@ #define MB #define WMB +#define RMB #ifdef C_SUN #define __asm__ __asm diff --git a/common_x86_64.h b/common_x86_64.h index f59ff6627..b813336c6 100644 --- a/common_x86_64.h +++ b/common_x86_64.h @@ -63,13 +63,16 @@ #ifdef __GNUC__ #define MB do { __asm__ __volatile__("": : :"memory"); } while (0) #define WMB do { __asm__ __volatile__("": : :"memory"); } while (0) +#define RMB #else #define MB do {} while (0) #define WMB do {} while (0) +#define RMB #endif static void __inline blas_lock(volatile BLASULONG *address){ + #ifndef C_MSVC int ret; #else @@ -77,7 +80,7 @@ static void __inline blas_lock(volatile BLASULONG *address){ #endif do { - while (*address) {YIELDING;}; + while (*address) {YIELDING;} #ifndef C_MSVC __asm__ __volatile__( @@ -129,12 +132,36 @@ static 
__inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx){
   *ecx=cpuinfo[2];
   *edx=cpuinfo[3];
 #else
-  __asm__ __volatile__("cpuid"
+  __asm__ __volatile__("mov $0, %%ecx;"
+		       "cpuid"
		       : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
-		       : "0" (op), "c"(0));
+		       : "0" (op));
+#endif
+}
+
+static __inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx)
+{
+#ifdef C_MSVC
+  int cpuInfo[4] = {-1};
+  __cpuidex(cpuInfo, op, count);
+  *eax = cpuInfo[0];
+  *ebx = cpuInfo[1];
+  *ecx = cpuInfo[2];
+  *edx = cpuInfo[3];
+#else
+#if defined(__i386__) && defined(__PIC__)
+  __asm__ __volatile__
+    ("mov %%ebx, %%edi;"
+     "cpuid;"
+     "xchgl %%ebx, %%edi;"
+     : "=a" (*eax), "=D" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "2" (count) : "cc");
+#else
+  __asm__ __volatile__
+    ("cpuid": "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "2" (count) : "cc");
+#endif
 #endif
 }
@@ -195,9 +222,9 @@ static __inline BLASLONG blas_quickdivide(BLASLONG x, BLASLONG y){
 #else
 extern unsigned int blas_quick_divide_table[];
 
-static __inline int blas_quickdivide(unsigned int x, unsigned int y){
+static __inline unsigned int blas_quickdivide(unsigned int x, unsigned int y){
 
-  unsigned int result;
+  volatile unsigned int result;
 
   if (y <= 1) return x;
 
@@ -211,7 +238,6 @@ static __inline int blas_quickdivide(unsigned int x, unsigned int y){
 
   y = blas_quick_divide_table[y];
 
   __asm__ __volatile__ ("mull %0" :"=d" (result), "+a"(x) : "0" (y));
-
   return result;
 }
 #endif
@@ -224,7 +250,11 @@ static __inline int blas_quickdivide(unsigned int x, unsigned int y){
 #endif
 
 #define HUGE_PAGESIZE	( 2 << 20)
 
-#define BUFFER_SIZE	(32 << 20)
+#ifndef BUFFERSIZE
+#define BUFFER_SIZE	(32 << 22)
+#else
+#define BUFFER_SIZE	(32 << BUFFERSIZE)
+#endif
 
 #define SEEK_ADDRESS
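The cpuid change above is subtle but important: the plain cpuid wrapper now zeroes ECX explicitly, and the new cpuid_count helper exposes the subleaf argument, because leaves such as 0x4 (deterministic cache parameters) and 0x7 (structured extended feature flags) interpret ECX as a subleaf index and return undefined data when it holds a stale value; under MSVC the same job is done by __cpuidex. A minimal standalone sketch of the pattern, assuming an x86-64 GCC/Clang build (the non-PIC asm variant from the hunk above) and feature-bit positions as documented in the Intel SDM:

    /* Hedged sketch: query CPUID leaf 7, subleaf 0, a leaf that requires
       a well-defined ECX. AVX2 is EBX bit 5, AVX-512F is EBX bit 16. */
    #include <stdio.h>

    static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx){
      __asm__ __volatile__
        ("cpuid": "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "2" (count) : "cc");
    }

    int main(void){
      int eax, ebx, ecx, edx;
      cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);   /* subleaf 0, not leftover garbage */
      printf("AVX2:    %s\n", (ebx & (1 <<  5)) ? "yes" : "no");
      printf("AVX512F: %s\n", (ebx & (1 << 16)) ? "yes" : "no");
      return 0;
    }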
diff --git a/common_zarch.h b/common_zarch.h
index e105574e0..442bae821 100644
--- a/common_zarch.h
+++ b/common_zarch.h
@@ -34,9 +34,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define COMMON_ZARCH
 
 #define MB
-//__asm__ __volatile__ ("dmb ish" : : : "memory")
 #define WMB
-//__asm__ __volatile__ ("dmb ishst" : : : "memory")
+#define RMB
 
 #define INLINE inline
 
@@ -123,11 +122,7 @@ REALNAME:
 #endif
 
 #define HUGE_PAGESIZE	( 4 << 20)
 
-#if defined(CORTEXA57)
-#define BUFFER_SIZE	(20 << 20)
-#else
-#define BUFFER_SIZE	(16 << 20)
-#endif
+#define BUFFER_SIZE	(32 << 22)
 
 #define BASE_ADDRESS (START_ADDRESS - BUFFER_SIZE * MAX_CPU_NUMBER)
 
diff --git a/cpp_thread_test/CMakeLists.txt b/cpp_thread_test/CMakeLists.txt
new file mode 100644
index 000000000..5eccb12ce
--- /dev/null
+++ b/cpp_thread_test/CMakeLists.txt
@@ -0,0 +1,23 @@
+include_directories(${PROJECT_SOURCE_DIR})
+include_directories(${PROJECT_BINARY_DIR})
+
+enable_language(CXX)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -DADD${BU} -DCBLAS")
+
+if (USE_OPENMP)
+if (CPP_THREAD_SAFETY_TEST)
+  message(STATUS building thread safety test)
+  add_executable(dgemm_thread_safety dgemm_thread_safety.cpp)
+  target_link_libraries(dgemm_thread_safety ${OpenBLAS_LIBNAME})
+  add_test( dgemm_thread_safety ${CMAKE_CURRENT_BINARY_DIR}/dgemm_thread_safety)
+endif()
+
+
+if (CPP_THREAD_SAFETY_TEST OR CPP_THREAD_SAFETY_GEMV)
+  add_executable(dgemv_thread_safety dgemv_thread_safety.cpp)
+  target_link_libraries(dgemv_thread_safety ${OpenBLAS_LIBNAME})
+  add_test(dgemv_thread_safety ${CMAKE_CURRENT_BINARY_DIR}/dgemv_thread_safety)
+endif()
+
+endif()
diff --git a/cpp_thread_test/Makefile b/cpp_thread_test/Makefile
new file mode 100644
index 000000000..81e3470ef
--- /dev/null
+++ b/cpp_thread_test/Makefile
@@ -0,0 +1,14 @@
+include ../Makefile.rule
+
+all :: dgemv_tester dgemm_tester
+
+dgemv_tester :
+	$(CXX) $(COMMON_OPT) -Wall -Wextra -Wshadow -fopenmp -std=c++11 dgemv_thread_safety.cpp ../libopenblas.a -lpthread -o dgemv_tester
+	./dgemv_tester
+
+dgemm_tester : dgemv_tester
+	$(CXX) $(COMMON_OPT) -Wall -Wextra -Wshadow -fopenmp -std=c++11 dgemm_thread_safety.cpp ../libopenblas.a -lpthread -o dgemm_tester
+	./dgemm_tester
+
+clean ::
+	rm -f dgemv_tester dgemm_tester
diff --git a/cpp_thread_test/cpp_thread_safety_common.h b/cpp_thread_test/cpp_thread_safety_common.h
new file mode 100644
index 000000000..8005369a8
--- /dev/null
+++ b/cpp_thread_test/cpp_thread_safety_common.h
@@ -0,0 +1,63 @@
+inline void pauser(){
+    /// a portable way to pause a program
+    std::string dummy;
+    std::cout << "Press enter to continue...";
+    std::getline(std::cin, dummy);
+}
+
+void FailIfThreadsAreZero(uint32_t numConcurrentThreads) {
+	if(numConcurrentThreads == 0) {
+		std::cout<<"ERROR: Invalid parameter 0 for number of concurrent calls into OpenBLAS!"<<std::endl;
+		std::cout<<"CBLAS DGEMV thread safety test FAILED!"<<std::endl;
+		exit(-1);
+	}
+}
+
+void InitializeMatrices(std::vector<std::vector<double>>& matBlock, std::mt19937_64& PRNG, std::uniform_real_distribution<double>& rngdist, const blasint randomMatSize, const uint32_t numConcurrentThreads, const uint32_t numMat){
+	for(uint32_t i=0; i<numMat; i++){
+		for(uint32_t j=0; j<static_cast<uint32_t>(randomMatSize*randomMatSize); j++){
+			matBlock[i][j] = rngdist(PRNG);
+		}
+	}
+	for(uint32_t i=numMat; i<(numConcurrentThreads*numMat); i+=numMat){
+		for(uint32_t j=0; j<numMat; j++){
+			matBlock[i+j] = matBlock[j];
+		}
+	}
+}
+
+void InitializeVectors(std::vector<std::vector<double>>& vecBlock, std::mt19937_64& PRNG, std::uniform_real_distribution<double>& rngdist, const blasint randomMatSize, const uint32_t numConcurrentThreads, const uint32_t numVec){
+	for(uint32_t i=0; i<numVec; i++){
+		for(uint32_t j=0; j<static_cast<uint32_t>(randomMatSize); j++){
+			vecBlock[i][j] = rngdist(PRNG);
+		}
+	}
+	for(uint32_t i=numVec; i<(numConcurrentThreads*numVec); i+=numVec){
+		for(uint32_t j=0; j<numVec; j++){
+			vecBlock[i+j] = vecBlock[j];
+		}
+	}
+}
+
+std::mt19937_64 InitializeRandomNumberGenerator(){
+	std::random_device rd;
+	std::mt19937_64 PRNG(rd());
+	std::uniform_real_distribution<double> rngdist{-1.0, 1.0};
+	//make sure the internal state of the PRNG is properly mixed by generating 10M random numbers
+	//PRNGs often have unreliable distribution uniformity and other statistical properties before their internal state is sufficiently mixed
+	for (uint32_t i=0;i<10000000;i++) rngdist(PRNG);
+	return PRNG;
+}
+
+void PrintMatrices(const std::vector<std::vector<double>>& matBlock, const blasint randomMatSize, const uint32_t numConcurrentThreads, const uint32_t numMat){
+	for (uint32_t i=0;i<numConcurrentThreads*numMat;i++){
+		std::cout<<i<<std::endl;
+		for (uint32_t j = 0; j < static_cast<uint32_t>(randomMatSize); j++){
+			for (uint32_t k = 0; k < static_cast<uint32_t>(randomMatSize); k++){
+				std::cout<<matBlock[i][j*randomMatSize + k]<<" ";
+			}
+			std::cout<<std::endl;
+		}
+		std::cout<<std::endl;
+	}
+}
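The helper header above carries the core idea of both testers: only the first matrix/vector block is filled from the PRNG (which is deliberately warmed up with ten million draws first), and every other thread's block is a bit-exact copy of it, so after the concurrent BLAS calls any divergence between blocks can only come from a thread-safety problem. A compressed illustration of that setup, assuming the header is on the include path; the blasint typedef here is a hypothetical stand-in for the one normally supplied by ../cblas.h:

    // Hypothetical wiring of the helpers: 4 threads, one matrix per thread;
    // block 0 is random, blocks 1..3 must come out as exact copies of it.
    #include <cstdint>
    #include <cstdlib>
    #include <iostream>
    #include <random>
    #include <string>
    #include <vector>
    typedef int blasint;                  // assumption: matches the build's blasint
    #include "cpp_thread_safety_common.h"

    int main(){
      const blasint n = 256;
      const uint32_t threads = 4;
      std::mt19937_64 PRNG = InitializeRandomNumberGenerator();
      std::uniform_real_distribution<double> rngdist{-1.0, 1.0};
      std::vector<std::vector<double>> mats(threads, std::vector<double>(n*n));
      InitializeMatrices(mats, PRNG, rngdist, n, threads, 1);
      std::cout << (mats[3] == mats[0] ? "copies match\n" : "copies differ\n");
      return 0;
    }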
diff --git a/cpp_thread_test/dgemm_thread_safety.cpp b/cpp_thread_test/dgemm_thread_safety.cpp
new file mode 100644
--- /dev/null
+++ b/cpp_thread_test/dgemm_thread_safety.cpp
+#include <iostream>
+#include <random>
+#include <future>
+#include <cmath>
+#include <omp.h>
+#include "../cblas.h"
+#include "cpp_thread_safety_common.h"
+
+void launch_cblas_dgemm(double* A, double* B, double* C, const blasint randomMatSize){
+	cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, randomMatSize, randomMatSize, randomMatSize, 1.0, A, randomMatSize, B, randomMatSize, 0.1, C, randomMatSize);
+}
+
+int main(int argc, char* argv[]){
+	blasint randomMatSize = 1024; //dimension of the random square matrices used
+	uint32_t numConcurrentThreads = 96; //number of concurrent calls of the functions being tested
+	uint32_t numTestRounds = 16; //number of testing rounds before success exit
+	uint32_t maxHwThreads = omp_get_max_threads();
+
+	if (maxHwThreads < 96)
+		numConcurrentThreads = maxHwThreads;
+
+	if (argc > 4){
+		std::cout<<"ERROR: too many arguments for thread safety tester"<<std::endl;
+		abort();
+	}
+	if(argc == 4){
+		std::vector<std::string> cliArgs;
+		for (int i = 1; i < argc; i++){
+			cliArgs.push_back(argv[i]);
+			std::cout<<cliArgs.at(i-1)<<std::endl;
+		}
+		randomMatSize = std::stoul(cliArgs.at(0));
+		numConcurrentThreads = std::stoul(cliArgs.at(1));
+		numTestRounds = std::stoul(cliArgs.at(2));
+	}
+
+	std::mt19937_64 PRNG = InitializeRandomNumberGenerator();
+	std::uniform_real_distribution<double> rngdist{-1.0, 1.0};
+	std::vector<std::vector<double>> matBlock(numConcurrentThreads*3);
+	std::vector<std::future<void>> futureBlock(numConcurrentThreads);
+
+	std::cout<<"*----------------------------*\n";
+	std::cout<<"| DGEMM thread safety tester |\n";
+	std::cout<<"*----------------------------*\n";
+	std::cout<<"Size of random matrices(N=M=K): "<<randomMatSize<<std::endl;
+	std::cout<<"Number of concurrent calls into OpenBLAS : "<<numConcurrentThreads<<std::endl;
+	std::cout<<"Number of testing rounds : "<<numTestRounds<<std::endl;
+	std::cout<<"This test will need "<<(static_cast<uint64_t>(randomMatSize*randomMatSize)*numConcurrentThreads*3*8)/static_cast<double>(1024*1024)<<" MiB of RAM\n"<<std::endl;
+
+	FailIfThreadsAreZero(numConcurrentThreads);
+
+	for(uint32_t i=0; i<(numConcurrentThreads*3); i++){
+		matBlock[i].resize(randomMatSize*randomMatSize);
+	}
+
+	InitializeMatrices(matBlock, PRNG, rngdist, randomMatSize, numConcurrentThreads, 3);
+
+	for (uint32_t R = 0; R < numTestRounds; R++){
+		for (uint32_t i = 0; i < numConcurrentThreads; i++){
+			futureBlock[i] = std::async(std::launch::async, launch_cblas_dgemm, &matBlock[i*3][0], &matBlock[(i*3)+1][0], &matBlock[(i*3)+2][0], randomMatSize);
+		}
+		for (uint32_t i = 0; i < numConcurrentThreads; i++){
+			futureBlock[i].get();
+		}
+		for (uint32_t i = 3; i < (numConcurrentThreads*3); i += 3){
+			for(uint32_t j = 0; j < static_cast<uint32_t>(randomMatSize*randomMatSize); j++){
+				if (std::abs(matBlock[i+2][j] - matBlock[2][j]) > 1.0E-13){ //i+2 is the index of matrix C, for a given thread
+					std::cout<<"ERROR: one of the threads returned a different result! Index : "<<i+2<<std::endl;
+					std::cout<<"CBLAS DGEMM thread safety test FAILED!"<<std::endl;
+					return -1;
+				}
+			}
+		}
+	}
+	std::cout<<"CBLAS DGEMM thread safety test PASSED!"<<std::endl;
+	return 0;
+}
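Both testers drive the library through the same fan-out/join shape: one std::future per concurrent call, std::launch::async so a real thread is spawned instead of a deferred evaluation, and an unconditional get() on every future, which both joins the thread and rethrows any exception raised inside the call. A condensed sketch of that shape, with some_blas_call as a hypothetical stand-in for the launch_cblas_dgemm / launch_cblas_dgemv wrappers:

    #include <cstddef>
    #include <future>
    #include <vector>

    static void some_blas_call(double* data, int n){ (void)data; (void)n; /* a cblas_* call in the real testers */ }

    void run_concurrently(std::vector<std::vector<double>>& blocks, int n){
      std::vector<std::future<void>> futures(blocks.size());
      for (std::size_t i = 0; i < blocks.size(); i++)
        futures[i] = std::async(std::launch::async, some_blas_call, blocks[i].data(), n);
      for (std::size_t i = 0; i < futures.size(); i++)
        futures[i].get();                 // join and propagate any exception
    }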
diff --git a/cpp_thread_test/dgemv_thread_safety.cpp b/cpp_thread_test/dgemv_thread_safety.cpp
new file mode 100644
--- /dev/null
+++ b/cpp_thread_test/dgemv_thread_safety.cpp
+#include <iostream>
+#include <random>
+#include <future>
+#include <cmath>
+#include <omp.h>
+#include "../cblas.h"
+#include "cpp_thread_safety_common.h"
+
+void launch_cblas_dgemv(double* A, double* x, double* y, const blasint randomMatSize){
+	const blasint inc = 1;
+	cblas_dgemv(CblasColMajor, CblasNoTrans, randomMatSize, randomMatSize, 1.0, A, randomMatSize, x, inc, 0.1, y, inc);
+}
+
+int main(int argc, char* argv[]){
+	blasint randomMatSize = 1024; //dimension of the random square matrices and vectors being used
+	uint32_t numConcurrentThreads = 52; //number of concurrent calls of the functions being tested
+	uint32_t numTestRounds = 16; //number of testing rounds before success exit
+	uint32_t maxHwThreads = omp_get_max_threads();
+
+	if (maxHwThreads < 52)
+		numConcurrentThreads = maxHwThreads;
+
+	if (argc > 4){
+		std::cout<<"ERROR: too many arguments for thread safety tester"<<std::endl;
+		abort();
+	}
+	if(argc == 4){
+		std::vector<std::string> cliArgs;
+		for (int i = 1; i < argc; i++){
+			cliArgs.push_back(argv[i]);
+			std::cout<<cliArgs.at(i-1)<<std::endl;
+		}
+		randomMatSize = std::stoul(cliArgs.at(0));
+		numConcurrentThreads = std::stoul(cliArgs.at(1));
+		numTestRounds = std::stoul(cliArgs.at(2));
+	}
+
+	std::mt19937_64 PRNG = InitializeRandomNumberGenerator();
+	std::uniform_real_distribution<double> rngdist{-1.0, 1.0};
+	std::vector<std::vector<double>> matBlock(numConcurrentThreads);
+	std::vector<std::vector<double>> vecBlock(numConcurrentThreads*2);
+	std::vector<std::future<void>> futureBlock(numConcurrentThreads);
+
+	std::cout<<"*----------------------------*\n";
+	std::cout<<"| DGEMV thread safety tester |\n";
+	std::cout<<"*----------------------------*\n";
+	std::cout<<"Size of random matrices and vectors(N=M): "<<randomMatSize<<std::endl;
+	std::cout<<"Number of concurrent calls into OpenBLAS : "<<numConcurrentThreads<<std::endl;
+	std::cout<<"Number of testing rounds : "<<numTestRounds<<std::endl;
+	std::cout<<"This test will need "<<((static_cast<uint64_t>(randomMatSize*randomMatSize)*numConcurrentThreads*8)+(static_cast<uint64_t>(randomMatSize)*numConcurrentThreads*8*2))/static_cast<double>(1024*1024)<<" MiB of RAM\n"<<std::endl;
+
+	FailIfThreadsAreZero(numConcurrentThreads);
+
+	for(uint32_t i=0; i<numConcurrentThreads; i++){
+		matBlock[i].resize(randomMatSize*randomMatSize);
+	}
+	for(uint32_t i=0; i<(numConcurrentThreads*2); i++){
+		vecBlock[i].resize(randomMatSize);
+	}
+
+	InitializeMatrices(matBlock, PRNG, rngdist, randomMatSize, numConcurrentThreads, 1);
+	InitializeVectors(vecBlock, PRNG, rngdist, randomMatSize, numConcurrentThreads, 2);
+
+	for (uint32_t R = 0; R < numTestRounds; R++){
+		for (uint32_t i = 0; i < numConcurrentThreads; i++){
+			futureBlock[i] = std::async(std::launch::async, launch_cblas_dgemv, &matBlock[i][0], &vecBlock[i*2][0], &vecBlock[(i*2)+1][0], randomMatSize);
+		}
+		for (uint32_t i = 0; i < numConcurrentThreads; i++){
+			futureBlock[i].get();
+		}
+		for (uint32_t i = 2; i < (numConcurrentThreads*2); i += 2){
+			for (uint32_t j = 0; j < static_cast<uint32_t>(randomMatSize); j++){
+				if (std::abs(vecBlock[i+1][j] - vecBlock[1][j]) > 1.0E-13){ //i+1 is the index of vector y, for a given thread
+					std::cout<<"ERROR: one of the threads returned a different result! Index : "<<i+1<<std::endl;
+					std::cout<<"CBLAS DGEMV thread safety test FAILED!"<<std::endl;
+					return -1;
+				}
+			}
+		}
+	}
+	std::cout<<"CBLAS DGEMV thread safety test PASSED!"<<std::endl;
+	return 0;
+}
diff --git a/cpuid_arm64.c b/cpuid_arm64.c
--- a/cpuid_arm64.c
+++ b/cpuid_arm64.c
 #include <string.h>
+#ifdef OS_DARWIN
+#include <sys/sysctl.h>
+int32_t value;
+size_t length=sizeof(value);
+#endif
 
 #define CPU_UNKNOWN		0
 #define CPU_ARMV8		1
@@ -34,13 +39,19 @@
 #define CPU_CORTEXA57		3
 #define CPU_CORTEXA72		4
 #define CPU_CORTEXA73		5
+#define CPU_NEOVERSEN1		11
 // Qualcomm
 #define CPU_FALKOR		6
 // Cavium
 #define CPU_THUNDERX		7
 #define CPU_THUNDERX2T99	8
+#define CPU_THUNDERX3T110	12
 //Hisilicon
 #define CPU_TSV110		9
+// Ampere
+#define CPU_EMAG8180		10
+// Apple
+#define CPU_VORTEX		13
 
 static char *cpuname[] = {
   "UNKNOWN",
@@ -52,7 +63,11 @@ static char *cpuname[] = {
   "FALKOR",
   "THUNDERX",
   "THUNDERX2T99",
-  "TSV110"
+  "TSV110",
+  "EMAG8180",
+  "NEOVERSEN1",
+  "THUNDERX3T110",
+  "VORTEX"
 };
 
 static char *cpuname_lower[] = {
@@ -65,13 +80,17 @@ static char *cpuname_lower[] = {
   "falkor",
   "thunderx",
   "thunderx2t99",
-  "tsv110"
+  "tsv110",
+  "emag8180",
+  "neoversen1",
+  "thunderx3t110",
+  "vortex"
 };
 
 int get_feature(char *search)
 {
 
-#ifdef linux
+#ifdef __linux
 	FILE *infile;
 	char buffer[2048], *p,*t;
 	p = (char *) NULL ;
@@ -94,7 +113,7 @@ int get_feature(char *search)
 	if( p == NULL ) return 0;
 
 	t = strtok(p," ");
-	while( t = strtok(NULL," "))
+	while( (t = strtok(NULL," ")))
 	{
 		if (!strcmp(t, search))   { return(1); }
 	}
@@ -107,7 +126,7 @@ int get_feature(char *search)
 int detect(void)
 {
 
-#ifdef linux
+#ifdef __linux
 
 	FILE *infile;
 	char buffer[512], *p, *cpu_part = NULL, *cpu_implementer = NULL;
@@ -140,6 +159,8 @@ int detect(void)
 				return CPU_CORTEXA72;
 			else if (strstr(cpu_part, "0xd09"))
 				return CPU_CORTEXA73;
+			else if (strstr(cpu_part, "0xd0c"))
+				return CPU_NEOVERSEN1;
 		}
 		// Qualcomm
 		else if (strstr(cpu_implementer, "0x51") && strstr(cpu_part, "0xc00"))
@@ -149,9 +170,14 @@ int detect(void)
 			return CPU_THUNDERX;
 		else if (strstr(cpu_implementer, "0x43") && strstr(cpu_part,
"0x0b8")) + return CPU_THUNDERX3T110; // HiSilicon else if (strstr(cpu_implementer, "0x48") && strstr(cpu_part, "0xd01")) return CPU_TSV110; + // Ampere + else if (strstr(cpu_implementer, "0x50") && strstr(cpu_part, "0x000")) + return CPU_EMAG8180; } p = (char *) NULL ; @@ -180,6 +206,12 @@ int detect(void) } +#else +#ifdef DARWIN + sysctlbyname("hw.cpufamily",&value,&length,NULL,0); + if (value ==131287967) return CPU_VORTEX; +#endif + return CPU_ARMV8; #endif return CPU_UNKNOWN; @@ -206,6 +238,36 @@ void get_subdirname(void) printf("arm64"); } +void get_cpucount(void) +{ +int n=0; + +#ifdef __linux + FILE *infile; + char buffer[2048], *p,*t; + p = (char *) NULL ; + + infile = fopen("/proc/cpuinfo", "r"); + + while (fgets(buffer, sizeof(buffer), infile)) + { + + if (!strncmp("processor", buffer, 9)) + n++; + } + + fclose(infile); + + printf("#define NUM_CORES %d\n",n); +#endif +#ifdef DARWIN + sysctlbyname("hw.physicalcpu_max",&value,&length,NULL,0); + printf("#define NUM_CORES %d\n",value); +#endif +} + + + void get_cpuconfig(void) { @@ -251,6 +313,20 @@ void get_cpuconfig(void) printf("#define DTB_DEFAULT_ENTRIES 64\n"); printf("#define DTB_SIZE 4096\n"); break; + case CPU_NEOVERSEN1: + printf("#define %s\n", cpuname[d]); + printf("#define L1_CODE_SIZE 65536\n"); + printf("#define L1_CODE_LINESIZE 64\n"); + printf("#define L1_CODE_ASSOCIATIVE 4\n"); + printf("#define L1_DATA_SIZE 65536\n"); + printf("#define L1_DATA_LINESIZE 64\n"); + printf("#define L1_DATA_ASSOCIATIVE 4\n"); + printf("#define L2_SIZE 1048576\n"); + printf("#define L2_LINESIZE 64\n"); + printf("#define L2_ASSOCIATIVE 16\n"); + printf("#define DTB_DEFAULT_ENTRIES 64\n"); + printf("#define DTB_SIZE 4096\n"); + break; case CPU_FALKOR: printf("#define FALKOR\n"); @@ -308,7 +384,51 @@ void get_cpuconfig(void) printf("#define DTB_DEFAULT_ENTRIES 64 \n"); printf("#define DTB_SIZE 4096 \n"); break; + + case CPU_EMAG8180: + // Minimum parameters for ARMv8 (based on A53) + printf("#define EMAG8180\n"); + printf("#define L1_CODE_SIZE 32768\n"); + printf("#define L1_DATA_SIZE 32768\n"); + printf("#define L1_DATA_LINESIZE 64\n"); + printf("#define L2_SIZE 262144\n"); + printf("#define L2_LINESIZE 64\n"); + printf("#define DTB_DEFAULT_ENTRIES 64\n"); + printf("#define DTB_SIZE 4096\n"); + break; + + case CPU_THUNDERX3T110: + printf("#define THUNDERX3T110 \n"); + printf("#define L1_CODE_SIZE 65536 \n"); + printf("#define L1_CODE_LINESIZE 64 \n"); + printf("#define L1_CODE_ASSOCIATIVE 8 \n"); + printf("#define L1_DATA_SIZE 32768 \n"); + printf("#define L1_DATA_LINESIZE 64 \n"); + printf("#define L1_DATA_ASSOCIATIVE 8 \n"); + printf("#define L2_SIZE 524288 \n"); + printf("#define L2_LINESIZE 64 \n"); + printf("#define L2_ASSOCIATIVE 8 \n"); + printf("#define L3_SIZE 94371840 \n"); + printf("#define L3_LINESIZE 64 \n"); + printf("#define L3_ASSOCIATIVE 32 \n"); + printf("#define DTB_DEFAULT_ENTRIES 64 \n"); + printf("#define DTB_SIZE 4096 \n"); + break; +#ifdef DARWIN + case CPU_VORTEX: + printf("#define VORTEX \n"); + sysctlbyname("hw.l1icachesize",&value,&length,NULL,0); + printf("#define L1_CODE_SIZE %d \n",value); + sysctlbyname("hw.cachelinesize",&value,&length,NULL,0); + printf("#define L1_CODE_LINESIZE %d \n",value); + sysctlbyname("hw.l1dcachesize",&value,&length,NULL,0); + printf("#define L1_DATA_SIZE %d \n",value); + sysctlbyname("hw.l2dcachesize",&value,&length,NULL,0); + printf("#define L2_SIZE %d \n",value); + break; +#endif } + get_cpucount(); } @@ -321,7 +441,7 @@ void get_libname(void) void get_features(void) { -#ifdef 
linux +#ifdef __linux FILE *infile; char buffer[2048], *p,*t; p = (char *) NULL ; @@ -344,12 +464,10 @@ void get_features(void) if( p == NULL ) return; t = strtok(p," "); - while( t = strtok(NULL," ")) + while( (t = strtok(NULL," "))) { } #endif return; } - - diff --git a/cpuid_mips.c b/cpuid_mips.c index 6f2932c94..e6e837f73 100644 --- a/cpuid_mips.c +++ b/cpuid_mips.c @@ -73,16 +73,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CPU_UNKNOWN 0 #define CPU_P5600 1 #define CPU_1004K 2 +#define CPU_24K 3 static char *cpuname[] = { "UNKNOWN", "P5600", - "1004K" + "MIPS1004K", + "MIPS24K" }; int detect(void){ -#ifdef linux +#ifdef __linux FILE *infile; char buffer[512], *p; @@ -105,6 +107,8 @@ int detect(void){ return CPU_P5600; } else if (strstr(p, "1004K")) { return CPU_1004K; + } else if (strstr(p, " 24K")) { + return CPU_24K; } else return CPU_UNKNOWN; } @@ -121,7 +125,7 @@ void get_architecture(void){ } void get_subarchitecture(void){ - if(detect()==CPU_P5600|| detect()==CPU_1004K){ + if(detect()==CPU_P5600|| detect()==CPU_1004K|| detect()==CPU_24K){ printf("P5600"); }else{ printf("UNKNOWN"); @@ -146,7 +150,15 @@ void get_cpuconfig(void){ printf("#define MIPS1004K\n"); printf("#define L1_DATA_SIZE 32768\n"); printf("#define L1_DATA_LINESIZE 32\n"); - printf("#define L2_SIZE 26144\n"); + printf("#define L2_SIZE 262144\n"); + printf("#define DTB_DEFAULT_ENTRIES 8\n"); + printf("#define DTB_SIZE 4096\n"); + printf("#define L2_ASSOCIATIVE 4\n"); + } else if (detect()==CPU_24K) { + printf("#define MIPS24K\n"); + printf("#define L1_DATA_SIZE 32768\n"); + printf("#define L1_DATA_LINESIZE 32\n"); + printf("#define L2_SIZE 32768\n"); printf("#define DTB_DEFAULT_ENTRIES 8\n"); printf("#define DTB_SIZE 4096\n"); printf("#define L2_ASSOCIATIVE 4\n"); @@ -159,7 +171,9 @@ void get_libname(void){ if(detect()==CPU_P5600) { printf("p5600\n"); } else if (detect()==CPU_1004K) { - printf("1004K\n"); + printf("mips1004K\n"); + } else if (detect()==CPU_24K) { + printf("mips24K\n"); }else{ printf("mips\n"); } diff --git a/cpuid_mips64.c b/cpuid_mips64.c index 0e32bfc0b..674b65908 100644 --- a/cpuid_mips64.c +++ b/cpuid_mips64.c @@ -70,19 +70,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* or implied, of The University of Texas at Austin. 
*/ /*********************************************************************/ -#define CPU_UNKNOWN 0 -#define CPU_SICORTEX 1 -#define CPU_LOONGSON3A 2 -#define CPU_LOONGSON3B 3 -#define CPU_I6400 4 -#define CPU_P6600 5 -#define CPU_I6500 6 +#define CPU_UNKNOWN 0 +#define CPU_SICORTEX 1 +#define CPU_LOONGSON3R3 2 +#define CPU_LOONGSON3R4 3 +#define CPU_I6400 4 +#define CPU_P6600 5 +#define CPU_I6500 6 static char *cpuname[] = { "UNKNOWN", "SICORTEX", - "LOONGSON3A", - "LOONGSON3B", + "LOONGSON3R3", + "LOONGSON3R4", "I6400", "P6600", "I6500" @@ -95,43 +95,8 @@ int detect(void){ char buffer[512], *p; p = (char *)NULL; - infile = fopen("/proc/cpuinfo", "r"); - while (fgets(buffer, sizeof(buffer), infile)){ - if (!strncmp("cpu", buffer, 3)){ - p = strchr(buffer, ':') + 2; -#if 0 - fprintf(stderr, "%s\n", p); -#endif - break; - } - } - - fclose(infile); - - if(p != NULL){ - if (strstr(p, "Loongson-3A")){ - return CPU_LOONGSON3A; - }else if(strstr(p, "Loongson-3B")){ - return CPU_LOONGSON3B; - }else if (strstr(p, "Loongson-3")){ - infile = fopen("/proc/cpuinfo", "r"); - p = (char *)NULL; - while (fgets(buffer, sizeof(buffer), infile)){ - if (!strncmp("system type", buffer, 11)){ - p = strchr(buffer, ':') + 2; - break; - } - } - fclose(infile); - if (strstr(p, "loongson3a")) - return CPU_LOONGSON3A; - }else{ - return CPU_SICORTEX; - } - } //Check model name for Loongson3 infile = fopen("/proc/cpuinfo", "r"); - p = (char *)NULL; while (fgets(buffer, sizeof(buffer), infile)){ if (!strncmp("model name", buffer, 10)){ p = strchr(buffer, ':') + 2; @@ -140,14 +105,16 @@ int detect(void){ } fclose(infile); if(p != NULL){ - if (strstr(p, "Loongson-3A")){ - return CPU_LOONGSON3A; - }else if(strstr(p, "Loongson-3B")){ - return CPU_LOONGSON3B; - } + if (strstr(p, "Loongson-3A3000") || strstr(p, "Loongson-3B3000")){ + return CPU_LOONGSON3R3; + }else if(strstr(p, "Loongson-3A4000") || strstr(p, "Loongson-3B4000")){ + return CPU_LOONGSON3R4; + } else{ + return CPU_SICORTEX; } #endif return CPU_UNKNOWN; + } } char *get_corename(void){ @@ -159,10 +126,10 @@ void get_architecture(void){ } void get_subarchitecture(void){ - if(detect()==CPU_LOONGSON3A) { - printf("LOONGSON3A"); - }else if(detect()==CPU_LOONGSON3B){ - printf("LOONGSON3B"); + if(detect()==CPU_LOONGSON3R3) { + printf("LOONGSON3R3"); + }else if(detect()==CPU_LOONGSON3R4){ + printf("LOONGSON3R4"); }else if(detect()==CPU_I6400){ printf("I6400"); }else if(detect()==CPU_P6600){ @@ -179,8 +146,8 @@ void get_subdirname(void){ } void get_cpuconfig(void){ - if(detect()==CPU_LOONGSON3A) { - printf("#define LOONGSON3A\n"); + if(detect()==CPU_LOONGSON3R3) { + printf("#define LOONGSON3R3\n"); printf("#define L1_DATA_SIZE 65536\n"); printf("#define L1_DATA_LINESIZE 32\n"); printf("#define L2_SIZE 512488\n"); @@ -188,8 +155,8 @@ void get_cpuconfig(void){ printf("#define DTB_DEFAULT_ENTRIES 64\n"); printf("#define DTB_SIZE 4096\n"); printf("#define L2_ASSOCIATIVE 4\n"); - }else if(detect()==CPU_LOONGSON3B){ - printf("#define LOONGSON3B\n"); + }else if(detect()==CPU_LOONGSON3R4){ + printf("#define LOONGSON3R4\n"); printf("#define L1_DATA_SIZE 65536\n"); printf("#define L1_DATA_LINESIZE 32\n"); printf("#define L2_SIZE 512488\n"); @@ -237,10 +204,10 @@ void get_cpuconfig(void){ } void get_libname(void){ - if(detect()==CPU_LOONGSON3A) { - printf("loongson3a\n"); - }else if(detect()==CPU_LOONGSON3B) { - printf("loongson3b\n"); + if(detect()==CPU_LOONGSON3R3) { + printf("loongson3r3\n"); + }else if(detect()==CPU_LOONGSON3R4) { + printf("loongson3r4\n"); }else 
if(detect()==CPU_I6400) { printf("i6400\n"); }else if(detect()==CPU_P6600) { printf("p6600\n"); diff --git a/cpuid_power.c b/cpuid_power.c index d5ba6fb2c..2526e8d0e 100644 --- a/cpuid_power.c +++ b/cpuid_power.c @@ -38,6 +38,7 @@ #include #ifdef _AIX +#include <sys/systemcfg.h> #include #endif #ifdef __APPLE__ @@ -57,6 +58,7 @@ #define CPUTYPE_PPCG4 7 #define CPUTYPE_POWER8 8 #define CPUTYPE_POWER9 9 +#define CPUTYPE_POWER10 10 char *cpuname[] = { "UNKNOWN", @@ -68,7 +70,8 @@ char *cpuname[] = { "CELL", "PPCG4", "POWER8", - "POWER9" + "POWER9", + "POWER10" }; char *lowercpuname[] = { @@ -81,7 +84,8 @@ char *lowercpuname[] = { "cell", "ppcg4", "power8", - "power9" + "power9", + "power10" }; char *corename[] = { @@ -94,12 +98,13 @@ char *corename[] = { "CELL", "PPCG4", "POWER8", - "POWER9" + "POWER9", + "POWER10" }; int detect(void){ -#ifdef linux +#ifdef __linux FILE *infile; char buffer[512], *p; @@ -125,6 +130,7 @@ int detect(void){ if (!strncasecmp(p, "POWER7", 6)) return CPUTYPE_POWER6; if (!strncasecmp(p, "POWER8", 6)) return CPUTYPE_POWER8; if (!strncasecmp(p, "POWER9", 6)) return CPUTYPE_POWER9; + if (!strncasecmp(p, "POWER10", 7)) return CPUTYPE_POWER10; if (!strncasecmp(p, "Cell", 4)) return CPUTYPE_CELL; if (!strncasecmp(p, "7447", 4)) return CPUTYPE_PPCG4; @@ -132,34 +138,19 @@ int detect(void){ #endif #ifdef _AIX - FILE *infile; - char buffer[512], *p; - - p = (char *)NULL; - infile = popen("prtconf|grep 'Processor Type'", "r"); - while (fgets(buffer, sizeof(buffer), infile)){ - if (!strncmp("Pro", buffer, 3)){ - p = strchr(buffer, ':') + 2; -#if 0 - fprintf(stderr, "%s\n", p); -#endif - break; - } - } + // Cast from int to unsigned to ensure comparisons work for all bits in + // the bit mask, even the top bit + unsigned implementation = (unsigned) _system_configuration.implementation; - pclose(infile); - - if (!strncasecmp(p, "POWER3", 6)) return CPUTYPE_POWER3; - if (!strncasecmp(p, "POWER4", 6)) return CPUTYPE_POWER4; - if (!strncasecmp(p, "PPC970", 6)) return CPUTYPE_PPC970; - if (!strncasecmp(p, "POWER5", 6)) return CPUTYPE_POWER5; - if (!strncasecmp(p, "POWER6", 6)) return CPUTYPE_POWER6; - if (!strncasecmp(p, "POWER7", 6)) return CPUTYPE_POWER6; - if (!strncasecmp(p, "POWER8", 6)) return CPUTYPE_POWER8; - if (!strncasecmp(p, "POWER9", 6)) return CPUTYPE_POWER9; - if (!strncasecmp(p, "Cell", 4)) return CPUTYPE_CELL; - if (!strncasecmp(p, "7447", 4)) return CPUTYPE_PPCG4; - return CPUTYPE_POWER5; + if (implementation >= 0x40000u) return CPUTYPE_POWER10; + else if (implementation & 0x20000) return CPUTYPE_POWER9; + else if (implementation & 0x10000) return CPUTYPE_POWER8; + else if (implementation & 0x08000) return CPUTYPE_POWER6; // POWER 7 + else if (implementation & 0x04000) return CPUTYPE_POWER6; + else if (implementation & 0x02000) return CPUTYPE_POWER5; + else if (implementation & 0x01000) return CPUTYPE_POWER4; // MPC7450 + else if (implementation & 0x00800) return CPUTYPE_POWER4; + else return CPUTYPE_POWER3; #endif #ifdef __APPLE__ @@ -179,6 +170,9 @@ int detect(void){ int id; __asm __volatile("mfpvr %0" : "=r"(id)); switch ( id >> 16 ) { + case 0x80: // POWER10 + return CPUTYPE_POWER10; + break; case 0x4e: // POWER9 return CPUTYPE_POWER9; break; @@ -220,6 +214,8 @@ switch ( id >> 16 ) { return CPUTYPE_UNKNOWN; } #endif + + return CPUTYPE_UNKNOWN; } void get_architecture(void){ diff --git a/cpuid_riscv64.c b/cpuid_riscv64.c new file mode 100644 index 000000000..0eb50e001 --- /dev/null +++ b/cpuid_riscv64.c @@ -0,0 +1,113 @@
+/***************************************************************************** +Copyright (c) 2011-2014, The OpenBLAS Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +**********************************************************************************/ + + +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. 
*/ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#define CPU_UNKNOWN 0 +#define CPU_C910V 1 + +static char *cpuname[] = { + "UNKNOWN", + "C910V" +}; + +int detect(void){ + return CPU_UNKNOWN; +} + +char *get_corename(void){ + return cpuname[detect()]; +} + +void get_architecture(void){ + printf("RISCV64"); +} + +void get_subarchitecture(void){ +} + +void get_subdirname(void){ + printf("riscv64"); +} + +void get_cpuconfig(void){ + printf("#define UNKNOWN\n"); + printf("#define L1_DATA_SIZE 65536\n"); + printf("#define L1_DATA_LINESIZE 32\n"); + printf("#define L2_SIZE 512488\n"); + printf("#define L2_LINESIZE 32\n"); + printf("#define DTB_DEFAULT_ENTRIES 64\n"); + printf("#define DTB_SIZE 4096\n"); + printf("#define L2_ASSOCIATIVE 4\n"); +} + +void get_libname(void){ + printf("riscv64\n"); +} diff --git a/cpuid_x86.c b/cpuid_x86.c index 884d4b78a..aca37da45 100644 --- a/cpuid_x86.c +++ b/cpuid_x86.c @@ -202,7 +202,7 @@ int support_avx(){ if ((ecx & (1 << 28)) != 0 && (ecx & (1 << 27)) != 0 && (ecx & (1 << 26)) != 0){ xgetbv(0, &eax, &edx); if((eax & 6) == 6){ - ret=1; //OS support AVX + ret=1; //OS supports saving xmm and ymm registers (6 = (1<<1) | (1<<2)) } } return ret; @@ -219,8 +219,8 @@ int support_avx2(){ if (!support_avx()) return 0; cpuid(7, &eax, &ebx, &ecx, &edx); - if((ebx & (1<<7)) != 0) - ret=1; //OS supports AVX2 + if((ebx & (1<<5)) != 0) + ret=1; //CPU supports AVX2 return ret; #else return 0; @@ -235,20 +235,36 @@ int support_avx512(){ if (!support_avx()) return 0; cpuid(7, &eax, &ebx, &ecx, &edx); - if((ebx & 32) != 32){ - ret=0; //OS does not even support AVX2 + if((ebx & (1<<5)) == 0){ + ret=0; //cpu does not have avx2 flag } - if((ebx & (1<<31)) != 0){ + if((ebx & (1<<31)) != 0){ //AVX512VL flag xgetbv(0, &eax, &edx); if((eax & 0xe0) == 0xe0) - ret=1; //OS supports AVX512VL - } + ret=1; //OS supports saving zmm registers + } return ret; #else return 0; #endif } +int support_avx512_bf16(){ +#if !defined(NO_AVX) && !defined(NO_AVX512) + int eax, ebx, ecx, edx; + int ret=0; + + if (!support_avx512()) + return 0; + cpuid_count(7, 1, &eax, &ebx, &ecx, &edx); + if((eax & 32) == 32){ + ret=1; // CPUID.7.1:EAX[bit 5] indicates whether avx512_bf16 is supported or not + } + return ret; +#else + return 0; +#endif +} int get_vendor(void){ int eax, ebx, ecx, edx; @@ -335,6 +351,7 @@ int get_cputype(int gettype){ if (support_avx()) feature |= HAVE_AVX; if (support_avx2()) feature |= HAVE_AVX2; if (support_avx512()) feature |= HAVE_AVX512VL; + if (support_avx512_bf16()) feature |= HAVE_AVX512BF16; if ((ecx & (1 << 12)) != 0) feature |= HAVE_FMA3; #endif @@ -1197,7 +1214,11 @@ int get_cpuname(void){ case 3: case 5: case 6: +#if defined(__x86_64__) || defined(__amd64__) + return CPUTYPE_CORE2; +#else return CPUTYPE_PENTIUM2; +#endif case 7: case 8: case 10: @@ -1211,7 +1232,7 @@ return CPUTYPE_CORE2; } break; - case 1: + case 1: // family 6 exmodel 1 switch (model) { case 6: return CPUTYPE_CORE2; @@ -1228,7 +1249,7 @@ return CPUTYPE_DUNNINGTON; } break; - case 2: + case 2: // family 6 exmodel 2 switch (model) { case 5: //Intel Core (Clarkdale) / Core (Arrandale) @@ -1257,7 +1278,7 @@ return CPUTYPE_NEHALEM; } break; - case 3: + case 3:
// family 6 exmodel 3 switch (model) { case 7: // Bay Trail @@ -1287,7 +1308,7 @@ int get_cpuname(void){ return CPUTYPE_NEHALEM; } break; - case 4: + case 4: // family 6 exmodel 4 switch (model) { case 5: case 6: @@ -1321,7 +1342,7 @@ int get_cpuname(void){ return CPUTYPE_NEHALEM; } break; - case 5: + case 5: // family 6 exmodel 5 switch (model) { case 6: //Broadwell @@ -1333,6 +1354,8 @@ int get_cpuname(void){ return CPUTYPE_NEHALEM; case 5: // Skylake X + if(support_avx512_bf16()) + return CPUTYPE_COOPERLAKE; if(support_avx512()) return CPUTYPE_SKYLAKEX; if(support_avx2()) @@ -1364,7 +1387,7 @@ int get_cpuname(void){ return CPUTYPE_NEHALEM; } break; - case 6: + case 6: // family 6 exmodel 6 switch (model) { case 6: // Cannon Lake if(support_avx512()) @@ -1376,7 +1399,22 @@ int get_cpuname(void){ else return CPUTYPE_NEHALEM; } - break; + break; + case 7: // family 6 exmodel 7 + switch (model) { + case 10: // Goldmont Plus + return CPUTYPE_NEHALEM; + case 14: // Ice Lake + if(support_avx512()) + return CPUTYPE_SKYLAKEX; + if(support_avx2()) + return CPUTYPE_HASWELL; + if(support_avx()) + return CPUTYPE_SANDYBRIDGE; + else + return CPUTYPE_NEHALEM; + } + break; case 9: case 8: switch (model) { @@ -1387,6 +1425,26 @@ int get_cpuname(void){ return CPUTYPE_SANDYBRIDGE; else return CPUTYPE_NEHALEM; + } + case 10: //family 6 exmodel 10 + switch (model) { + case 5: // Comet Lake H and S + case 6: // Comet Lake U + if(support_avx2()) + return CPUTYPE_HASWELL; + if(support_avx()) + return CPUTYPE_SANDYBRIDGE; + else + return CPUTYPE_NEHALEM; + case 7: // Rocket Lake + if(support_avx512()) + return CPUTYPE_SKYLAKEX; + if(support_avx2()) + return CPUTYPE_HASWELL; + if(support_avx()) + return CPUTYPE_SANDYBRIDGE; + else + return CPUTYPE_NEHALEM; } break; } @@ -1412,7 +1470,11 @@ int get_cpuname(void){ case 0x5: return CPUTYPE_AMDK6; case 0x6: +#if defined(__x86_64__) || defined(__amd64__) + return CPUTYPE_BARCELONA; +#else return CPUTYPE_ATHLON; +#endif case 0xf: switch (exfamily) { case 0: @@ -1420,10 +1482,11 @@ int get_cpuname(void){ return CPUTYPE_OPTERON; case 1: case 3: - case 7: - case 10: +// case 7: +// case 10: return CPUTYPE_BARCELONA; case 5: + case 7: return CPUTYPE_BOBCAT; case 6: switch (model) { @@ -1473,6 +1536,8 @@ int get_cpuname(void){ // AMD Ryzen case 8: // AMD Ryzen2 + default: + // Matisse/Renoir and other recent Ryzen2 if(support_avx()) #ifndef NO_AVX2 return CPUTYPE_ZEN; @@ -1482,6 +1547,16 @@ int get_cpuname(void){ else return CPUTYPE_BARCELONA; } + break; + case 10: // Zen3 + if(support_avx()) +#ifndef NO_AVX2 + return CPUTYPE_ZEN; +#else + return CPUTYPE_SANDYBRIDGE; // Zen is closer in architecture to Sandy Bridge than to Excavator +#endif + else + return CPUTYPE_BARCELONA; } break; } @@ -1630,7 +1705,8 @@ static char *cpuname[] = { "EXCAVATOR", "ZEN", "SKYLAKEX", - "DHYANA" + "DHYANA", + "COOPERLAKE" }; static char *lowercpuname[] = { @@ -1686,7 +1762,8 @@ static char *lowercpuname[] = { "excavator", "zen", "skylakex", - "dhyana" + "dhyana", + "cooperlake" }; static char *corename[] = { @@ -1719,7 +1796,8 @@ static char *corename[] = { "EXCAVATOR", "ZEN", "SKYLAKEX", - "DHYANA" + "DHYANA", + "COOPERLAKE" }; static char *corename_lower[] = { @@ -1752,7 +1830,8 @@ static char *corename_lower[] = { "excavator", "zen", "skylakex", - "dhyana" + "dhyana", + "cooperlake" }; @@ -1795,7 +1874,11 @@ int get_coretype(void){ case 4: case 5: case 6: +#if defined(__x86_64__) || defined(__amd64__) + return CORE_CORE2; +#else return CORE_P6; +#endif case 7: return CORE_KATMAI; case 8: 
@@ -1928,6 +2011,32 @@ int get_coretype(void){ return CORE_NEHALEM; } break; + case 10: + switch (model) { + case 5: // Comet Lake H and S + case 6: // Comet Lake U + if(support_avx()) + #ifndef NO_AVX2 + return CORE_HASWELL; + #else + return CORE_SANDYBRIDGE; + #endif + else + return CORE_NEHALEM; + case 7:// Rocket Lake +#ifndef NO_AVX512 + if(support_avx512()) + return CORE_SKYLAKEX; +#endif +#ifndef NO_AVX2 + if(support_avx2()) + return CORE_HASWELL; +#endif + if(support_avx()) + return CORE_SANDYBRIDGE; + else + return CORE_NEHALEM; + } case 5: switch (model) { case 6: @@ -1943,7 +2052,9 @@ int get_coretype(void){ case 5: // Skylake X #ifndef NO_AVX512 - return CORE_SKYLAKEX; + if(support_avx512_bf16()) + return CORE_COOPERLAKE; + return CORE_SKYLAKEX; #else if(support_avx()) #ifndef NO_AVX2 @@ -1979,6 +2090,38 @@ int get_coretype(void){ return CORE_NEHALEM; } break; + case 6: + if (model == 6) +#ifndef NO_AVX512 + return CORE_SKYLAKEX; +#else + if(support_avx()) +#ifndef NO_AVX2 + return CORE_HASWELL; +#else + return CORE_SANDYBRIDGE; +#endif + else + return CORE_NEHALEM; +#endif + break; + case 7: + if (model == 10) + return CORE_NEHALEM; + if (model == 14) +#ifndef NO_AVX512 + return CORE_SKYLAKEX; +#else + if(support_avx()) +#ifndef NO_AVX2 + return CORE_HASWELL; +#else + return CORE_SANDYBRIDGE; +#endif + else + return CORE_NEHALEM; +#endif + break; case 9: case 8: if (model == 14) { // Kaby Lake @@ -2002,7 +2145,11 @@ int get_coretype(void){ if (vendor == VENDOR_AMD){ if (family <= 0x5) return CORE_80486; +#if defined(__x86_64__) || defined(__amd64__) + if (family <= 0xe) return CORE_BARCELONA; +#else if (family <= 0xe) return CORE_ATHLON; +#endif if (family == 0xf){ if ((exfamily == 0) || (exfamily == 2)) return CORE_OPTERON; else if (exfamily == 5) return CORE_BOBCAT; @@ -2020,7 +2167,7 @@ int get_coretype(void){ return CORE_PILEDRIVER; else return CORE_BARCELONA; //OS don't support AVX. 
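The Comet Lake, Rocket Lake and Ice Lake branches added above all apply the same tiered fallback: report the newest core type whose instruction-set level both the CPU and the OS support, stepping down one architecture at a time. A minimal standalone sketch of that selection order follows; the support_*() probes are stubbed here purely for illustration (in cpuid_x86.c they are the CPUID/XGETBV checks shown earlier):

#include <stdio.h>

/* Stubs standing in for the real CPUID/XGETBV probes in cpuid_x86.c. */
static int support_avx(void)    { return 1; }
static int support_avx2(void)   { return 1; }
static int support_avx512(void) { return 0; }

/* Pick the newest core type this CPU/OS combination can actually run. */
static const char *select_core(void) {
  if (support_avx512()) return "SKYLAKEX";
  if (support_avx2())   return "HASWELL";
  if (support_avx())    return "SANDYBRIDGE";
  return "NEHALEM";
}

int main(void) {
  printf("selected core: %s\n", select_core());
  return 0;
}
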
- case 5: // New EXCAVATOR + case 5: // New EXCAVATOR if(support_avx()) return CORE_EXCAVATOR; else @@ -2048,12 +2195,14 @@ int get_coretype(void){ } break; } - } else if (exfamily == 8) { + } else if (exfamily == 8 || exfamily == 10) { switch (model) { case 1: // AMD Ryzen case 8: - // Ryzen 2 + // Ryzen 2 + default: + // Matisse,Renoir Ryzen2 models if(support_avx()) #ifndef NO_AVX2 return CORE_ZEN; @@ -2174,6 +2323,7 @@ void get_cpuconfig(void){ if (features & HAVE_AVX ) printf("#define HAVE_AVX\n"); if (features & HAVE_AVX2 ) printf("#define HAVE_AVX2\n"); if (features & HAVE_AVX512VL ) printf("#define HAVE_AVX512VL\n"); + if (features & HAVE_AVX512BF16 ) printf("#define HAVE_AVX512BF16\n"); if (features & HAVE_3DNOWEX) printf("#define HAVE_3DNOWEX\n"); if (features & HAVE_3DNOW) printf("#define HAVE_3DNOW\n"); if (features & HAVE_FMA4 ) printf("#define HAVE_FMA4\n"); @@ -2244,6 +2394,7 @@ void get_sse(void){ if (features & HAVE_AVX ) printf("HAVE_AVX=1\n"); if (features & HAVE_AVX2 ) printf("HAVE_AVX2=1\n"); if (features & HAVE_AVX512VL ) printf("HAVE_AVX512VL=1\n"); + if (features & HAVE_AVX512BF16 ) printf("HAVE_AVX512BF16=1\n"); if (features & HAVE_3DNOWEX) printf("HAVE_3DNOWEX=1\n"); if (features & HAVE_3DNOW) printf("HAVE_3DNOW=1\n"); if (features & HAVE_FMA4 ) printf("HAVE_FMA4=1\n"); diff --git a/cpuid_zarch.c b/cpuid_zarch.c index 896ed94f5..df3b7898f 100644 --- a/cpuid_zarch.c +++ b/cpuid_zarch.c @@ -30,17 +30,20 @@ #define CPU_GENERIC 0 #define CPU_Z13 1 #define CPU_Z14 2 +#define CPU_Z15 3 static char *cpuname[] = { "ZARCH_GENERIC", "Z13", - "Z14" + "Z14", + "Z15" }; static char *cpuname_lower[] = { "zarch_generic", "z13", - "z14" + "z14", + "z15" }; int detect(void) @@ -66,6 +69,8 @@ int detect(void) if (strstr(p, "2965")) return CPU_Z13; if (strstr(p, "3906")) return CPU_Z14; if (strstr(p, "3907")) return CPU_Z14; + if (strstr(p, "8561")) return CPU_Z14; // fallback z15 to z14 + if (strstr(p, "8562")) return CPU_Z14; // fallback z15 to z14 return CPU_GENERIC; } diff --git a/ctest.c b/ctest.c index 5e869b901..d674a8cbd 100644 --- a/ctest.c +++ b/ctest.c @@ -153,3 +153,11 @@ ARCH_ARM ARCH_ARM64 #endif +#if defined(__riscv) +ARCH_RISCV64 +#endif + +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) +HAVE_C11 +#endif + diff --git a/ctest/CMakeLists.txt b/ctest/CMakeLists.txt index 022379d83..17f29fe69 100644 --- a/ctest/CMakeLists.txt +++ b/ctest/CMakeLists.txt @@ -21,6 +21,9 @@ endif() foreach(float_type ${FLOAT_TYPES}) string(SUBSTRING ${float_type} 0 1 float_char_upper) string(TOLOWER ${float_char_upper} float_char) + if (${float_char} STREQUAL "b") + continue() + endif() #level1 add_executable(x${float_char}cblat1 c_${float_char}blat1.f diff --git a/ctest/Makefile b/ctest/Makefile index 569a5dda3..2a893cae8 100644 --- a/ctest/Makefile +++ b/ctest/Makefile @@ -6,6 +6,8 @@ TOPDIR = .. 
include $(TOPDIR)/Makefile.system override CFLAGS += -DADD$(BU) -DCBLAS +override TARGET_ARCH= +override TARGET_MACH= LIB = $(TOPDIR)/$(LIBNAME) @@ -38,59 +40,161 @@ ztestl3o = c_zblas3.o c_z3chke.o auxiliary.o c_xerbla.o constant.o ztestl3o_3m = c_zblas3_3m.o c_z3chke_3m.o auxiliary.o c_xerbla.o constant.o - +ifeq ($(NOFORTRAN),1) +all :: +else all :: all1 all2 all3 +endif + +ifeq ($(BUILD_SINGLE),1) +all1targets += xscblat1 +endif +ifeq ($(BUILD_DOUBLE),1) +all1targets += xdcblat1 +endif +ifeq ($(BUILD_COMPLEX),1) +all1targets += xccblat1 +endif +ifeq ($(BUILD_COMPLEX16),1) +all1targets += xzcblat1 +endif -all1: xscblat1 xdcblat1 xccblat1 xzcblat1 -ifndef CROSS +all1: $(all1targets) + +ifneq ($(CROSS), 1) ifeq ($(USE_OPENMP), 1) +ifeq ($(BUILD_SINGLE),1) OMP_NUM_THREADS=2 ./xscblat1 +endif +ifeq ($(BUILD_DOUBLE),1) OMP_NUM_THREADS=2 ./xdcblat1 +endif +ifeq ($(BUILD_COMPLEX),1) OMP_NUM_THREADS=2 ./xccblat1 +endif +ifeq ($(BUILD_COMPLEX16),1) OMP_NUM_THREADS=2 ./xzcblat1 +endif else +ifeq ($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=2 ./xscblat1 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=2 ./xdcblat1 +endif +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=2 ./xccblat1 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=2 ./xzcblat1 endif endif +endif + +ifeq ($(BUILD_SINGLE),1) +all2targets += xscblat2 +endif +ifeq ($(BUILD_DOUBLE),1) +all2targets += xdcblat2 +endif +ifeq ($(BUILD_COMPLEX),1) +all2targets += xccblat2 +endif +ifeq ($(BUILD_COMPLEX16),1) +all2targets += xzcblat2 +endif -all2: xscblat2 xdcblat2 xccblat2 xzcblat2 -ifndef CROSS +all2: $(all2targets) + +ifneq ($(CROSS), 1) ifeq ($(USE_OPENMP), 1) +ifeq ($(BUILD_SINGLE),1) OMP_NUM_THREADS=2 ./xscblat2 < sin2 +endif +ifeq ($(BUILD_DOUBLE),1) OMP_NUM_THREADS=2 ./xdcblat2 < din2 +endif +ifeq ($(BUILD_COMPLEX),1) OMP_NUM_THREADS=2 ./xccblat2 < cin2 +endif +ifeq ($(BUILD_COMPLEX16),1) OMP_NUM_THREADS=2 ./xzcblat2 < zin2 +endif else +ifeq ($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=2 ./xscblat2 < sin2 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=2 ./xdcblat2 < din2 +endif +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=2 ./xccblat2 < cin2 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=2 ./xzcblat2 < zin2 endif endif +endif -all3: xscblat3 xdcblat3 xccblat3 xzcblat3 -ifndef CROSS + +ifeq ($(BUILD_SINGLE),1) +all3targets += xscblat3 +endif +ifeq ($(BUILD_DOUBLE),1) +all3targets += xdcblat3 +endif +ifeq ($(BUILD_COMPLEX),1) +all3targets += xccblat3 +endif +ifeq ($(BUILD_COMPLEX16),1) +all3targets += xzcblat3 +endif + +all3: $(all3targets) + +ifneq ($(CROSS), 1) ifeq ($(USE_OPENMP), 1) +ifeq ($(BUILD_SINGLE),1) OMP_NUM_THREADS=2 ./xscblat3 < sin3 +endif +ifeq ($(BUILD_DOUBLE),1) OMP_NUM_THREADS=2 ./xdcblat3 < din3 +endif +ifeq ($(BUILD_COMPLEX),1) OMP_NUM_THREADS=2 ./xccblat3 < cin3 +endif +ifeq ($(BUILD_COMPLEX16),1) OMP_NUM_THREADS=2 ./xzcblat3 < zin3 +endif else +ifeq ($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=2 ./xscblat3 < sin3 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=2 ./xdcblat3 < din3 +endif +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=2 ./xccblat3 < cin3 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=2 ./xzcblat3 < zin3 endif +endif +endif all3_3m: xzcblat3_3m xccblat3_3m ifeq ($(USE_OPENMP), 1) +ifeq ($(BUILD_SINGLE),1) OMP_NUM_THREADS=2 ./xccblat3_3m < cin3_3m +endif +ifeq ($(BUILD_COMPLEX16),1) OMP_NUM_THREADS=2 ./xzcblat3_3m < zin3_3m +endif else +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=2 ./xccblat3_3m < cin3_3m +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=2 
./xzcblat3_3m < zin3_3m endif endif @@ -110,13 +214,19 @@ endif endif endif +ifeq ($(BUILD_SINGLE),1) # Single real xscblat1: $(stestl1o) c_sblat1.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xscblat1 c_sblat1.o $(stestl1o) $(LIB) $(EXTRALIB) $(CEXTRALIB) + xscblat2: $(stestl2o) c_sblat2.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xscblat2 c_sblat2.o $(stestl2o) $(LIB) $(EXTRALIB) $(CEXTRALIB) + xscblat3: $(stestl3o) c_sblat3.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xscblat3 c_sblat3.o $(stestl3o) $(LIB) $(EXTRALIB) $(CEXTRALIB) +endif + +ifeq ($(BUILD_DOUBLE),1) # Double real xdcblat1: $(dtestl1o) c_dblat1.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xdcblat1 c_dblat1.o $(dtestl1o) $(LIB) $(EXTRALIB) $(CEXTRALIB) @@ -124,7 +234,10 @@ xdcblat2: $(dtestl2o) c_dblat2.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xdcblat2 c_dblat2.o $(dtestl2o) $(LIB) $(EXTRALIB) $(CEXTRALIB) xdcblat3: $(dtestl3o) c_dblat3.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xdcblat3 c_dblat3.o $(dtestl3o) $(LIB) $(EXTRALIB) $(CEXTRALIB) +endif + +ifeq ($(BUILD_COMPLEX),1) # Single complex xccblat1: $(ctestl1o) c_cblat1.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xccblat1 c_cblat1.o $(ctestl1o) $(LIB) $(EXTRALIB) $(CEXTRALIB) @@ -135,7 +248,10 @@ xccblat3: $(ctestl3o) c_cblat3.o $(TOPDIR)/$(LIBNAME) xccblat3_3m: $(ctestl3o_3m) c_cblat3_3m.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xccblat3_3m c_cblat3_3m.o $(ctestl3o_3m) $(LIB) $(EXTRALIB) $(CEXTRALIB) +endif + +ifeq ($(BUILD_COMPLEX16),1) # Double complex xzcblat1: $(ztestl1o) c_zblat1.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xzcblat1 c_zblat1.o $(ztestl1o) $(LIB) $(EXTRALIB) $(CEXTRALIB) @@ -147,6 +263,6 @@ xzcblat3: $(ztestl3o) c_zblat3.o $(TOPDIR)/$(LIBNAME) xzcblat3_3m: $(ztestl3o_3m) c_zblat3_3m.o $(TOPDIR)/$(LIBNAME) $(FC) $(FLDFLAGS) -o xzcblat3_3m c_zblat3_3m.o $(ztestl3o_3m) $(LIB) $(EXTRALIB) $(CEXTRALIB) - +endif include $(TOPDIR)/Makefile.tail diff --git a/ctest/c_cblat3.f b/ctest/c_cblat3.f index 96f190352..74293ce53 100644 --- a/ctest/c_cblat3.f +++ b/ctest/c_cblat3.f @@ -1503,6 +1503,8 @@ C $ ' .' ) NC = 0 RESET = .TRUE. ERRMAX = RZERO + RALS = RONE + RBETS = RONE * DO 100 IN = 1, NIDIM N = IDIM( IN ) diff --git a/ctest/c_dblas1.c b/ctest/c_dblas1.c index a288154c2..8e13afcaa 100644 --- a/ctest/c_dblas1.c +++ b/ctest/c_dblas1.c @@ -53,6 +53,13 @@ void F77_drot( const int *N, double *X, const int *incX, double *Y, return; } +void F77_drotm(const int *N, double *X, const int *incX, double *Y, + const int *incY, const double *dparam) +{ + cblas_drotm(*N, X, *incX, Y, *incY, dparam); + return; +} + void F77_dscal(const int *N, const double *alpha, double *X, const int *incX) { @@ -67,16 +74,6 @@ void F77_dswap( const int *N, double *X, const int *incX, return; } -double F77_dzasum(const int *N, void *X, const int *incX) -{ - return cblas_dzasum(*N, X, *incX); -} - -double F77_dznrm2(const int *N, OPENBLAS_CONST void *X, const int *incX) -{ - return cblas_dznrm2(*N, X, *incX); -} - int F77_idamax(const int *N, OPENBLAS_CONST double *X, const int *incX) { if (*N < 1 || *incX < 1) return(0); diff --git a/ctest/c_dblat1.f b/ctest/c_dblat1.f index 4a71b4dcf..0139ede63 100644 --- a/ctest/c_dblat1.f +++ b/ctest/c_dblat1.f @@ -19,7 +19,7 @@ DATA SFAC/9.765625D-4/ * .. Executable Statements .. WRITE (NOUT,99999) - DO 20 IC = 1, 10 + DO 20 IC = 1, 11 ICASE = IC CALL HEADER * @@ -40,7 +40,7 @@ ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR. + ICASE.EQ.6) THEN CALL CHECK2(SFAC) - ELSE IF (ICASE.EQ.4) THEN + ELSE IF (ICASE.EQ.4 .OR. 
ICASE.EQ.11) THEN CALL CHECK3(SFAC) END IF * -- Print @@ -59,7 +59,7 @@ INTEGER ICASE, INCX, INCY, MODE, N LOGICAL PASS * .. Local Arrays .. - CHARACTER*15 L(10) + CHARACTER*15 L(11) * .. Common blocks .. COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS * .. Data statements .. @@ -73,6 +73,7 @@ DATA L(8)/'CBLAS_DASUM '/ DATA L(9)/'CBLAS_DSCAL '/ DATA L(10)/'CBLAS_IDAMAX'/ + DATA L(11)/'CBLAS_DROTM'/ * .. Executable Statements .. WRITE (NOUT,99999) ICASE, L(ICASE) RETURN @@ -400,199 +401,81 @@ LOGICAL PASS * .. Local Scalars .. DOUBLE PRECISION SC, SS - INTEGER I, K, KI, KN, KSIZE, LENX, LENY, MX, MY + INTEGER I, KI, KN, KSIZE, LEN * .. Local Arrays .. - DOUBLE PRECISION COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4), - + DX1(7), DY1(7), MWPC(11), MWPS(11), MWPSTX(5), - + MWPSTY(5), MWPTX(11,5), MWPTY(11,5), MWPX(5), - + MWPY(5), SSIZE2(14,2), STX(7), STY(7), SX(7), - + SY(7) - INTEGER INCXS(4), INCYS(4), LENS(4,2), MWPINX(11), - + MWPINY(11), MWPN(11), NS(4) + DOUBLE PRECISION DX(10), DY(10), SSIZE2(10,2), STX(10), + + STY(10), SX(10), SY(10), + + PARAM(5, 4), DPARAM(5) + INTEGER INCXS(7), INCYS(7), NS(5) * .. External Subroutines .. - EXTERNAL STEST,DROTTEST + EXTERNAL STEST, DROTTEST, DROT * .. Intrinsic Functions .. - INTRINSIC ABS, MIN + INTRINSIC MIN * .. Common blocks .. COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS * .. Data statements .. - DATA INCXS/1, 2, -2, -1/ - DATA INCYS/1, -2, 1, -2/ - DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ - DATA NS/0, 1, 2, 4/ - DATA DX1/0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.9D0, -0.3D0, - + -0.4D0/ - DATA DY1/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0, - + 0.8D0/ + DATA INCXS/1, 1, 2, 2, -2, -1, -2/ + DATA INCYS/1, 2, 2, -2, 1, -2, -2/ + DATA NS/0, 1, 2, 4, 5/ + DATA DX/0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.9D0, -0.3D0, + + -0.4D0, 0.7D0, 0.5D0, 0.2D0/ + DATA DY/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0, + + 0.8D0, -0.5D0, 0.1D0, -0.3D0/ DATA SC, SS/0.8D0, 0.6D0/ - DATA DT9X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.78D0, -0.46D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.78D0, -0.46D0, -0.22D0, - + 1.06D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.78D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.66D0, 0.1D0, -0.1D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.96D0, 0.1D0, -0.76D0, 0.8D0, 0.90D0, - + -0.3D0, -0.02D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.78D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.06D0, 0.1D0, - + -0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.90D0, - + 0.1D0, -0.22D0, 0.8D0, 0.18D0, -0.3D0, -0.02D0, - + 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.78D0, 0.26D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.78D0, 0.26D0, -0.76D0, 1.12D0, - + 0.0D0, 0.0D0, 0.0D0/ - DATA DT9Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.54D0, - + 0.08D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.04D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0, - + -0.9D0, -0.12D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.64D0, -0.9D0, -0.30D0, 0.7D0, -0.18D0, 0.2D0, - + 0.28D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.7D0, -1.08D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.64D0, -1.26D0, - + 0.54D0, 0.20D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - 
+ 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.7D0, - + -0.18D0, 0.2D0, 0.16D0/ + DATA LEN/10/ + DATA PARAM/-2.0D0, 1.0D0, 0.0D0, 0.0D0, 1.0D0, + + -1.0D0, 0.2D0, 0.3D0, 0.4D0, 0.5D0, + + 0.0D0, 1.0D0, 0.3D0, 0.4D0, 1.0D0, + + 1.0D0, 0.2D0, -1.0D0, 1.0D0, 0.5D0/ DATA SSIZE2/0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 1.17D0, 1.17D0, + 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, - + 1.17D0, 1.17D0, 1.17D0/ + + 1.17D0, 1.17D0/ * .. Executable Statements .. * - DO 60 KI = 1, 4 + DO 60 KI = 1, 7 INCX = INCXS(KI) INCY = INCYS(KI) - MX = ABS(INCX) - MY = ABS(INCY) * - DO 40 KN = 1, 4 + DO 40 KN = 1, 5 N = NS(KN) KSIZE = MIN(2,KN) - LENX = LENS(KN,MX) - LENY = LENS(KN,MY) * IF (ICASE.EQ.4) THEN * .. DROTTEST .. - DO 20 I = 1, 7 - SX(I) = DX1(I) - SY(I) = DY1(I) - STX(I) = DT9X(I,KN,KI) - STY(I) = DT9Y(I,KN,KI) + DO 20 I = 1, 10 + SX(I) = DX(I) + SY(I) = DY(I) + STX(I) = DX(I) + STY(I) = DY(I) 20 CONTINUE CALL DROTTEST(N,SX,INCX,SY,INCY,SC,SS) - CALL STEST(LENX,SX,STX,SSIZE2(1,KSIZE),SFAC) - CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC) - ELSE + CALL DROT(N,STX,INCX,STY,INCY,SC,SS) + CALL STEST(LEN,SX,STX,SSIZE2(1,KSIZE),SFAC) + CALL STEST(LEN,SY,STY,SSIZE2(1,KSIZE),SFAC) + ELSE IF (ICASE.EQ.11) THEN +* .. DROTMTEST .. + DO 90 I = 1, 10 + SX(I) = DX(I) + SY(I) = DY(I) + STX(I) = DX(I) + STY(I) = DY(I) + 90 CONTINUE + DO 70 I = 1, 4 + DO 80 K = 1, 5 + DPARAM(K) = PARAM(K,I) + 80 CONTINUE + CALL DROTMTEST(N,SX,INCX,SY,INCY,DPARAM) + CALL DROTM(N,STX,INCX,STY,INCY,DPARAM) + CALL STEST(LEN,SX,STX,SSIZE2(1,KSIZE),SFAC) + CALL STEST(LEN,SY,STY,SSIZE2(1,KSIZE),SFAC) + 70 CONTINUE + ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK3' STOP END IF 40 CONTINUE 60 CONTINUE -* - MWPC(1) = 1 - DO 80 I = 2, 11 - MWPC(I) = 0 - 80 CONTINUE - MWPS(1) = 0.0 - DO 100 I = 2, 6 - MWPS(I) = 1.0 - 100 CONTINUE - DO 120 I = 7, 11 - MWPS(I) = -1.0 - 120 CONTINUE - MWPINX(1) = 1 - MWPINX(2) = 1 - MWPINX(3) = 1 - MWPINX(4) = -1 - MWPINX(5) = 1 - MWPINX(6) = -1 - MWPINX(7) = 1 - MWPINX(8) = 1 - MWPINX(9) = -1 - MWPINX(10) = 1 - MWPINX(11) = -1 - MWPINY(1) = 1 - MWPINY(2) = 1 - MWPINY(3) = -1 - MWPINY(4) = -1 - MWPINY(5) = 2 - MWPINY(6) = 1 - MWPINY(7) = 1 - MWPINY(8) = -1 - MWPINY(9) = -1 - MWPINY(10) = 2 - MWPINY(11) = 1 - DO 140 I = 1, 11 - MWPN(I) = 5 - 140 CONTINUE - MWPN(5) = 3 - MWPN(10) = 3 - DO 160 I = 1, 5 - MWPX(I) = I - MWPY(I) = I - MWPTX(1,I) = I - MWPTY(1,I) = I - MWPTX(2,I) = I - MWPTY(2,I) = -I - MWPTX(3,I) = 6 - I - MWPTY(3,I) = I - 6 - MWPTX(4,I) = I - MWPTY(4,I) = -I - MWPTX(6,I) = 6 - I - MWPTY(6,I) = I - 6 - MWPTX(7,I) = -I - MWPTY(7,I) = I - MWPTX(8,I) = I - 6 - MWPTY(8,I) = 6 - I - MWPTX(9,I) = -I - MWPTY(9,I) = I - MWPTX(11,I) = I - 6 - MWPTY(11,I) = 6 - I - 160 CONTINUE - MWPTX(5,1) = 1 - MWPTX(5,2) = 3 - MWPTX(5,3) = 5 - MWPTX(5,4) = 4 - MWPTX(5,5) = 5 - MWPTY(5,1) = -1 - MWPTY(5,2) = 2 - MWPTY(5,3) = -2 - MWPTY(5,4) = 4 - MWPTY(5,5) = -3 - MWPTX(10,1) = -1 - MWPTX(10,2) = -3 - MWPTX(10,3) = -5 - MWPTX(10,4) = 4 - MWPTX(10,5) = 5 - MWPTY(10,1) = 1 - MWPTY(10,2) = 2 - MWPTY(10,3) = 2 - MWPTY(10,4) = 4 - MWPTY(10,5) = 3 - DO 200 I = 1, 11 - INCX = MWPINX(I) - INCY = MWPINY(I) - DO 180 K = 1, 5 - COPYX(K) = MWPX(K) - COPYY(K) = MWPY(K) - MWPSTX(K) = MWPTX(I,K) - MWPSTY(K) = MWPTY(I,K) - 180 CONTINUE - CALL DROTTEST(MWPN(I),COPYX,INCX,COPYY,INCY,MWPC(I),MWPS(I)) - CALL 
STEST(5,COPYX,MWPSTX,MWPSTX,SFAC) - CALL STEST(5,COPYY,MWPSTY,MWPSTY,SFAC) - 200 CONTINUE RETURN END SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC) @@ -726,3 +609,144 @@ + /1X) 99997 FORMAT (1X,I4,I3,3I5,2I36,I12) END + SUBROUTINE DROT(N,DX,INCX,DY,INCY,C,S) +* .. Scalar Arguments .. + DOUBLE PRECISION C,S + INTEGER INCX,INCY,N +* .. +* .. Array Arguments .. + DOUBLE PRECISION DX(*),DY(*) +* .. +* applies a plane rotation. +* jack dongarra, linpack, 3/11/78. +* modified 12/3/93, array(1) declarations changed to array(*) +* +* .. Local Scalars .. + DOUBLE PRECISION DTEMP + INTEGER I,IX,IY +* .. + IF (N.LE.0) RETURN + IF (INCX.EQ.1 .AND. INCY.EQ.1) GO TO 20 + IX = 1 + IY = 1 + IF (INCX.LT.0) IX = (-N+1)*INCX + 1 + IF (INCY.LT.0) IY = (-N+1)*INCY + 1 + DO 10 I = 1,N + DTEMP = C*DX(IX) + S*DY(IY) + DY(IY) = C*DY(IY) - S*DX(IX) + DX(IX) = DTEMP + IX = IX + INCX + IY = IY + INCY + 10 CONTINUE + RETURN + 20 DO 30 I = 1,N + DTEMP = C*DX(I) + S*DY(I) + DY(I) = C*DY(I) - S*DX(I) + DX(I) = DTEMP + 30 CONTINUE + RETURN + END + SUBROUTINE drotm(N,DX,INCX,DY,INCY,DPARAM) +* +* -- Reference BLAS level1 routine (version 3.8.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2017 +* +* .. Scalar Arguments .. + INTEGER INCX,INCY,N +* .. +* .. Array Arguments .. + DOUBLE PRECISION DPARAM(5),DX(*),DY(*) +* .. +* +* ===================================================================== +* +* .. Local Scalars .. + DOUBLE PRECISION DFLAG,DH11,DH12,DH21,DH22,TWO,W,Z,ZERO + INTEGER I,KX,KY,NSTEPS +* .. +* .. Data statements .. + DATA zero,two/0.d0,2.d0/ +* .. +* + dflag = dparam(1) + IF (n.LE.0 .OR. (dflag+two.EQ.zero)) RETURN + IF (incx.EQ.incy.AND.incx.GT.0) THEN +* + nsteps = n*incx + IF (dflag.LT.zero) THEN + dh11 = dparam(2) + dh12 = dparam(4) + dh21 = dparam(3) + dh22 = dparam(5) + DO i = 1,nsteps,incx + w = dx(i) + z = dy(i) + dx(i) = w*dh11 + z*dh12 + dy(i) = w*dh21 + z*dh22 + END DO + ELSE IF (dflag.EQ.zero) THEN + dh12 = dparam(4) + dh21 = dparam(3) + DO i = 1,nsteps,incx + w = dx(i) + z = dy(i) + dx(i) = w + z*dh12 + dy(i) = w*dh21 + z + END DO + ELSE + dh11 = dparam(2) + dh22 = dparam(5) + DO i = 1,nsteps,incx + w = dx(i) + z = dy(i) + dx(i) = w*dh11 + z + dy(i) = -w + dh22*z + END DO + END IF + ELSE + kx = 1 + ky = 1 + IF (incx.LT.0) kx = 1 + (1-n)*incx + IF (incy.LT.0) ky = 1 + (1-n)*incy +* + IF (dflag.LT.zero) THEN + dh11 = dparam(2) + dh12 = dparam(4) + dh21 = dparam(3) + dh22 = dparam(5) + DO i = 1,n + w = dx(kx) + z = dy(ky) + dx(kx) = w*dh11 + z*dh12 + dy(ky) = w*dh21 + z*dh22 + kx = kx + incx + ky = ky + incy + END DO + ELSE IF (dflag.EQ.zero) THEN + dh12 = dparam(4) + dh21 = dparam(3) + DO i = 1,n + w = dx(kx) + z = dy(ky) + dx(kx) = w + z*dh12 + dy(ky) = w*dh21 + z + kx = kx + incx + ky = ky + incy + END DO + ELSE + dh11 = dparam(2) + dh22 = dparam(5) + DO i = 1,n + w = dx(kx) + z = dy(ky) + dx(kx) = w*dh11 + z + dy(ky) = -w + dh22*z + kx = kx + incx + ky = ky + incy + END DO + END IF + END IF + RETURN + END diff --git a/ctest/c_sblas1.c b/ctest/c_sblas1.c index 1f301a693..a562014a4 100644 --- a/ctest/c_sblas1.c +++ b/ctest/c_sblas1.c @@ -21,16 +21,6 @@ void F77_saxpy(blasint *N, const float *alpha, OPENBLAS_CONST float *X, return; } -float F77_scasum(blasint *N, float *X, blasint *incX) -{ - return cblas_scasum(*N, X, *incX); -} - -float F77_scnrm2(blasint *N, OPENBLAS_CONST float *X, blasint *incX) -{ - return cblas_scnrm2(*N, X, *incX); -} - void F77_scopy(blasint *N, 
OPENBLAS_CONST float *X, blasint *incX, float *Y, blasint *incY) { @@ -55,6 +45,13 @@ void F77_srotg( float *a, float *b, float *c, float *s) return; } +void F77_srotm(blasint *N, float *X, blasint *incX, float *Y, blasint *incY, + float *param) +{ + cblas_srotm(*N, X, *incX, Y, *incY, param); + return; +} + void F77_srot( blasint *N, float *X, blasint *incX, float *Y, blasint *incY, const float *c, const float *s) { diff --git a/ctest/c_sblat1.f b/ctest/c_sblat1.f index 89902f12d..66a5def89 100644 --- a/ctest/c_sblat1.f +++ b/ctest/c_sblat1.f @@ -19,7 +19,7 @@ DATA SFAC/9.765625E-4/ * .. Executable Statements .. WRITE (NOUT,99999) - DO 20 IC = 1, 10 + DO 20 IC = 1, 11 ICASE = IC CALL HEADER * @@ -40,7 +40,7 @@ ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR. + ICASE.EQ.6) THEN CALL CHECK2(SFAC) - ELSE IF (ICASE.EQ.4) THEN + ELSE IF (ICASE.EQ.4 .OR. ICASE.EQ.11) THEN CALL CHECK3(SFAC) END IF * -- Print @@ -59,7 +59,7 @@ INTEGER ICASE, INCX, INCY, MODE, N LOGICAL PASS * .. Local Arrays .. - CHARACTER*15 L(10) + CHARACTER*15 L(11) * .. Common blocks .. COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS * .. Data statements .. @@ -73,6 +73,7 @@ DATA L(8)/'CBLAS_SASUM '/ DATA L(9)/'CBLAS_SSCAL '/ DATA L(10)/'CBLAS_ISAMAX'/ + DATA L(11)/'CBLAS_SROTM'/ * .. Executable Statements .. WRITE (NOUT,99999) ICASE, L(ICASE) RETURN @@ -396,203 +397,92 @@ * .. Scalar Arguments .. REAL SFAC * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. REAL SC, SS - INTEGER I, K, KI, KN, KSIZE, LENX, LENY, MX, MY + INTEGER I, K, KI, KN, KSIZE, LEN * .. Local Arrays .. - REAL COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4), - + DX1(7), DY1(7), MWPC(11), MWPS(11), MWPSTX(5), - + MWPSTY(5), MWPTX(11,5), MWPTY(11,5), MWPX(5), - + MWPY(5), SSIZE2(14,2), STX(7), STY(7), SX(7), - + SY(7) - INTEGER INCXS(4), INCYS(4), LENS(4,2), MWPINX(11), - + MWPINY(11), MWPN(11), NS(4) + REAL DX(19), DY(19), + + SSIZE2(19,2), STX(19), STY(19), SX(19), SY(19), + + PARAM(5, 4), SPARAM(5) + INTEGER INCXS(7), INCYS(7), NS(7) * .. External Subroutines .. - EXTERNAL SROTTEST, STEST + EXTERNAL SROTMTEST, SROTM * .. Intrinsic Functions .. - INTRINSIC ABS, MIN + INTRINSIC MIN * .. Common blocks .. COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS * .. Data statements .. 
- DATA INCXS/1, 2, -2, -1/ - DATA INCYS/1, -2, 1, -2/ - DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ - DATA NS/0, 1, 2, 4/ - DATA DX1/0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0, - + -0.4E0/ - DATA DY1/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0, + DATA INCXS/1, 1, 2, 2, -2, -1, -2/ + DATA INCYS/1, 2, 2, -2, 1, -2, -2/ + DATA NS/0, 1, 2, 4, 5, 8, 9/ + DATA DX/0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0, + + -0.4E0, 0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, + + 0.2E0, 0.8E0, -0.46E0, 0.78E0, -0.46E0, -0.22E0, + + 1.06E0/ + DATA DY/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0, + + 0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0, + + 0.96E0, 0.1E0, -0.76E0, 0.8E0, 0.90E0, 0.66E0, + 0.8E0/ DATA SC, SS/0.8E0, 0.6E0/ - DATA DT9X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.78E0, -0.46E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.0E0, 0.78E0, -0.46E0, -0.22E0, - + 1.06E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0, - + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.78E0, - + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.66E0, 0.1E0, -0.1E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.96E0, 0.1E0, -0.76E0, 0.8E0, 0.90E0, - + -0.3E0, -0.02E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.0E0, 0.78E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.06E0, 0.1E0, - + -0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.90E0, - + 0.1E0, -0.22E0, 0.8E0, 0.18E0, -0.3E0, -0.02E0, - + 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.78E0, 0.26E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.78E0, 0.26E0, -0.76E0, 1.12E0, - + 0.0E0, 0.0E0, 0.0E0/ - DATA DT9Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.54E0, - + 0.08E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0, - + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.04E0, - + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0, - + -0.9E0, -0.12E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.64E0, -0.9E0, -0.30E0, 0.7E0, -0.18E0, 0.2E0, - + 0.28E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.0E0, 0.7E0, -1.08E0, 0.0E0, - + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.64E0, -1.26E0, - + 0.54E0, 0.20E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, - + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.0E0, 0.0E0, - + 0.0E0, 0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.7E0, - + -0.18E0, 0.2E0, 0.16E0/ + DATA PARAM/-2.0E0, 1.0E0, 0.0E0, 0.0E0, 1.0E0, + + -1.0E0, 0.2E0, 0.3E0, 0.4E0, 0.5E0, + + 0.0E0, 1.0E0, 0.3E0, 0.4E0, 1.0E0, + + 1.0E0, 0.2E0, -1.0E0, 1.0E0, 0.5E0/ + DATA LEN/19/ DATA SSIZE2/0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, - + 0.0E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, - + 1.17E0, 1.17E0, 1.17E0/ + + 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, + + 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, + + 1.17E0/ * .. Executable Statements .. * - DO 60 KI = 1, 4 + DO 60 KI = 1, 7 INCX = INCXS(KI) INCY = INCYS(KI) - MX = ABS(INCX) - MY = ABS(INCY) * - DO 40 KN = 1, 4 + DO 40 KN = 1, 7 N = NS(KN) KSIZE = MIN(2,KN) - LENX = LENS(KN,MX) - LENY = LENS(KN,MY) * IF (ICASE.EQ.4) THEN * .. SROTTEST .. 
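The PARAM data above packs the four SROTM/DROTM flag cases, one per column, in the reference layout {flag, h11, h21, h12, h22}: flag -2 is the identity, -1 a full 2x2 matrix H, 0 a unit diagonal (only h21 and h12 used), and 1 a fixed off-diagonal pair h12 = 1, h21 = -1. A small C driver for the CBLAS entry point the new ICASE 11 exercises — a sketch, assuming linking against an OpenBLAS build that exports the CBLAS interface:

#include <stdio.h>
#include <cblas.h>

int main(void) {
  float x[4] = { 0.6f,  0.1f, -0.5f, 0.8f};
  float y[4] = { 0.5f, -0.9f,  0.3f, 0.7f};
  /* flag = -1: apply the full matrix, x_i = h11*x_i + h12*y_i and
     y_i = h21*x_i + h22*y_i (same values as the second PARAM column). */
  float param[5] = {-1.0f, 0.2f, 0.3f, 0.4f, 0.5f};
  cblas_srotm(4, x, 1, y, 1, param);
  for (int i = 0; i < 4; i++)
    printf("x[%d] = % .4f   y[%d] = % .4f\n", i, x[i], i, y[i]);
  return 0;
}
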
- DO 20 I = 1, 7 - SX(I) = DX1(I) - SY(I) = DY1(I) - STX(I) = DT9X(I,KN,KI) - STY(I) = DT9Y(I,KN,KI) + DO 20 I = 1, 19 + SX(I) = DX(I) + SY(I) = DY(I) + STX(I) = DX(I) + STY(I) = DY(I) 20 CONTINUE CALL SROTTEST(N,SX,INCX,SY,INCY,SC,SS) - CALL STEST(LENX,SX,STX,SSIZE2(1,KSIZE),SFAC) - CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC) + CALL SROT(N,STX,INCX,STY,INCY,SC,SS) + CALL STEST(LEN,SX,STX,SSIZE2(1,KSIZE),SFAC) + CALL STEST(LEN,SY,STY,SSIZE2(1,KSIZE),SFAC) + ELSE IF (ICASE.EQ.11) THEN +* .. SROTMTEST .. + DO 90 I = 1, 19 + SX(I) = DX(I) + SY(I) = DY(I) + STX(I) = DX(I) + STY(I) = DY(I) + 90 CONTINUE + DO 70 I = 1, 4 + DO 80 K = 1, 5 + SPARAM(K) = PARAM(K,I) + 80 CONTINUE + CALL SROTMTEST(N,SX,INCX,SY,INCY,SPARAM) + CALL SROTM(N,STX,INCX,STY,INCY,SPARAM) + CALL STEST(LEN,SX,STX,SSIZE2(1,KSIZE),SFAC) + CALL STEST(LEN,SY,STY,SSIZE2(1,KSIZE),SFAC) + 70 CONTINUE ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK3' STOP END IF 40 CONTINUE 60 CONTINUE -* - MWPC(1) = 1 - DO 80 I = 2, 11 - MWPC(I) = 0 - 80 CONTINUE - MWPS(1) = 0 - DO 100 I = 2, 6 - MWPS(I) = 1 - 100 CONTINUE - DO 120 I = 7, 11 - MWPS(I) = -1 - 120 CONTINUE - MWPINX(1) = 1 - MWPINX(2) = 1 - MWPINX(3) = 1 - MWPINX(4) = -1 - MWPINX(5) = 1 - MWPINX(6) = -1 - MWPINX(7) = 1 - MWPINX(8) = 1 - MWPINX(9) = -1 - MWPINX(10) = 1 - MWPINX(11) = -1 - MWPINY(1) = 1 - MWPINY(2) = 1 - MWPINY(3) = -1 - MWPINY(4) = -1 - MWPINY(5) = 2 - MWPINY(6) = 1 - MWPINY(7) = 1 - MWPINY(8) = -1 - MWPINY(9) = -1 - MWPINY(10) = 2 - MWPINY(11) = 1 - DO 140 I = 1, 11 - MWPN(I) = 5 - 140 CONTINUE - MWPN(5) = 3 - MWPN(10) = 3 - DO 160 I = 1, 5 - MWPX(I) = I - MWPY(I) = I - MWPTX(1,I) = I - MWPTY(1,I) = I - MWPTX(2,I) = I - MWPTY(2,I) = -I - MWPTX(3,I) = 6 - I - MWPTY(3,I) = I - 6 - MWPTX(4,I) = I - MWPTY(4,I) = -I - MWPTX(6,I) = 6 - I - MWPTY(6,I) = I - 6 - MWPTX(7,I) = -I - MWPTY(7,I) = I - MWPTX(8,I) = I - 6 - MWPTY(8,I) = 6 - I - MWPTX(9,I) = -I - MWPTY(9,I) = I - MWPTX(11,I) = I - 6 - MWPTY(11,I) = 6 - I - 160 CONTINUE - MWPTX(5,1) = 1 - MWPTX(5,2) = 3 - MWPTX(5,3) = 5 - MWPTX(5,4) = 4 - MWPTX(5,5) = 5 - MWPTY(5,1) = -1 - MWPTY(5,2) = 2 - MWPTY(5,3) = -2 - MWPTY(5,4) = 4 - MWPTY(5,5) = -3 - MWPTX(10,1) = -1 - MWPTX(10,2) = -3 - MWPTX(10,3) = -5 - MWPTX(10,4) = 4 - MWPTX(10,5) = 5 - MWPTY(10,1) = 1 - MWPTY(10,2) = 2 - MWPTY(10,3) = 2 - MWPTY(10,4) = 4 - MWPTY(10,5) = 3 - DO 200 I = 1, 11 - INCX = MWPINX(I) - INCY = MWPINY(I) - DO 180 K = 1, 5 - COPYX(K) = MWPX(K) - COPYY(K) = MWPY(K) - MWPSTX(K) = MWPTX(I,K) - MWPSTY(K) = MWPTY(I,K) - 180 CONTINUE - CALL SROTTEST(MWPN(I),COPYX,INCX,COPYY,INCY,MWPC(I),MWPS(I)) - CALL STEST(5,COPYX,MWPSTX,MWPSTX,SFAC) - CALL STEST(5,COPYY,MWPSTY,MWPSTY,SFAC) - 200 CONTINUE RETURN END SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC) @@ -726,3 +616,147 @@ + /1X) 99997 FORMAT (1X,I4,I3,3I5,2I36,I12) END + SUBROUTINE SROT(N,SX,INCX,SY,INCY,C,S) +* +* --Reference BLAS level1 routine (version 3.8.0) -- +* --Reference BLAS is a software package provided by Univ. of Tennessee, -- +* --Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2017 +* +* .. Scalar Arguments .. + REAL C,S + INTEGER INCX,INCY,N +* .. +* .. Array Arguments .. + REAL SX(*),SY(*) +* .. +* .. Local Scalars .. + REAL STEMP + INTEGER I,IX,IY +* .. + IF (n.LE.0) RETURN + IF (incx.EQ.1 .AND. 
incy.EQ.1) THEN + DO i = 1,n + stemp = c*sx(i) + s*sy(i) + sy(i) = c*sy(i) - s*sx(i) + sx(i) = stemp + END DO + ELSE + ix = 1 + iy = 1 + IF (incx.LT.0) ix = (-n+1)*incx + 1 + IF (incy.LT.0) iy = (-n+1)*incy + 1 + DO i = 1,n + stemp = c*sx(ix) + s*sy(iy) + sy(iy) = c*sy(iy) - s*sx(ix) + sx(ix) = stemp + ix = ix + incx + iy = iy + incy + END DO + END IF + RETURN + END + SUBROUTINE srotm(N,SX,INCX,SY,INCY,SPARAM) +* +* --Reference BLAS level1 routine (version 3.8.0) -- +* --Reference BLAS is a software package provided by Univ. of Tennessee, -- +* --Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2017 +* +* .. Scalar Arguments .. + INTEGER INCX,INCY,N +* .. +* .. Array Arguments .. + REAL SPARAM(5),SX(*),SY(*) +* .. +* +* ==================================================================== +* +* .. Local Scalars .. + REAL SFLAG,SH11,SH12,SH21,SH22,TWO,W,Z,ZERO + INTEGER I,KX,KY,NSTEPS +* .. +* .. Data statements .. + DATA zero,two/0.e0,2.e0/ +* .. +* + sflag = sparam(1) + IF (n.LE.0 .OR. (sflag+two.EQ.zero)) RETURN + IF (incx.EQ.incy.AND.incx.GT.0) THEN +* + nsteps = n*incx + IF (sflag.LT.zero) THEN + sh11 = sparam(2) + sh12 = sparam(4) + sh21 = sparam(3) + sh22 = sparam(5) + DO i = 1,nsteps,incx + w = sx(i) + z = sy(i) + sx(i) = w*sh11 + z*sh12 + sy(i) = w*sh21 + z*sh22 + END DO + ELSE IF (sflag.EQ.zero) THEN + sh12 = sparam(4) + sh21 = sparam(3) + DO i = 1,nsteps,incx + w = sx(i) + z = sy(i) + sx(i) = w + z*sh12 + sy(i) = w*sh21 + z + END DO + ELSE + sh11 = sparam(2) + sh22 = sparam(5) + DO i = 1,nsteps,incx + w = sx(i) + z = sy(i) + sx(i) = w*sh11 + z + sy(i) = -w + sh22*z + END DO + END IF + ELSE + kx = 1 + ky = 1 + IF (incx.LT.0) kx = 1 + (1-n)*incx + IF (incy.LT.0) ky = 1 + (1-n)*incy +* + IF (sflag.LT.zero) THEN + sh11 = sparam(2) + sh12 = sparam(4) + sh21 = sparam(3) + sh22 = sparam(5) + DO i = 1,n + w = sx(kx) + z = sy(ky) + sx(kx) = w*sh11 + z*sh12 + sy(ky) = w*sh21 + z*sh22 + kx = kx + incx + ky = ky + incy + END DO + ELSE IF (sflag.EQ.zero) THEN + sh12 = sparam(4) + sh21 = sparam(3) + DO i = 1,n + w = sx(kx) + z = sy(ky) + sx(kx) = w + z*sh12 + sy(ky) = w*sh21 + z + kx = kx + incx + ky = ky + incy + END DO + ELSE + sh11 = sparam(2) + sh22 = sparam(5) + DO i = 1,n + w = sx(kx) + z = sy(ky) + sx(kx) = w*sh11 + z + sy(ky) = -w + sh22*z + kx = kx + incx + ky = ky + incy + END DO + END IF + END IF + RETURN + END \ No newline at end of file diff --git a/ctest/c_zblat3.f b/ctest/c_zblat3.f index 5df834b2e..cc109d651 100644 --- a/ctest/c_zblat3.f +++ b/ctest/c_zblat3.f @@ -1504,6 +1504,8 @@ C $ ' .' ) NC = 0 RESET = .TRUE. ERRMAX = RZERO + RALS = RONE + RBETS = RONE * DO 100 IN = 1, NIDIM N = IDIM( IN ) diff --git a/ctest/din3 b/ctest/din3 index 23fedfe32..9919774ac 100644 --- a/ctest/din3 +++ b/ctest/din3 @@ -5,7 +5,7 @@ T LOGICAL FLAG, T TO STOP ON FAILURES. T LOGICAL FLAG, T TO TEST ERROR EXITS. 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH 16.0 THRESHOLD VALUE OF TEST RATIO -6 NUMBER OF VALUES OF N +7 NUMBER OF VALUES OF N 1 2 3 5 7 9 35 VALUES OF N 3 NUMBER OF VALUES OF ALPHA 0.0 1.0 0.7 VALUES OF ALPHA diff --git a/ctest/sin3 b/ctest/sin3 index 644083f22..b74206b70 100644 --- a/ctest/sin3 +++ b/ctest/sin3 @@ -5,7 +5,7 @@ T LOGICAL FLAG, T TO STOP ON FAILURES. T LOGICAL FLAG, T TO TEST ERROR EXITS. 
2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH 16.0 THRESHOLD VALUE OF TEST RATIO -6 NUMBER OF VALUES OF N +7 NUMBER OF VALUES OF N 0 1 2 3 5 9 35 VALUES OF N 3 NUMBER OF VALUES OF ALPHA 0.0 1.0 0.7 VALUES OF ALPHA diff --git a/driver/level2/CMakeLists.txt b/driver/level2/CMakeLists.txt index 8fceba905..61367e596 100644 --- a/driver/level2/CMakeLists.txt +++ b/driver/level2/CMakeLists.txt @@ -197,6 +197,19 @@ foreach (float_type ${FLOAT_TYPES}) endif () endforeach () +if ( BUILD_COMPLEX AND NOT BUILD_SINGLE) + if (USE_THREAD) + GenerateNamedObjects("gemv_thread.c" "" "gemv_thread_n" false "" "" false "SINGLE") + GenerateNamedObjects("gemv_thread.c" "TRANSA" "gemv_thread_t" false "" "" false "SINGLE") + endif () +endif () +if ( BUILD_COMPLEX16 AND NOT BUILD_DOUBLE) + if (USE_THREAD) + GenerateNamedObjects("gemv_thread.c" "" "gemv_thread_n" false "" "" false "DOUBLE") + GenerateNamedObjects("gemv_thread.c" "TRANSA" "gemv_thread_t" false "" "" false "DOUBLE") + endif () +endif () + if (USE_THREAD) GenerateCombinationObjects("${UL_SMP_SOURCES}" "LOWER" "U" "" 2) endif () diff --git a/driver/level2/Makefile b/driver/level2/Makefile index 79c4ca153..caecf4f97 100644 --- a/driver/level2/Makefile +++ b/driver/level2/Makefile @@ -413,23 +413,73 @@ XBLASOBJS += \ xtbmv_thread_RUU.$(SUFFIX) xtbmv_thread_RUN.$(SUFFIX) \ xtbmv_thread_RLU.$(SUFFIX) xtbmv_thread_RLN.$(SUFFIX) \ xtbmv_thread_CUU.$(SUFFIX) xtbmv_thread_CUN.$(SUFFIX) \ - xtbmv_thread_CLU.$(SUFFIX) xtbmv_thread_CLN.$(SUFFIX) \ + xtbmv_thread_CLU.$(SUFFIX) xtbmv_thread_CLN.$(SUFFIX) +ifeq ($(BUILD_BFLOAT16),1) +SBBLASOBJS += \ + sbgemv_thread_n$(TSUFFIX).$(SUFFIX) \ + sbgemv_thread_t$(TSUFFIX).$(SUFFIX) +endif + +endif + +ifneq ($(BUILD_SINGLE),1) + SBLASOBJS= +ifeq ($(BUILD_DOUBLE),1) +ifdef SMP +SBLASOBJS += \ + sgemv_thread_n.$(SUFFIX) sgemv_thread_t.$(SUFFIX) \ + strsv_NUU.$(SUFFIX) strsv_NUN.$(SUFFIX) strsv_NLU.$(SUFFIX) strsv_NLN.$(SUFFIX) \ + strsv_TUU.$(SUFFIX) strsv_TUN.$(SUFFIX) strsv_TLU.$(SUFFIX) strsv_TLN.$(SUFFIX) +endif +endif +ifeq ($(BUILD_COMPLEX),1) +ifdef SMP + SBLASOBJS = sgemv_thread_n.$(SUFFIX) sgemv_thread_t.$(SUFFIX) +endif +endif +endif +ifneq ($(BUILD_DOUBLE),1) + DBLASOBJS= +ifeq ($(BUILD_COMPLEX16),1) +ifdef SMP + DBLASOBJS = dgemv_thread_n.$(SUFFIX) dgemv_thread_t.$(SUFFIX) +endif +endif +endif +ifneq ($(BUILD_COMPLEX),1) + CBLASOBJS= +ifeq ($(BUILD_COMPLEX16),1) + CBLASOBJS= \ + ctrsv_NUU.$(SUFFIX) ctrsv_NUN.$(SUFFIX) ctrsv_NLU.$(SUFFIX) ctrsv_NLN.$(SUFFIX) \ + ctrsv_TUU.$(SUFFIX) ctrsv_TUN.$(SUFFIX) ctrsv_TLU.$(SUFFIX) ctrsv_TLN.$(SUFFIX) \ + ctrsv_RUU.$(SUFFIX) ctrsv_RUN.$(SUFFIX) ctrsv_RLU.$(SUFFIX) ctrsv_RLN.$(SUFFIX) \ + ctrsv_CUU.$(SUFFIX) ctrsv_CUN.$(SUFFIX) ctrsv_CLU.$(SUFFIX) ctrsv_CLN.$(SUFFIX) +endif +endif +ifneq ($(BUILD_COMPLEX16),1) + ZBLASOBJS= endif all :: +ifeq ($(BUILD_SINGLE),1) + sgbmv_n.$(SUFFIX) sgbmv_n.$(PSUFFIX) : gbmv_k.c $(CC) -c -UCOMPLEX -UDOUBLE -UTRANS $(CFLAGS) -o $(@F) $< sgbmv_t.$(SUFFIX) sgbmv_t.$(PSUFFIX) : gbmv_k.c $(CC) -c -UCOMPLEX -UDOUBLE -DTRANS $(CFLAGS) -o $(@F) $< +endif + +ifeq ($(BUILD_DOUBLE),1) dgbmv_n.$(SUFFIX) dgbmv_n.$(PSUFFIX) : gbmv_k.c $(CC) -c -UCOMPLEX -DDOUBLE -UTRANS $(CFLAGS) -o $(@F) $< dgbmv_t.$(SUFFIX) dgbmv_t.$(PSUFFIX) : gbmv_k.c $(CC) -c -UCOMPLEX -DDOUBLE -DTRANS $(CFLAGS) -o $(@F) $< +endif qgbmv_n.$(SUFFIX) qgbmv_n.$(PSUFFIX) : gbmv_k.c $(CC) -c -UCOMPLEX -DXDOUBLE -UTRANS $(CFLAGS) -o $(@F) $< @@ -437,6 +487,8 @@ qgbmv_n.$(SUFFIX) qgbmv_n.$(PSUFFIX) : gbmv_k.c qgbmv_t.$(SUFFIX) qgbmv_t.$(PSUFFIX) : gbmv_k.c $(CC) -c -UCOMPLEX 
-DXDOUBLE -DTRANS $(CFLAGS) -o $(@F) $< +ifeq ($(BUILD_COMPLEX),1) + cgbmv_n.$(SUFFIX) cgbmv_n.$(PSUFFIX) : zgbmv_k.c $(CC) -c -DCOMPLEX -UDOUBLE -UTRANS -UCONJ -UXCONJ $(CFLAGS) -o $(@F) $< @@ -460,6 +512,9 @@ cgbmv_s.$(SUFFIX) cgbmv_s.$(PSUFFIX) : zgbmv_k.c cgbmv_d.$(SUFFIX) cgbmv_d.$(PSUFFIX) : zgbmv_k.c $(CC) -c -DCOMPLEX -UDOUBLE -DTRANS -DCONJ -DXCONJ $(CFLAGS) -o $(@F) $< +endif + +ifeq ($(BUILD_COMPLEX16),1) zgbmv_n.$(SUFFIX) zgbmv_n.$(PSUFFIX) : zgbmv_k.c $(CC) -c -DCOMPLEX -DDOUBLE -UTRANS -UCONJ -UXCONJ $(CFLAGS) -o $(@F) $< @@ -484,6 +539,7 @@ zgbmv_s.$(SUFFIX) zgbmv_s.$(PSUFFIX) : zgbmv_k.c zgbmv_d.$(SUFFIX) zgbmv_d.$(PSUFFIX) : zgbmv_k.c $(CC) -c -DCOMPLEX -DDOUBLE -DTRANS -DCONJ -DXCONJ $(CFLAGS) -o $(@F) $< +endif xgbmv_n.$(SUFFIX) xgbmv_n.$(PSUFFIX) : zgbmv_k.c $(CC) -c -DCOMPLEX -DXDOUBLE -UTRANS -UCONJ -UXCONJ $(CFLAGS) -o $(@F) $< @@ -509,24 +565,34 @@ xgbmv_s.$(SUFFIX) xgbmv_s.$(PSUFFIX) : zgbmv_k.c xgbmv_d.$(SUFFIX) xgbmv_d.$(PSUFFIX) : zgbmv_k.c $(CC) -c -DCOMPLEX -DXDOUBLE -DTRANS -DCONJ -DXCONJ $(CFLAGS) -o $(@F) $< + +ifeq ($(BUILD_SINGLE),1) + sgbmv_thread_n.$(SUFFIX) sgbmv_thread_n.$(PSUFFIX) : gbmv_thread.c $(CC) -c -UCOMPLEX -UDOUBLE -UTRANSA $(CFLAGS) -o $(@F) $< sgbmv_thread_t.$(SUFFIX) sgbmv_thread_t.$(PSUFFIX) : gbmv_thread.c $(CC) -c -UCOMPLEX -UDOUBLE -DTRANSA $(CFLAGS) -o $(@F) $< +endif + + +ifeq ($(BUILD_DOUBLE),1) dgbmv_thread_n.$(SUFFIX) dgbmv_thread_n.$(PSUFFIX) : gbmv_thread.c $(CC) -c -UCOMPLEX -DDOUBLE -UTRANSA $(CFLAGS) -o $(@F) $< dgbmv_thread_t.$(SUFFIX) dgbmv_thread_t.$(PSUFFIX) : gbmv_thread.c $(CC) -c -UCOMPLEX -DDOUBLE -DTRANSA $(CFLAGS) -o $(@F) $< - +endif qgbmv_thread_n.$(SUFFIX) qgbmv_thread_n.$(PSUFFIX) : gbmv_thread.c $(CC) -c -UCOMPLEX -DXDOUBLE -UTRANSA $(CFLAGS) -o $(@F) $< qgbmv_thread_t.$(SUFFIX) qgbmv_thread_t.$(PSUFFIX) : gbmv_thread.c $(CC) -c -UCOMPLEX -DXDOUBLE -DTRANSA $(CFLAGS) -o $(@F) $< + +ifeq ($(BUILD_COMPLEX),1) + cgbmv_thread_n.$(SUFFIX) cgbmv_thread_n.$(PSUFFIX) : gbmv_thread.c $(CC) -c -DCOMPLEX -UDOUBLE -UTRANSA -UCONJ -UXCONJ $(CFLAGS) -o $(@F) $< @@ -550,6 +616,10 @@ cgbmv_thread_s.$(SUFFIX) cgbmv_thread_s.$(PSUFFIX) : gbmv_thread.c cgbmv_thread_d.$(SUFFIX) cgbmv_thread_d.$(PSUFFIX) : gbmv_thread.c $(CC) -c -DCOMPLEX -UDOUBLE -DTRANSA -DCONJ -DXCONJ $(CFLAGS) -o $(@F) $< +endif + + +ifeq ($(BUILD_COMPLEX16),1) zgbmv_thread_n.$(SUFFIX) zgbmv_thread_n.$(PSUFFIX) : gbmv_thread.c $(CC) -c -DCOMPLEX -DDOUBLE -UTRANSA -UCONJ -UXCONJ $(CFLAGS) -o $(@F) $< @@ -574,6 +644,7 @@ zgbmv_thread_s.$(SUFFIX) zgbmv_thread_s.$(PSUFFIX) : gbmv_thread.c zgbmv_thread_d.$(SUFFIX) zgbmv_thread_d.$(PSUFFIX) : gbmv_thread.c $(CC) -c -DCOMPLEX -DDOUBLE -DTRANSA -DCONJ -DXCONJ $(CFLAGS) -o $(@F) $< +endif xgbmv_thread_n.$(SUFFIX) xgbmv_thread_n.$(PSUFFIX) : gbmv_thread.c $(CC) -c -DCOMPLEX -DXDOUBLE -UTRANSA -UCONJ -UXCONJ $(CFLAGS) -o $(@F) $< @@ -599,24 +670,32 @@ xgbmv_thread_s.$(SUFFIX) xgbmv_thread_s.$(PSUFFIX) : gbmv_thread.c xgbmv_thread_d.$(SUFFIX) xgbmv_thread_d.$(PSUFFIX) : gbmv_thread.c $(CC) -c -DCOMPLEX -DXDOUBLE -DTRANSA -DCONJ -DXCONJ $(CFLAGS) -o $(@F) $< + +ifneq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE),$(BUILD_COMPLEX))" "" sgemv_thread_n.$(SUFFIX) sgemv_thread_n.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UTRANSA -UCONJ -UXCONJ $< -o $(@F) sgemv_thread_t.$(SUFFIX) sgemv_thread_t.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DTRANSA -UCONJ -UXCONJ $< -o $(@F) +endif + +ifneq "$(or $(BUILD_DOUBLE),$(BUILD_COMPLEX16))" "" dgemv_thread_n.$(SUFFIX) 
dgemv_thread_n.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UTRANSA -UCONJ -UXCONJ $< -o $(@F) dgemv_thread_t.$(SUFFIX) dgemv_thread_t.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DTRANSA -UCONJ -UXCONJ $< -o $(@F) - +endif qgemv_thread_n.$(SUFFIX) qgemv_thread_n.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UTRANSA -UCONJ -UXCONJ $< -o $(@F) qgemv_thread_t.$(SUFFIX) qgemv_thread_t.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DTRANSA -UCONJ -UXCONJ $< -o $(@F) + +ifeq ($(BUILD_COMPLEX),1) + cgemv_thread_n.$(SUFFIX) cgemv_thread_n.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UTRANSA -UCONJ -UXCONJ $< -o $(@F) @@ -640,6 +719,10 @@ cgemv_thread_s.$(SUFFIX) cgemv_thread_s.$(PSUFFIX) : gemv_thread.c ../../common. cgemv_thread_d.$(SUFFIX) cgemv_thread_d.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DTRANSA -DCONJ -DXCONJ $< -o $(@F) +endif + + +ifeq ($(BUILD_COMPLEX16),1) zgemv_thread_n.$(SUFFIX) zgemv_thread_n.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UTRANSA -UCONJ -UXCONJ $< -o $(@F) @@ -664,6 +747,7 @@ zgemv_thread_s.$(SUFFIX) zgemv_thread_s.$(PSUFFIX) : gemv_thread.c ../../common. zgemv_thread_d.$(SUFFIX) zgemv_thread_d.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DTRANSA -DCONJ -DXCONJ $< -o $(@F) +endif xgemv_thread_n.$(SUFFIX) xgemv_thread_n.$(PSUFFIX) : gemv_thread.c ../../common.h $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UTRANSA -UCONJ -UXCONJ $< -o $(@F) @@ -3615,4 +3699,12 @@ xtrsv_CUU.$(SUFFIX) xtrsv_CUU.$(PSUFFIX) : ztrsv_L.c ../../param.h xtrsv_CUN.$(SUFFIX) xtrsv_CUN.$(PSUFFIX) : ztrsv_L.c ../../param.h $(CC) -c $(CFLAGS) -DXDOUBLE -DCOMPLEX -DTRANSA=4 -UUNIT $< -o $(@F) +ifeq ($(BUILD_BFLOAT16),1) +sbgemv_thread_n.$(SUFFIX) sbgemv_thread_n.$(PSUFFIX) : sbgemv_thread.c ../../common.h + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UTRANSA -UCONJ -UXCONJ $< -o $(@F) +sbgemv_thread_t.$(SUFFIX) sbgemv_thread_t.$(PSUFFIX) : sbgemv_thread.c ../../common.h + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DTRANSA -UCONJ -UXCONJ $< -o $(@F) +endif + + include ../../Makefile.tail diff --git a/driver/level2/gemv_thread.c b/driver/level2/gemv_thread.c index d57740314..0d8c6b005 100644 --- a/driver/level2/gemv_thread.c +++ b/driver/level2/gemv_thread.c @@ -72,9 +72,9 @@ defined __BORLANDC__ ) # define thread_local __declspec(thread) /* note that ICC (linux) and Clang are covered by __GNUC__ */ -# elif defined __GNUC__ || \ +# elif (defined __GNUC__ || \ defined __SUNPRO_C || \ - defined __xlC__ + defined __xlC__) && !defined(__APPLE__) # define thread_local __thread # else # define UNSAFE diff --git a/driver/level2/sbgemv_thread.c b/driver/level2/sbgemv_thread.c new file mode 100644 index 000000000..534c60f95 --- /dev/null +++ b/driver/level2/sbgemv_thread.c @@ -0,0 +1,149 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. 
Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include <stdio.h> +#include <stdlib.h> +#include "common.h" + +#ifndef TRANSA +#define SBGEMV SBGEMV_N +#else +#define SBGEMV SBGEMV_T +#endif + +static int sbgemv_kernel(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *dummy1, FLOAT *dummy2, BLASLONG dummy3){ + + bfloat16 *a, *x; + float *y; + BLASLONG lda, incx, incy; + BLASLONG m_from, m_to, n_from, n_to; + + a = (bfloat16 *)args->a; + x = (bfloat16 *)args->b; + y = (float *)args->c; + + lda = args->lda; + incx = args->ldb; + incy = args->ldc; + +#ifndef TRANSA // N + m_from = *(range_m + 0); + m_to = *(range_m + 1); + n_from = 0; + n_to = args -> n; + a += m_from; + y += m_from * incy; +#else // T + m_from = 0; + m_to = args->m; + n_from = *(range_n + 0); + n_to = *(range_n + 1); + a += n_from * lda; + y += n_from * incy; +#endif + + SBGEMV(m_to - m_from, n_to - n_from, *((FLOAT *)(args->alpha)), a, lda, x, incx, *((FLOAT *)(args->beta)), y, incy); + + return 0; +} + +int CNAME(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, BLASLONG incx, float beta, float *y, BLASLONG incy, int threads) +{ + blas_arg_t args; + blas_queue_t queue[MAX_CPU_NUMBER]; + BLASLONG range[MAX_CPU_NUMBER + 1]; + +#ifndef TRANSA + BLASLONG width_for_split = m; +#else + BLASLONG width_for_split = n; +#endif + + BLASLONG BLOCK_WIDTH = width_for_split/threads; + + int mode = BLAS_BFLOAT16 | BLAS_REAL; + + args.m = m; + args.n = n; + args.a = (void *)a; + args.b = (void *)x; + args.c = (void *)y; + args.lda = lda; + args.ldb = incx; + args.ldc = incy; + args.alpha = (void *)&alpha; + args.beta = (void *)&beta; + + range[0] = 0; + + int thread_idx; + + for (thread_idx=0; thread_idx<threads; thread_idx++) { + if (thread_idx != threads-1) { + range[thread_idx + 1] = range[thread_idx] + BLOCK_WIDTH; + } else { + range[thread_idx + 1] = range[thread_idx] + width_for_split - (threads-1)*BLOCK_WIDTH; + } + + queue[thread_idx].mode = mode; + queue[thread_idx].routine = sbgemv_kernel; + queue[thread_idx].args = &args; +#ifndef TRANSA + queue[thread_idx].range_m = &range[thread_idx]; + queue[thread_idx].range_n = NULL; +#else + queue[thread_idx].range_m = NULL; + queue[thread_idx].range_n = &range[thread_idx]; +#endif + queue[thread_idx].sa = NULL; + queue[thread_idx].sb = NULL; + queue[thread_idx].next = &queue[thread_idx + 1]; + } + + if (thread_idx) { + queue[0].sa = NULL; + queue[0].sb = NULL; + queue[thread_idx - 1].next = NULL; + + exec_blas(thread_idx, queue); + } + + return 0; +} diff --git a/driver/level3/gemm3m_level3.c b/driver/level3/gemm3m_level3.c --- a/driver/level3/gemm3m_level3.c +++ b/driver/level3/gemm3m_level3.c @@ -333,7 +333,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM3M_UNROLL_N) min_jj = GEMM3M_UNROLL_N; + if (min_jj > GEMM3M_UNROLL_N*3) min_jj = GEMM3M_UNROLL_N*3; START_RPCC(); @@ -398,7 +398,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM3M_UNROLL_N) min_jj = GEMM3M_UNROLL_N; + if (min_jj > GEMM3M_UNROLL_N*3) min_jj = GEMM3M_UNROLL_N*3; START_RPCC();
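The CNAME driver above deals the M rows (or N columns in the transposed case) out as one contiguous range per worker, with the division remainder absorbed by the last one. The partition in isolation, as a runnable sketch with illustrative sizes:

/* range[] partition used by sbgemv_thread.c, stand-alone. */
#include <stdio.h>

#define MAX_CPU_NUMBER 8

int main(void) {
  long width = 1003, threads = 4;          /* illustrative values */
  long range[MAX_CPU_NUMBER + 1];
  long block = width / threads;

  range[0] = 0;
  for (long t = 0; t < threads; t++)
    range[t + 1] = (t != threads - 1)
                 ? range[t] + block                          /* even share */
                 : range[t] + width - (threads - 1) * block; /* remainder  */

  for (long t = 0; t < threads; t++)
    printf("thread %ld: [%ld, %ld)\n", t, range[t], range[t + 1]);
  return 0;
}

@@ -463,7 +463,7 @@ int CNAME(blas_arg_t *args, BLASLONG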
*range_m, BLASLONG *range_n, for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM3M_UNROLL_N) min_jj = GEMM3M_UNROLL_N; + if (min_jj > GEMM3M_UNROLL_N*3) min_jj = GEMM3M_UNROLL_N*3; START_RPCC(); diff --git a/driver/level3/level3.c b/driver/level3/level3.c index 1ab7a740e..9b44deb85 100644 --- a/driver/level3/level3.c +++ b/driver/level3/level3.c @@ -62,18 +62,18 @@ #ifndef ICOPY_OPERATION #if defined(NN) || defined(NT) || defined(NC) || defined(NR) || \ defined(RN) || defined(RT) || defined(RC) || defined(RR) -#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); +#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (IFLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); #else -#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); +#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (IFLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); #endif #endif #ifndef OCOPY_OPERATION #if defined(NN) || defined(TN) || defined(CN) || defined(RN) || \ defined(NR) || defined(TR) || defined(CR) || defined(RR) -#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); +#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (IFLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); #else -#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); +#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (IFLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); #endif #endif @@ -173,7 +173,8 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, XFLOAT *sa, XFLOAT *sb, BLASLONG dummy){ BLASLONG k, lda, ldb, ldc; FLOAT *alpha, *beta; - FLOAT *a, *b, *c; + IFLOAT *a, *b; + FLOAT *c; BLASLONG m_from, m_to, n_from, n_to; BLASLONG ls, is, js; @@ -198,8 +199,8 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, k = K; - a = (FLOAT *)A; - b = (FLOAT *)B; + a = (IFLOAT *)A; + b = (IFLOAT *)B; c = (FLOAT *)C; lda = LDA; @@ -332,13 +333,18 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, #else for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N; else - if (min_jj >= 2*GEMM_UNROLL_N) min_jj = 2*GEMM_UNROLL_N; +/* + if (min_jj >= 2*GEMM_UNROLL_N) min_jj = 2*GEMM_UNROLL_N; else +*/ if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif START_RPCC(); diff --git a/driver/level3/level3_gemm3m_thread.c b/driver/level3/level3_gemm3m_thread.c index 4903aa5bd..39824fc5a 100644 --- a/driver/level3/level3_gemm3m_thread.c +++ b/driver/level3/level3_gemm3m_thread.c @@ -91,7 +91,7 @@ #endif typedef struct { -#if __STDC_VERSION__ >= 201112L +#ifdef HAVE_C11 _Atomic #else volatile @@ -104,7 +104,7 @@ typedef struct { #define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \ GEMM_BETA((M_TO) - (M_FROM), (N_TO - N_FROM), 0, \ BETA[0], BETA[1], NULL, 0, NULL, 0, \ - (FLOAT *)(C) + (M_FROM) + (N_FROM) * (LDC) * COMPSIZE, LDC) + (FLOAT *)(C) + ((M_FROM) + (N_FROM) * 
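The level3.c hunks above retype the packed A and B operands as IFLOAT while C and the accumulators stay FLOAT; under BUILD_BFLOAT16 those map to bfloat16 and float, so only the matrix inputs are narrow. A toy, self-contained illustration of that input/compute split (bfloat16 modeled as the upper 16 bits of an IEEE-754 float; this is not the library's conversion code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef uint16_t bfloat16;   /* raw upper half of a float */
typedef bfloat16 IFLOAT;     /* input type                */
typedef float    FLOAT;      /* compute/output type       */

static FLOAT bf16_to_float(bfloat16 b) {
  uint32_t u = (uint32_t)b << 16;
  FLOAT f;
  memcpy(&f, &u, sizeof f);
  return f;
}

static FLOAT dot(long n, const IFLOAT *a, const IFLOAT *b) {
  FLOAT acc = 0.0f;                    /* accumulate in full float */
  for (long i = 0; i < n; i++)
    acc += bf16_to_float(a[i]) * bf16_to_float(b[i]);
  return acc;
}

int main(void) {
  IFLOAT a[2] = {0x3F80, 0x4000};      /* 1.0, 2.0 */
  IFLOAT b[2] = {0x4040, 0x3F80};      /* 3.0, 1.0 */
  printf("dot = %g\n", (double)dot(2, a, b));   /* prints 5 */
  return 0;
}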
(LDC)) * COMPSIZE, LDC) #endif #ifndef ICOPYB_OPERATION @@ -408,13 +408,13 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Make sure if no one is using another buffer */ for (i = 0; i < args -> nthreads; i++) - while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;}; + while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;MB;}; STOP_RPCC(waiting1); for(jjs = xxx; jjs < MIN(n_to, xxx + div_n); jjs += min_jj){ min_jj = MIN(n_to, xxx + div_n) - jjs; - if (min_jj > GEMM3M_UNROLL_N) min_jj = GEMM3M_UNROLL_N; + if (min_jj > GEMM3M_UNROLL_N*3) min_jj = GEMM3M_UNROLL_N*3; START_RPCC(); @@ -441,7 +441,8 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, for (i = 0; i < args -> nthreads; i++) job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside]; - } + WMB; + } current = mypos; @@ -458,7 +459,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, START_RPCC(); /* thread has to wait */ - while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;}; + while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;MB;}; STOP_RPCC(waiting2); @@ -477,6 +478,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, if (m_to - m_from == min_i) { job[current].working[mypos][CACHE_LINE_SIZE * bufferside] = 0; + WMB; } } } while (current != mypos); @@ -517,6 +519,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, if (is + min_i >= m_to) { /* Thread doesn't need this buffer any more */ job[current].working[mypos][CACHE_LINE_SIZE * bufferside] = 0; + WMB; } } @@ -541,13 +544,13 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Make sure if no one is using another buffer */ for (i = 0; i < args -> nthreads; i++) - while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;}; + while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;MB;}; STOP_RPCC(waiting1); for(jjs = xxx; jjs < MIN(n_to, xxx + div_n); jjs += min_jj){ min_jj = MIN(n_to, xxx + div_n) - jjs; - if (min_jj > GEMM3M_UNROLL_N) min_jj = GEMM3M_UNROLL_N; + if (min_jj > GEMM3M_UNROLL_N*3) min_jj = GEMM3M_UNROLL_N*3; START_RPCC(); @@ -595,7 +598,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, START_RPCC(); /* thread has to wait */ - while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;}; + while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;MB;}; STOP_RPCC(waiting2); @@ -613,6 +616,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, if (m_to - m_from == min_i) { job[current].working[mypos][CACHE_LINE_SIZE * bufferside] = 0; + WMB; } } } while (current != mypos); @@ -677,13 +681,13 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Make sure if no one is using another buffer */ for (i = 0; i < args -> nthreads; i++) - while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;}; + while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;MB;}; STOP_RPCC(waiting1); for(jjs = xxx; jjs < MIN(n_to, xxx + div_n); jjs += min_jj){ min_jj = MIN(n_to, xxx + div_n) - jjs; - if (min_jj > GEMM3M_UNROLL_N) min_jj = GEMM3M_UNROLL_N; + if (min_jj > GEMM3M_UNROLL_N*3) min_jj = GEMM3M_UNROLL_N*3; START_RPCC(); @@ -731,7 +735,7 @@ static int inner_thread(blas_arg_t *args, 
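The recurring YIELDING;MB; and WMB edits in this file follow one rule: every spin on a job[].working flag re-reads it behind a read barrier, and every store that publishes or releases a shared buffer is paired with a write barrier, which is what keeps the flag protocol sound on weakly ordered CPUs (ARM, POWER). The same publish/consume handshake in portable C11, with acquire/release standing in for the MB/WMB macros:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long working;      /* 0 = free, nonzero = published */
static long buffer_payload;

static void producer(void) {
  buffer_payload = 42;                        /* fill the buffer...     */
  atomic_store_explicit(&working, 1,          /* ...then publish (~WMB) */
                        memory_order_release);
}

static long consumer(void) {
  while (atomic_load_explicit(&working,       /* spin + acquire (~MB)   */
                              memory_order_acquire) == 0)
    ;                                         /* YIELDING would go here */
  return buffer_payload;                      /* payload is visible now */
}

int main(void) {
  producer();
  printf("%ld\n", consumer());
  return 0;
}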
BLASLONG *range_m, BLASLONG *range_n, START_RPCC(); /* thread has to wait */ - while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;}; + while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;MB;}; STOP_RPCC(waiting2); @@ -748,8 +752,9 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, } if (m_to - m_from == min_i) { - job[current].working[mypos][CACHE_LINE_SIZE * bufferside] = 0; - } + job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; + WMB; +} } } while (current != mypos); @@ -787,7 +792,8 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, #endif if (is + min_i >= m_to) { /* Thread doesn't need this buffer any more */ - job[current].working[mypos][CACHE_LINE_SIZE * bufferside] = 0; + job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; + WMB; } } @@ -804,7 +810,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, for (i = 0; i < args -> nthreads; i++) { for (xxx = 0; xxx < DIVIDE_RATE; xxx++) { - while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {YIELDING;}; + while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {YIELDING;MB;}; } } @@ -840,6 +846,15 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){ +#ifndef USE_OPENMP +#ifndef OS_WINDOWS +static pthread_mutex_t level3_lock = PTHREAD_MUTEX_INITIALIZER; +#else +CRITICAL_SECTION level3_lock; +InitializeCriticalSection((PCRITICAL_SECTION)&level3_lock); +#endif +#endif + blas_arg_t newarg; blas_queue_t queue[MAX_CPU_NUMBER]; @@ -869,6 +884,14 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG mode = BLAS_SINGLE | BLAS_REAL | BLAS_NODE; #endif +#ifndef USE_OPENMP +#ifndef OS_WINDOWS +pthread_mutex_lock(&level3_lock); +#else +EnterCriticalSection((PCRITICAL_SECTION)&level3_lock); +#endif +#endif + newarg.m = args -> m; newarg.n = args -> n; newarg.k = args -> k; @@ -973,6 +996,14 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG free(job); #endif +#ifndef USE_OPENMP +#ifndef OS_WINDOWS + pthread_mutex_unlock(&level3_lock); +#else + LeaveCriticalSection((PCRITICAL_SECTION)&level3_lock); +#endif +#endif + return 0; } diff --git a/driver/level3/level3_syrk_threaded.c b/driver/level3/level3_syrk_threaded.c index 574f825b0..d7dcd68a3 100644 --- a/driver/level3/level3_syrk_threaded.c +++ b/driver/level3/level3_syrk_threaded.c @@ -67,7 +67,7 @@ #endif typedef struct { -#if __STDC_VERSION__ >= 201112L +#ifdef HAVE_C11 _Atomic #else volatile @@ -526,7 +526,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO BLASLONG width, i, j, k; BLASLONG n, n_from, n_to; int mode, mask; - double dnum; + double dnum, di, dinum; if ((nthreads == 1) || (args -> n < nthreads * SWITCH_RATIO)) { SYRK_LOCAL(args, range_m, range_n, sa, sb, 0); @@ -601,9 +601,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO if (nthreads - num_cpu > 1) { - double di = (double)i; + di = (double)i; - width = (((BLASLONG)((sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1) ); + dinum = di * di + dnum; + + if (dinum > 0) + width = (((BLASLONG)((sqrt(dinum) - di) + mask)/(mask+1)) * (mask+1) ); + else + width = (((BLASLONG)(- di + mask)/(mask+1)) * (mask+1) ); if (num_cpu == 0) width = n - (((n - width)/(mask+1)) * (mask+1) ); @@ -643,10 +648,15 @@ int 
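gemm_driver is now serialized on non-OpenMP builds: a function-scope static mutex brackets the whole threaded-GEMM setup, using the POSIX static initializer where available and a Windows CRITICAL_SECTION otherwise (the latter has no static initializer, which is why the patch initializes it inline). The POSIX pattern in isolation, as a minimal sketch with a stub body:

#include <pthread.h>

static int gemm_driver_body(void) { return 0; /* ...threaded GEMM... */ }

int gemm_driver(void) {
  static pthread_mutex_t level3_lock = PTHREAD_MUTEX_INITIALIZER;
  int ret;
  pthread_mutex_lock(&level3_lock);   /* one driver instance at a time */
  ret = gemm_driver_body();
  pthread_mutex_unlock(&level3_lock);
  return ret;
}

int main(void) { return gemm_driver(); }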
CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO if (nthreads - num_cpu > 1) { - double di = (double)i; + di = (double)i; - width = (((BLASLONG)((sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1)); + dinum = di * di +dnum; + if (dinum > 0) + width = (((BLASLONG)((sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1)); + else + width = (((BLASLONG)(- di + mask)/(mask+1)) * (mask+1)); + if ((width > n - i) || (width < mask)) width = n - i; } else { diff --git a/driver/level3/level3_thread.c b/driver/level3/level3_thread.c index cfbff7554..2b33c9589 100644 --- a/driver/level3/level3_thread.c +++ b/driver/level3/level3_thread.c @@ -117,18 +117,18 @@ typedef struct { #ifndef ICOPY_OPERATION #if defined(NN) || defined(NT) || defined(NC) || defined(NR) || \ defined(RN) || defined(RT) || defined(RC) || defined(RR) -#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); +#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (IFLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); #else -#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); +#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (IFLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); #endif #endif #ifndef OCOPY_OPERATION #if defined(NN) || defined(TN) || defined(CN) || defined(RN) || \ defined(NR) || defined(TR) || defined(CR) || defined(RR) -#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); +#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (IFLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); #else -#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); +#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (IFLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); #endif #endif @@ -219,15 +219,16 @@ typedef struct { #define STOP_RPCC(COUNTER) #endif -static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){ +static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, IFLOAT *sa, IFLOAT *sb, BLASLONG mypos){ - FLOAT *buffer[DIVIDE_RATE]; + IFLOAT *buffer[DIVIDE_RATE]; BLASLONG k, lda, ldb, ldc; BLASLONG m_from, m_to, n_from, n_to; FLOAT *alpha, *beta; - FLOAT *a, *b, *c; + IFLOAT *a, *b; + FLOAT *c; job_t *job = (job_t *)args -> common; BLASLONG nthreads_m; @@ -255,8 +256,8 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, k = K; - a = (FLOAT *)A; - b = (FLOAT *)B; + a = (IFLOAT *)A; + b = (IFLOAT *)B; c = (FLOAT *)C; lda = LDA; @@ -351,8 +352,9 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Make sure if no one is using workspace */ START_RPCC(); for (i = 0; i < args -> nthreads; i++) - while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;MB;}; + while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;}; STOP_RPCC(waiting1); + MB; #if defined(FUSED_GEMM) && !defined(TIMING) @@ -365,12 +367,18 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Split local region of B into parts */ for(jjs = js; jjs < MIN(n_to, js + div_n); jjs += min_jj){ min_jj = MIN(n_to, js + div_n) - jjs; +#if 
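The level3_syrk_threaded.c change hoists di and dinum out of the branch and guards the square root. The intent of the formula: each thread should cover an equal slice of a triangular matrix, so with dnum roughly n*n/nthreads the next slice width solves (i+width)^2 = i^2 + dnum, i.e. width = sqrt(i*i + dnum) - i, rounded to the kernel's unroll mask; the new guard keeps a degenerate dinum from reaching sqrt(). A runnable sketch with illustrative sizes (link with -lm):

#include <math.h>
#include <stdio.h>

int main(void) {
  long n = 1000, nthreads = 4, mask = 7;
  double dnum = (double)n * (double)n / (double)nthreads;
  long i = 0;

  while (i < n) {
    double di = (double)i, dinum = di * di + dnum;
    long width;

    if (dinum > 0)       /* guarded sqrt, as in the patch */
      width = ((long)((sqrt(dinum) - di) + mask) / (mask + 1)) * (mask + 1);
    else                 /* degenerate split: fall back    */
      width = ((long)(-di + mask) / (mask + 1)) * (mask + 1);

    if (width > n - i || width < mask) width = n - i;
    printf("rows [%ld, %ld)\n", i, i + width);
    i += width;
  }
  return 0;
}

Later row ranges come out narrower than earlier ones, matching the growing per-row cost of the triangular update.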
defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N; else +/* if (min_jj >= 2*GEMM_UNROLL_N) min_jj = 2*GEMM_UNROLL_N; else +*/ if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif /* Copy part of local region of B into workspace */ START_RPCC(); OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs, @@ -391,10 +399,10 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, } #endif + WMB; /* Set flag so other threads can access local region of B */ for (i = mypos_n * nthreads_m; i < (mypos_n + 1) * nthreads_m; i++) job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside]; - WMB; } /* Get regions of B from other threads and apply kernel */ @@ -413,13 +421,14 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Wait until other region of B is initialized */ START_RPCC(); - while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;MB;}; + while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;}; STOP_RPCC(waiting2); + MB; /* Apply kernel with local region of A and part of other region of B */ START_RPCC(); KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - js, div_n), min_l, alpha, - sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], + sa, (IFLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], c, ldc, m_from, js); STOP_RPCC(kernel); @@ -430,8 +439,8 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Clear synchronization flag if this thread is done with other region of B */ if (m_to - m_from == min_i) { - job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; WMB; + job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; } } } while (current != mypos); @@ -463,7 +472,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Apply kernel with local region of A and part of region of B */ START_RPCC(); KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - js, div_n), min_l, alpha, - sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], + sa, (IFLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], c, ldc, is, js); STOP_RPCC(kernel); @@ -473,8 +482,8 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Clear synchronization flag if this thread is done with region of B */ if (is + min_i >= m_to) { - job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; WMB; + job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; } } @@ -493,10 +502,11 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, START_RPCC(); for (i = 0; i < args -> nthreads; i++) { for (js = 0; js < DIVIDE_RATE; js++) { - while (job[mypos].working[i][CACHE_LINE_SIZE * js] ) {YIELDING;MB;}; + while (job[mypos].working[i][CACHE_LINE_SIZE * js] ) {YIELDING;}; } } STOP_RPCC(waiting3); + MB; #ifdef TIMING BLASLONG waiting = waiting1 + waiting2 + waiting3; @@ -525,7 +535,7 @@ static int round_up(int remainder, int width, int multiple) static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG - *range_n, FLOAT *sa, FLOAT *sb, + *range_n, IFLOAT *sa, IFLOAT *sb, BLASLONG nthreads_m, BLASLONG nthreads_n) { #ifndef USE_OPENMP @@ -701,7 +711,7 @@ 
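This SKYLAKEX/COOPERLAKE special case recurs in level3.c, level3_thread.c, trmm_L.c and trmm_R.c: the AVX512 s/d/c/z GEMM kernels only reach full speed when a copied B panel spans at least 6*GEMM_UNROLL_N columns, so the cap on min_jj is raised there, while other targets keep the 3x cap with a 1x fallback. Both ladders side by side as a runnable sketch (GEMM_UNROLL_N chosen arbitrarily):

#include <stdio.h>

#define GEMM_UNROLL_N 4

static long cap_min_jj(long min_jj, int avx512) {
  if (avx512) {
    if (min_jj >= 6 * GEMM_UNROLL_N) min_jj = 6 * GEMM_UNROLL_N;
  } else {
    if (min_jj >= 3 * GEMM_UNROLL_N) min_jj = 3 * GEMM_UNROLL_N;
    else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N;
  }
  return min_jj;
}

int main(void) {
  for (long w = 4; w <= 32; w += 7)
    printf("width %2ld -> generic %2ld, avx512 %2ld\n",
           w, cap_min_jj(w, 0), cap_min_jj(w, 1));
  return 0;
}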
EnterCriticalSection((PCRITICAL_SECTION)&level3_lock); } } } - + WMB; /* Execute parallel computation */ exec_blas(nthreads, queue); } @@ -721,7 +731,7 @@ EnterCriticalSection((PCRITICAL_SECTION)&level3_lock); return 0; } -int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){ +int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, IFLOAT *sa, IFLOAT *sb, BLASLONG mypos){ BLASLONG m = args -> m; BLASLONG n = args -> n; diff --git a/driver/level3/syrk_thread.c b/driver/level3/syrk_thread.c index b26d363c4..12808afd5 100644 --- a/driver/level3/syrk_thread.c +++ b/driver/level3/syrk_thread.c @@ -56,12 +56,16 @@ int CNAME(int mode, blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, int ( if (!(mode & BLAS_COMPLEX)) { switch (mode & BLAS_PREC) { +#if defined(BUILD_SINGLE) || defined(BUILD_COMPLEX) case BLAS_SINGLE: mask = SGEMM_UNROLL_MN - 1; break; +#endif +#if defined(BUILD_DOUBLE) || defined(BUILD_COMPLEX16) case BLAS_DOUBLE: mask = DGEMM_UNROLL_MN - 1; break; +#endif #ifdef EXPRECISION case BLAS_XDOUBLE: mask = MAX(QGEMM_UNROLL_M, QGEMM_UNROLL_N) - 1; @@ -70,12 +74,16 @@ int CNAME(int mode, blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, int ( } } else { switch (mode & BLAS_PREC) { +#ifdef BUILD_COMPLEX case BLAS_SINGLE: mask = CGEMM_UNROLL_MN - 1; break; +#endif +#ifdef BUILD_COMPLEX16 case BLAS_DOUBLE: mask = ZGEMM_UNROLL_MN - 1; break; +#endif #ifdef EXPRECISION case BLAS_XDOUBLE: mask = MAX(XGEMM_UNROLL_M, XGEMM_UNROLL_N) - 1; diff --git a/driver/level3/trmm_L.c b/driver/level3/trmm_L.c index 8a81d31a0..880de4df4 100644 --- a/driver/level3/trmm_L.c +++ b/driver/level3/trmm_L.c @@ -122,6 +122,9 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO if (min_l > GEMM_Q) min_l = GEMM_Q; min_i = min_l; if (min_i > GEMM_P) min_i = GEMM_P; + if( min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } START_RPCC(); @@ -135,10 +138,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif START_RPCC(); GEMM_ONCOPY(min_l, min_jj, b + (jjs * ldb) * COMPSIZE, ldb, sb + min_l * (jjs - js) * COMPSIZE); @@ -157,9 +164,12 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO } - for(is = min_i; is < min_l; is += GEMM_P){ + for(is = min_i; is < min_l; is += min_i){ min_i = min_l - is; if (min_i > GEMM_P) min_i = GEMM_P; + if( min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } START_RPCC(); @@ -188,6 +198,10 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO if (min_l > GEMM_Q) min_l = GEMM_Q; min_i = ls; if (min_i > GEMM_P) min_i = GEMM_P; + if( min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } + START_RPCC(); @@ -201,10 +215,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || 
defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif START_RPCC(); GEMM_ONCOPY(min_l, min_jj, b + (ls + jjs * ldb) * COMPSIZE, ldb, sb + min_l * (jjs - js) * COMPSIZE); @@ -223,9 +241,12 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO STOP_RPCC(gemmcost); } - for(is = min_i; is < ls; is += GEMM_P){ + for(is = min_i; is < ls; is += min_i){ min_i = ls - is; if (min_i > GEMM_P) min_i = GEMM_P; + if( min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } START_RPCC(); @@ -248,9 +269,12 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO STOP_RPCC(gemmcost); } - for(is = ls; is < ls + min_l; is += GEMM_P){ + for(is = ls; is < ls + min_l; is += min_i){ min_i = ls + min_l - is; if (min_i > GEMM_P) min_i = GEMM_P; + if( min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } START_RPCC(); @@ -279,6 +303,10 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO if (min_l > GEMM_Q) min_l = GEMM_Q; min_i = min_l; if (min_i > GEMM_P) min_i = GEMM_P; + if (min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } + START_RPCC(); @@ -292,10 +320,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif START_RPCC(); GEMM_ONCOPY(min_l, min_jj, b + (m - min_l + jjs * ldb) * COMPSIZE, ldb, @@ -315,9 +347,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO STOP_RPCC(trmmcost); } - for(is = m - min_l + min_i; is < m; is += GEMM_P){ + for(is = m - min_l + min_i; is < m; is += min_i){ min_i = m - is; if (min_i > GEMM_P) min_i = GEMM_P; + if (min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } + + START_RPCC(); @@ -345,6 +382,10 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO if (min_l > GEMM_Q) min_l = GEMM_Q; min_i = min_l; if (min_i > GEMM_P) min_i = GEMM_P; + if (min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } + START_RPCC(); @@ -358,10 +399,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif START_RPCC(); GEMM_ONCOPY(min_l, min_jj, b + (ls - min_l + jjs * ldb) * COMPSIZE, ldb, @@ -381,9 +426,13 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG 
*range_n, FLOAT *sa, FLO STOP_RPCC(trmmcost); } - for(is = ls - min_l + min_i; is < ls; is += GEMM_P){ + for(is = ls - min_l + min_i; is < ls; is += min_i){ min_i = ls - is; if (min_i > GEMM_P) min_i = GEMM_P; + if (min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } + START_RPCC(); @@ -407,9 +456,12 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO } - for(is = ls; is < m; is += GEMM_P){ + for(is = ls; is < m; is += min_i){ min_i = m - is; if (min_i > GEMM_P) min_i = GEMM_P; + if (min_i > GEMM_UNROLL_M){ + min_i = (min_i / GEMM_UNROLL_M) * GEMM_UNROLL_M; + } START_RPCC(); diff --git a/driver/level3/trmm_R.c b/driver/level3/trmm_R.c index 0882aa496..3be43edde 100644 --- a/driver/level3/trmm_R.c +++ b/driver/level3/trmm_R.c @@ -122,10 +122,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = 0; jjs < ls - js; jjs += min_jj){ min_jj = ls - js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif #ifndef TRANSA GEMM_ONCOPY(min_l, min_jj, a + (ls + (js + jjs) * lda) * COMPSIZE, lda, sb + min_l * jjs * COMPSIZE); #else @@ -142,10 +146,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = 0; jjs < min_l; jjs += min_jj){ min_jj = min_l - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif #ifndef TRANSA TRMM_OLNCOPY(min_l, min_jj, a, lda, ls, ls + jjs, sb + min_l * (ls - js + jjs) * COMPSIZE); #else @@ -195,10 +203,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif #ifndef TRANSA GEMM_ONCOPY(min_l, min_jj, a + (ls + jjs * lda) * COMPSIZE, lda, sb + min_l * (jjs - js) * COMPSIZE); #else @@ -246,10 +258,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = 0; jjs < min_l; jjs += min_jj){ min_jj = min_l - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif #ifndef TRANSA TRMM_OUNCOPY(min_l, min_jj, a, lda, ls, ls + jjs, sb + min_l * jjs * COMPSIZE); #else @@ 
-267,10 +283,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = 0; jjs < js - ls - min_l; jjs += min_jj){ min_jj = js - ls - min_l - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif #ifndef TRANSA GEMM_ONCOPY(min_l, min_jj, a + (ls + (ls + min_l + jjs) * lda) * COMPSIZE, lda, sb + min_l * (min_l + jjs) * COMPSIZE); @@ -324,10 +344,14 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; +#if defined(SKYLAKEX) || defined(COOPERLAKE) + /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve the best performance */ + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; +#else + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif #ifndef TRANSA GEMM_ONCOPY(min_l, min_jj, a + (ls + (jjs - min_j) * lda) * COMPSIZE, lda, sb + min_l * (jjs - js) * COMPSIZE); #else diff --git a/driver/level3/trsm_L.c b/driver/level3/trsm_L.c index d8130ee7e..d842efa93 100644 --- a/driver/level3/trsm_L.c +++ b/driver/level3/trsm_L.c @@ -131,7 +131,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; @@ -197,7 +197,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; diff --git a/driver/level3/trsm_R.c b/driver/level3/trsm_R.c index f6a57f93f..f76a8f7f3 100644 --- a/driver/level3/trsm_R.c +++ b/driver/level3/trsm_R.c @@ -126,7 +126,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; @@ -182,7 +182,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = 0; jjs < min_j - min_l - ls + js; jjs += min_jj){ min_jj = min_j - min_l - ls + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; @@ -243,7 +243,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; @@ -304,7 +304,7 @@ int 
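The trsm_L.c and trsm_R.c hunks above look cosmetic but fix an off-by-one: with the strict compare, a panel of exactly GEMM_UNROLL_N*3 columns failed the first test and was then shrunk to GEMM_UNROLL_N by the else branch. A small demonstration, assuming GEMM_UNROLL_N is 4:

#include <stdio.h>

#define GEMM_UNROLL_N 4

static long cap_old(long jj) {
  if (jj > 3 * GEMM_UNROLL_N) jj = 3 * GEMM_UNROLL_N;
  else if (jj > GEMM_UNROLL_N) jj = GEMM_UNROLL_N;
  return jj;
}

static long cap_new(long jj) {
  if (jj >= 3 * GEMM_UNROLL_N) jj = 3 * GEMM_UNROLL_N;
  else if (jj > GEMM_UNROLL_N) jj = GEMM_UNROLL_N;
  return jj;
}

int main(void) {
  long jj = 3 * GEMM_UNROLL_N;                 /* exactly 12 */
  printf("old cap: %ld, new cap: %ld\n", cap_old(jj), cap_new(jj));
  return 0;                                    /* old: 4, new: 12 */
}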
CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO for(jjs = 0; jjs < min_j - js + ls; jjs += min_jj){ min_jj = min_j - js + ls - jjs; - if (min_jj > GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; + if (min_jj >= GEMM_UNROLL_N*3) min_jj = GEMM_UNROLL_N*3; else if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; diff --git a/driver/others/Makefile b/driver/others/Makefile index d4b5c26d5..4a421ef31 100644 --- a/driver/others/Makefile +++ b/driver/others/Makefile @@ -7,7 +7,7 @@ COMMONOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) c_abs.$(SUFFIX) z_abs.$(SUFFIX) ifdef SMP COMMONOBJS += blas_server.$(SUFFIX) divtable.$(SUFFIX) blasL1thread.$(SUFFIX) -ifndef NO_AFFINITY +ifneq ($(NO_AFFINITY), 1) COMMONOBJS += init.$(SUFFIX) endif endif @@ -21,18 +21,26 @@ else ifeq ($(ARCH),power) COMMONOBJS += dynamic_power.$(SUFFIX) else +ifeq ($(ARCH),zarch) +COMMONOBJS += dynamic_zarch.$(SUFFIX) +else +ifeq ($(ARCH),mips64) +COMMONOBJS += dynamic_mips64.$(SUFFIX) +else COMMONOBJS += dynamic.$(SUFFIX) endif endif +endif +endif else COMMONOBJS += parameter.$(SUFFIX) endif -ifdef EXPRECISION +ifeq ($(EXPRECISION), 1) COMMONOBJS += x_abs.$(SUFFIX) qlamch.$(SUFFIX) qlamc3.$(SUFFIX) endif -ifdef QUAD_PRECISION +ifeq ($(QUAD_PRECISION), 1) COMMONOBJS += addx.$(SUFFIX) mulx.$(SUFFIX) endif @@ -42,7 +50,7 @@ ifeq ($(C_COMPILER), PGI) endif endif -ifdef USE_CUDA +ifeq ($(USE_CUDA), 1) COMMONOBJS += cuda_init.$(SUFFIX) endif @@ -85,9 +93,17 @@ else ifeq ($(ARCH),power) HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic_power.$(SUFFIX) else +ifeq ($(ARCH),zarch) +HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic_zarch.$(SUFFIX) +else +ifeq ($(ARCH),mips64) +HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic_mips64.$(SUFFIX) +else HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic.$(SUFFIX) endif endif +endif +endif else HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) parameter.$(SUFFIX) endif diff --git a/driver/others/blas_l1_thread.c b/driver/others/blas_l1_thread.c index e405c7465..06039c952 100644 --- a/driver/others/blas_l1_thread.c +++ b/driver/others/blas_l1_thread.c @@ -49,11 +49,38 @@ int blas_level1_thread(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void *alpha blas_arg_t args [MAX_CPU_NUMBER]; BLASLONG i, width, astride, bstride; - int num_cpu, calc_type; - - calc_type = (mode & BLAS_PREC) + ((mode & BLAS_COMPLEX) != 0) + 2; + int num_cpu, calc_type_a, calc_type_b; + + switch (mode & BLAS_PREC) { + case BLAS_INT8 : + case BLAS_BFLOAT16: + case BLAS_SINGLE : + case BLAS_DOUBLE : + case BLAS_XDOUBLE : + calc_type_a = calc_type_b = (mode & BLAS_PREC) + ((mode & BLAS_COMPLEX) != 0); + break; + case BLAS_STOBF16 : + calc_type_a = 2 + ((mode & BLAS_COMPLEX) != 0); + calc_type_b = 1 + ((mode & BLAS_COMPLEX) != 0); + break; + case BLAS_DTOBF16 : + calc_type_a = 3 + ((mode & BLAS_COMPLEX) != 0); + calc_type_b = 1 + ((mode & BLAS_COMPLEX) != 0); + break; + case BLAS_BF16TOS : + calc_type_a = 1 + ((mode & BLAS_COMPLEX) != 0); + calc_type_b = 2 + ((mode & BLAS_COMPLEX) != 0); + break; + case BLAS_BF16TOD : + calc_type_a = 1 + ((mode & BLAS_COMPLEX) != 0); + calc_type_b = 3 + ((mode & BLAS_COMPLEX) != 0); + break; + default: + calc_type_a = calc_type_b = 0; + break; + } - mode |= BLAS_LEGACY; + if(!(mode & BLAS_PTHREAD)) mode |= BLAS_LEGACY; for (i = 0; i < nthreads; i++) blas_queue_init(&queue[i]); @@ -77,8 +104,8 @@ int blas_level1_thread(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void *alpha bstride = width; } - astride <<= calc_type; - bstride <<= calc_type; + astride <<= calc_type_a; + 
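In blas_l1_thread.c the single calc_type, which shifted both operand strides by one element-size exponent, becomes a per-operand pair so that the bf16 conversion modes can mix element widths (BLAS_STOBF16, for instance, reads float and writes bfloat16). What those exponents do, with illustrative shift values (the real ones derive from the BLAS_PREC constants in common.h):

#include <stdio.h>

enum { SHIFT_BF16 = 1, SHIFT_FLOAT = 2, SHIFT_DOUBLE = 3 }; /* log2(size) */

int main(void) {
  long width = 1000;                 /* elements per thread   */
  long astride = width, bstride = width;

  /* BLAS_STOBF16-style kernel: a is float, b is bfloat16 */
  astride <<= SHIFT_FLOAT;           /* 1000 * 4 = 4000 bytes */
  bstride <<= SHIFT_BF16;            /* 1000 * 2 = 2000 bytes */

  printf("astride=%ld bytes, bstride=%ld bytes\n", astride, bstride);
  return 0;
}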
bstride <<= calc_type_b; args[num_cpu].m = width; args[num_cpu].n = n; @@ -120,9 +147,36 @@ int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, BLASL blas_arg_t args [MAX_CPU_NUMBER]; BLASLONG i, width, astride, bstride; - int num_cpu, calc_type; - - calc_type = (mode & BLAS_PREC) + ((mode & BLAS_COMPLEX) != 0) + 2; + int num_cpu, calc_type_a, calc_type_b; + + switch (mode & BLAS_PREC) { + case BLAS_INT8 : + case BLAS_BFLOAT16: + case BLAS_SINGLE : + case BLAS_DOUBLE : + case BLAS_XDOUBLE : + calc_type_a = calc_type_b = (mode & BLAS_PREC) + ((mode & BLAS_COMPLEX) != 0); + break; + case BLAS_STOBF16 : + calc_type_a = 2 + ((mode & BLAS_COMPLEX) != 0); + calc_type_b = 1 + ((mode & BLAS_COMPLEX) != 0); + break; + case BLAS_DTOBF16 : + calc_type_a = 3 + ((mode & BLAS_COMPLEX) != 0); + calc_type_b = 1 + ((mode & BLAS_COMPLEX) != 0); + break; + case BLAS_BF16TOS : + calc_type_a = 1 + ((mode & BLAS_COMPLEX) != 0); + calc_type_b = 2 + ((mode & BLAS_COMPLEX) != 0); + break; + case BLAS_BF16TOD : + calc_type_a = 1 + ((mode & BLAS_COMPLEX) != 0); + calc_type_b = 3 + ((mode & BLAS_COMPLEX) != 0); + break; + default: + calc_type_a = calc_type_b = 0; + break; + } mode |= BLAS_LEGACY; @@ -148,8 +202,8 @@ int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, BLASL bstride = width; } - astride <<= calc_type; - bstride <<= calc_type; + astride <<= calc_type_a; + bstride <<= calc_type_b; args[num_cpu].m = width; args[num_cpu].n = n; diff --git a/driver/others/blas_server.c b/driver/others/blas_server.c index 6f4e20610..5e0943c2e 100644 --- a/driver/others/blas_server.c +++ b/driver/others/blas_server.c @@ -72,6 +72,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if defined(OS_LINUX) || defined(OS_NETBSD) || defined(OS_DARWIN) || defined(OS_ANDROID) || defined(OS_SUNOS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_HAIKU) #include <signal.h> +#include <errno.h> #include <sys/time.h> #include <sys/resource.h> #include <unistd.h> @@ -140,6 +141,16 @@ typedef struct { } thread_status_t; +#ifdef HAVE_C11 +#define atomic_load_queue(p) __atomic_load_n(p, __ATOMIC_RELAXED) +#define atomic_store_queue(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED) +#else +#define atomic_load_queue(p) (blas_queue_t*)(*(volatile blas_queue_t**)(p)) +#define atomic_store_queue(p, v) (*(volatile blas_queue_t* volatile*)(p) = (v)) +#endif + + + static thread_status_t thread_status[MAX_CPU_NUMBER] __attribute__((aligned(ATTRIBUTE_SIZE))); #ifndef THREAD_TIMEOUT @@ -181,7 +192,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ if (!(mode & BLAS_COMPLEX)){ #ifdef EXPRECISION - if (mode & BLAS_XDOUBLE){ + if ((mode & BLAS_PREC) == BLAS_XDOUBLE){ /* REAL / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, @@ -194,7 +205,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> c, args -> ldc, sb); } else #endif - if (mode & BLAS_DOUBLE){ + if ((mode & BLAS_PREC) == BLAS_DOUBLE){ /* REAL / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, @@ -205,21 +216,58 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } else { - /* REAL / Single */ - void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, - float *, BLASLONG, float *, BLASLONG, - float *, BLASLONG, void *) = func; -
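The atomic_load_queue/atomic_store_queue pair defined above replaces a mutex acquire/release around every poll of the per-thread work slot; with HAVE_C11 it maps to relaxed GCC atomic builtins, otherwise to a volatile access. A standalone model of the slot protocol (GCC/Clang builtins, as in the patch):

#include <stdio.h>

typedef struct blas_queue { int id; } blas_queue_t;

#define atomic_load_queue(p)     __atomic_load_n(p, __ATOMIC_RELAXED)
#define atomic_store_queue(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)

static blas_queue_t * volatile slot;

int main(void) {
  blas_queue_t job = { 7 };
  atomic_store_queue(&slot, &job);              /* producer posts work */
  blas_queue_t *q = atomic_load_queue(&slot);   /* worker polls        */
  if (q) printf("picked up job %d\n", q->id);
  atomic_store_queue(&slot, (blas_queue_t *)0); /* mark the slot free  */
  return 0;
}

Relaxed ordering only makes the pointer itself race-free; visibility of the job payload still relies on the explicit MB/WMB barriers kept in the server code.

- afunc(args -> m, args -> n,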
args -> k, - ((float *)args -> alpha)[0], - args -> a, args -> lda, - args -> b, args -> ldb, - args -> c, args -> ldc, sb); + } else if ((mode & BLAS_PREC) == BLAS_SINGLE){ + /* REAL / Single */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, + float *, BLASLONG, float *, BLASLONG, + float *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((float *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); +#ifdef BUILD_BFLOAT16 + } else if ((mode & BLAS_PREC) == BLAS_BFLOAT16){ + /* REAL / BFLOAT16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, bfloat16, + bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, + bfloat16 *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((bfloat16 *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); + } else if ((mode & BLAS_PREC) == BLAS_STOBF16){ + /* REAL / BLAS_STOBF16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, + float *, BLASLONG, bfloat16 *, BLASLONG, + float *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((float *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); + } else if ((mode & BLAS_PREC) == BLAS_DTOBF16){ + /* REAL / BLAS_DTOBF16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, + double *, BLASLONG, bfloat16 *, BLASLONG, + double *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((double *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); +#endif + } else { + /* REAL / Other types in future */ } } else { #ifdef EXPRECISION - if (mode & BLAS_XDOUBLE){ + if ((mode & BLAS_PREC) == BLAS_XDOUBLE){ /* COMPLEX / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, @@ -233,7 +281,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> c, args -> ldc, sb); } else #endif - if (mode & BLAS_DOUBLE){ + if ((mode & BLAS_PREC) == BLAS_DOUBLE) { /* COMPLEX / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double, double *, BLASLONG, double *, BLASLONG, @@ -245,7 +293,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } else { + } else if ((mode & BLAS_PREC) == BLAS_SINGLE) { /* COMPLEX / Single */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float, float *, BLASLONG, float *, BLASLONG, @@ -257,11 +305,13 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } + } else { + /* COMPLEX / Other types in future */ + } } } -#if defined(OS_LINUX) && !defined(NO_AFFINITY) +#if defined(OS_LINUX) && !defined(NO_AFFINITY) int gotoblas_set_affinity(int); int gotoblas_set_affinity2(int); int get_node(void); @@ -269,6 +319,25 @@ int get_node(void); static int increased_threads = 0; +#ifdef OS_LINUX +extern int openblas_get_num_threads(void); + +int openblas_setaffinity(int thread_idx, size_t cpusetsize, cpu_set_t* cpu_set) { + const int active_threads = openblas_get_num_threads(); + + if (thread_idx < 0 || thread_idx >= active_threads) { + errno = EINVAL; + return -1; + } + + pthread_t thread = (thread_idx == active_threads - 1) + ? 
pthread_self() + : blas_threads[thread_idx]; + + return pthread_setaffinity_np(thread, cpusetsize, cpu_set); +} +#endif + static void* blas_thread_server(void *arg){ /* Thread identifier */ @@ -312,20 +381,19 @@ blas_queue_t *tscq; last_tick = (unsigned int)rpcc(); - pthread_mutex_lock (&thread_status[cpu].lock); - tscq=thread_status[cpu].queue; - pthread_mutex_unlock (&thread_status[cpu].lock); + tscq = atomic_load_queue(&thread_status[cpu].queue); while(!tscq) { YIELDING; if ((unsigned int)rpcc() - last_tick > thread_timeout) { - pthread_mutex_lock (&thread_status[cpu].lock); - if (!thread_status[cpu].queue) { + if (!atomic_load_queue(&thread_status[cpu].queue)) { + pthread_mutex_lock (&thread_status[cpu].lock); thread_status[cpu].status = THREAD_STATUS_SLEEP; - while (thread_status[cpu].status == THREAD_STATUS_SLEEP) { + while (thread_status[cpu].status == THREAD_STATUS_SLEEP && + !atomic_load_queue(&thread_status[cpu].queue)) { #ifdef MONITOR main_status[cpu] = MAIN_SLEEPING; @@ -333,19 +401,18 @@ blas_queue_t *tscq; pthread_cond_wait(&thread_status[cpu].wakeup, &thread_status[cpu].lock); } + pthread_mutex_unlock(&thread_status[cpu].lock); } - pthread_mutex_unlock(&thread_status[cpu].lock); - last_tick = (unsigned int)rpcc(); } - pthread_mutex_lock (&thread_status[cpu].lock); - tscq=thread_status[cpu].queue; - pthread_mutex_unlock (&thread_status[cpu].lock); + + tscq = atomic_load_queue(&thread_status[cpu].queue); } - queue = thread_status[cpu].queue; + queue = atomic_load_queue(&thread_status[cpu].queue); + MB; if ((long)queue == -1) break; @@ -360,9 +427,7 @@ blas_queue_t *tscq; if (queue) { int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine; - pthread_mutex_lock (&thread_status[cpu].lock); - thread_status[cpu].queue = (blas_queue_t *)1; - pthread_mutex_unlock (&thread_status[cpu].lock); + atomic_store_queue(&thread_status[cpu].queue, (blas_queue_t *)1); sa = queue -> sa; sb = queue -> sb; @@ -388,33 +453,44 @@ blas_queue_t *tscq; if (sb == NULL) { if (!(queue -> mode & BLAS_COMPLEX)){ #ifdef EXPRECISION - if (queue -> mode & BLAS_XDOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif - if (queue -> mode & BLAS_DOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE) { +#ifdef BUILD_DOUBLE sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); - - } else { +#endif + } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) { +#ifdef BUILD_SINGLE sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); - } +#endif + } else { + /* Other types in future */ + } } else { #ifdef EXPRECISION - if (queue -> mode & BLAS_XDOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif - if (queue -> mode & BLAS_DOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){ +#ifdef BUILD_COMPLEX16 sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); - } else { +#endif + } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) { +#ifdef BUILD_COMPLEX sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); - } +#endif + } else { + /* Other types in future */ + 
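The reworked sleep path above re-checks the work slot after taking the lock and keeps the queue test inside the condition-wait predicate, so a job posted between the timeout check and pthread_cond_wait can no longer be slept through. A self-contained model of that handshake:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wakeup = PTHREAD_COND_INITIALIZER;
static int sleeping, queued;

static void *worker(void *arg) {
  pthread_mutex_lock(&lock);
  if (!queued) {                    /* re-check under the lock        */
    sleeping = 1;
    while (sleeping && !queued)     /* queue is part of the predicate */
      pthread_cond_wait(&wakeup, &lock);
  }
  pthread_mutex_unlock(&lock);
  printf("worker saw queued=%d\n", queued);
  return arg;
}

int main(void) {
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  pthread_mutex_lock(&lock);
  queued = 1;                       /* post work...           */
  sleeping = 0;
  pthread_cond_signal(&wakeup);     /* ...and wake the worker */
  pthread_mutex_unlock(&lock);
  pthread_join(t, NULL);
  return 0;
}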
} } queue->sb=sb; } @@ -442,13 +518,9 @@ blas_queue_t *tscq; // arm: make sure all results are written out _before_ // thread is marked as done and other threads use them - WMB; + MB; + atomic_store_queue(&thread_status[cpu].queue, (blas_queue_t *)0); - pthread_mutex_lock (&thread_status[cpu].lock); - thread_status[cpu].queue = (blas_queue_t * volatile) ((long)thread_status[cpu].queue & 0); /* Need a trick */ - pthread_mutex_unlock (&thread_status[cpu].lock); - - WMB; } @@ -566,7 +638,7 @@ int blas_thread_init(void){ for(i = 0; i < blas_num_threads - 1; i++){ - thread_status[i].queue = (blas_queue_t *)NULL; + atomic_store_queue(&thread_status[i].queue, (blas_queue_t *)0); thread_status[i].status = THREAD_STATUS_WAKEUP; pthread_mutex_init(&thread_status[i].lock, NULL); @@ -582,7 +654,7 @@ int blas_thread_init(void){ if(ret!=0){ struct rlimit rlim; const char *msg = strerror(ret); - fprintf(STDERR, "OpenBLAS blas_thread_init: pthread_create failed for thread %ld of %ld: %s\n", i+1,blas_num_threads,msg); + fprintf(STDERR, "OpenBLAS blas_thread_init: pthread_create failed for thread %ld of %d: %s\n", i+1,blas_num_threads,msg); #ifdef RLIMIT_NPROC if(0 == getrlimit(RLIMIT_NPROC, &rlim)) { fprintf(STDERR, "OpenBLAS blas_thread_init: RLIMIT_NPROC " @@ -655,7 +727,8 @@ int exec_blas_async(BLASLONG pos, blas_queue_t *queue){ if (queue -> mode & BLAS_NODE) { do { - while((thread_status[i].node != node || thread_status[i].queue) && (i < blas_num_threads - 1)) i ++; + + while((thread_status[i].node != node || atomic_load_queue(&thread_status[i].queue)) && (i < blas_num_threads - 1)) i ++; if (i < blas_num_threads - 1) break; @@ -669,36 +742,26 @@ int exec_blas_async(BLASLONG pos, blas_queue_t *queue){ } while (1); } else { - pthread_mutex_lock (&thread_status[i].lock); - tsiq = thread_status[i].queue; - pthread_mutex_unlock (&thread_status[i].lock); + tsiq = atomic_load_queue(&thread_status[i].queue); while(tsiq) { i ++; if (i >= blas_num_threads - 1) i = 0; - pthread_mutex_lock (&thread_status[i].lock); - tsiq = thread_status[i].queue; - pthread_mutex_unlock (&thread_status[i].lock); + tsiq = atomic_load_queue(&thread_status[i].queue); } } #else - pthread_mutex_lock (&thread_status[i].lock); - tsiq=thread_status[i].queue ; - pthread_mutex_unlock (&thread_status[i].lock); + tsiq = atomic_load_queue(&thread_status[i].queue); while(tsiq) { i ++; if (i >= blas_num_threads - 1) i = 0; - pthread_mutex_lock (&thread_status[i].lock); - tsiq=thread_status[i].queue ; - pthread_mutex_unlock (&thread_status[i].lock); + tsiq = atomic_load_queue(&thread_status[i].queue); } #endif queue -> assigned = i; - WMB; - pthread_mutex_lock (&thread_status[i].lock); - thread_status[i].queue = queue; - pthread_mutex_unlock (&thread_status[i].lock); - WMB; + MB; + + atomic_store_queue(&thread_status[i].queue, queue); queue = queue -> next; pos ++; @@ -718,9 +781,7 @@ int exec_blas_async(BLASLONG pos, blas_queue_t *queue){ pos = current -> assigned; - pthread_mutex_lock (&thread_status[pos].lock); - tspq=thread_status[pos].queue; - pthread_mutex_unlock (&thread_status[pos].lock); + tspq = atomic_load_queue(&thread_status[pos].queue); if ((BLASULONG)tspq > 1) { pthread_mutex_lock (&thread_status[pos].lock); @@ -752,24 +813,20 @@ int exec_blas_async_wait(BLASLONG num, blas_queue_t *queue){ while ((num > 0) && queue) { - pthread_mutex_lock(&thread_status[queue->assigned].lock); - tsqq=thread_status[queue -> assigned].queue; - pthread_mutex_unlock(&thread_status[queue->assigned].lock); + tsqq = 
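A hypothetical caller of the openblas_setaffinity() entry point added above, pinning OpenBLAS worker thread 0 to CPU 2; this assumes linking against the patched library and a glibc exposing cpu_set_t:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* prototype as introduced in blas_server.c */
int openblas_setaffinity(int thread_idx, size_t cpusetsize, cpu_set_t *cpu_set);

int main(void) {
  cpu_set_t set;
  CPU_ZERO(&set);
  CPU_SET(2, &set);

  if (openblas_setaffinity(0, sizeof(set), &set) != 0)
    fprintf(stderr, "openblas_setaffinity failed (bad index sets EINVAL)\n");
  return 0;
}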
atomic_load_queue(&thread_status[queue->assigned].queue); while(tsqq) { YIELDING; - pthread_mutex_lock(&thread_status[queue->assigned].lock); - tsqq=thread_status[queue -> assigned].queue; - pthread_mutex_unlock(&thread_status[queue->assigned].lock); - - + tsqq = atomic_load_queue(&thread_status[queue->assigned].queue); }; queue = queue -> next; num --; } + MB; + #ifdef SMP_DEBUG fprintf(STDERR, "Done.\n\n"); #endif @@ -880,7 +937,7 @@ void goto_set_num_threads(int num_threads) { for(i = blas_num_threads - 1; i < num_threads - 1; i++){ - thread_status[i].queue = (blas_queue_t *)NULL; + atomic_store_queue(&thread_status[i].queue, (blas_queue_t *)0); thread_status[i].status = THREAD_STATUS_WAKEUP; pthread_mutex_init(&thread_status[i].lock, NULL); @@ -910,9 +967,11 @@ void goto_set_num_threads(int num_threads) { blas_cpu_number = num_threads; #if defined(ARCH_MIPS64) +#ifndef DYNAMIC_ARCH //set parameters for different number of threads. blas_set_parameter(); #endif +#endif } @@ -971,12 +1030,11 @@ int BLASFUNC(blas_thread_shutdown)(void){ for (i = 0; i < blas_num_threads - 1; i++) { - pthread_mutex_lock (&thread_status[i].lock); - thread_status[i].queue = (blas_queue_t *)-1; + pthread_mutex_lock (&thread_status[i].lock); + atomic_store_queue(&thread_status[i].queue, (blas_queue_t *)-1); thread_status[i].status = THREAD_STATUS_WAKEUP; - pthread_cond_signal (&thread_status[i].wakeup); pthread_mutex_unlock(&thread_status[i].lock); diff --git a/driver/others/blas_server_omp.c b/driver/others/blas_server_omp.c index 4255852c8..a576127aa 100644 --- a/driver/others/blas_server_omp.c +++ b/driver/others/blas_server_omp.c @@ -48,6 +48,21 @@ #else +#ifndef likely +#ifdef __GNUC__ +#define likely(x) __builtin_expect(!!(x), 1) +#else +#define likely(x) (x) +#endif +#endif +#ifndef unlikely +#ifdef __GNUC__ +#define unlikely(x) __builtin_expect(!!(x), 0) +#else +#define unlikely(x) (x) +#endif +#endif + #ifndef OMP_SCHED #define OMP_SCHED static #endif @@ -55,16 +70,34 @@ int blas_server_avail = 0; static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER]; -#if __STDC_VERSION__ >= 201112L +#ifdef HAVE_C11 static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER]; #else static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER]; #endif -void goto_set_num_threads(int num_threads) { +static void adjust_thread_buffers() { int i=0, j=0; + //adjust buffer for each thread + for(i=0; i < MAX_PARALLEL_NUMBER; i++) { + for(j=0; j < blas_cpu_number; j++){ + if(blas_thread_buffer[i][j] == NULL){ + blas_thread_buffer[i][j] = blas_memory_alloc(2); + } + } + for(; j < MAX_CPU_NUMBER; j++){ + if(blas_thread_buffer[i][j] != NULL){ + blas_memory_free(blas_thread_buffer[i][j]); + blas_thread_buffer[i][j] = NULL; + } + } + } +} + +void goto_set_num_threads(int num_threads) { + if (num_threads < 1) num_threads = blas_num_threads; if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER; @@ -77,20 +110,7 @@ void goto_set_num_threads(int num_threads) { omp_set_num_threads(blas_cpu_number); - //adjust buffer for each thread - for(i=0; i<MAX_PARALLEL_NUMBER; i++) { - for(j=0; j<blas_cpu_number; j++){ - if(blas_thread_buffer[i][j] == NULL){ - blas_thread_buffer[i][j] = blas_memory_alloc(2); - } - } - for(; j<MAX_CPU_NUMBER; j++){ - if(blas_thread_buffer[i][j] != NULL){ - blas_memory_free(blas_thread_buffer[i][j]); - blas_thread_buffer[i][j] = NULL; - } - } - } + adjust_thread_buffers(); } @@ -142,7 +153,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ if (!(mode & BLAS_COMPLEX)){ #ifdef EXPRECISION - if (mode & BLAS_XDOUBLE){ + if ((mode & BLAS_PREC) == BLAS_XDOUBLE){ /* REAL / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, @@ -155,7 +166,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> c, args -> ldc, sb); } else #endif - if (mode & BLAS_DOUBLE){ + if ((mode & BLAS_PREC) == BLAS_DOUBLE){ /* REAL / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, @@ -166,7 +177,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } else { + } else if ((mode & BLAS_PREC) == BLAS_SINGLE){
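adjust_thread_buffers() makes the scratch-buffer pool idempotent to resize: one pass allocates a slot for every active thread and frees the slots beyond the current count, so goto_set_num_threads can call it after growing or shrinking the pool. A standalone model, with malloc/free standing in for blas_memory_alloc/blas_memory_free:

#include <stdio.h>
#include <stdlib.h>

#define MAX_PARALLEL_NUMBER 2
#define MAX_CPU_NUMBER 8

static void *buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER];
static int active_threads = 4;

static void adjust_thread_buffers(void) {
  for (int i = 0; i < MAX_PARALLEL_NUMBER; i++) {
    int j;
    for (j = 0; j < active_threads; j++)       /* grow up to the count */
      if (!buffer[i][j]) buffer[i][j] = malloc(4096);
    for (; j < MAX_CPU_NUMBER; j++)            /* trim everything else */
      if (buffer[i][j]) { free(buffer[i][j]); buffer[i][j] = NULL; }
  }
}

int main(void) {
  adjust_thread_buffers();      /* allocate for 4 threads */
  active_threads = 2;
  adjust_thread_buffers();      /* trim back down to 2    */
  printf("slot[0][1]=%p slot[0][3]=%p\n", buffer[0][1], buffer[0][3]);
  return 0;
}

/* REAL / Single */ void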
(*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, @@ -177,10 +188,47 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); +#ifdef BUILD_BFLOAT16 + } else if ((mode & BLAS_PREC) == BLAS_BFLOAT16){ + /* REAL / BFLOAT16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, bfloat16, + bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, + bfloat16 *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((bfloat16 *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); + } else if ((mode & BLAS_PREC) == BLAS_STOBF16){ + /* REAL / BLAS_STOBF16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, + float *, BLASLONG, bfloat16 *, BLASLONG, + float *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((float *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); + } else if ((mode & BLAS_PREC) == BLAS_DTOBF16){ + /* REAL / BLAS_DTOBF16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, + double *, BLASLONG, bfloat16 *, BLASLONG, + double *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((double *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); +#endif + } else { + /* REAL / Other types in future */ } } else { #ifdef EXPRECISION - if (mode & BLAS_XDOUBLE){ + if ((mode & BLAS_PREC) == BLAS_XDOUBLE){ /* COMPLEX / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, @@ -194,7 +242,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> c, args -> ldc, sb); } else #endif - if (mode & BLAS_DOUBLE){ + if ((mode & BLAS_PREC) == BLAS_DOUBLE){ /* COMPLEX / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double, double *, BLASLONG, double *, BLASLONG, @@ -206,7 +254,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } else { + } else if ((mode & BLAS_PREC) == BLAS_SINGLE){ /* COMPLEX / Single */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float, float *, BLASLONG, float *, BLASLONG, @@ -218,8 +266,10 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } - } + } else { + /* COMPLEX / Other types in future */ + } + } } static void exec_threads(blas_queue_t *queue, int buf_index){ @@ -255,32 +305,47 @@ static void exec_threads(blas_queue_t *queue, int buf_index){ if (sb == NULL) { if (!(queue -> mode & BLAS_COMPLEX)){ #ifdef EXPRECISION - if (queue -> mode & BLAS_XDOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif - if (queue -> mode & BLAS_DOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){ +#if defined ( BUILD_DOUBLE) || defined (BUILD_COMPLEX16) sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); - - } else { +#endif + } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE){ +#if defined (BUILD_SINGLE) || defined (BUILD_COMPLEX) sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)) + 
GEMM_OFFSET_B); +#endif + } else { + /* Other types in future */ } } else { #ifdef EXPRECISION - if (queue -> mode & BLAS_XDOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif - if (queue -> mode & BLAS_DOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){ +#ifdef BUILD_COMPLEX16 sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); - } else { +#else +fprintf(stderr,"UNHANDLED COMPLEX16\n"); +#endif + } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) { +#ifdef BUILD_COMPLEX sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); +#else +fprintf(stderr,"UNHANDLED COMPLEX\n"); +#endif + } else { + /* Other types in future */ } } queue->sb=sb; @@ -307,6 +372,9 @@ static void exec_threads(blas_queue_t *queue, int buf_index){ int exec_blas(BLASLONG num, blas_queue_t *queue){ + // Handle lazy re-init of the thread-pool after a POSIX fork + if (unlikely(blas_server_avail == 0)) blas_thread_init(); + BLASLONG i, buf_index; if ((num <= 0) || (queue == NULL)) return 0; @@ -320,7 +388,7 @@ int exec_blas(BLASLONG num, blas_queue_t *queue){ while(true) { for(i=0; i < MAX_PARALLEL_NUMBER; i++) { -#if __STDC_VERSION__ >= 201112L +#ifdef HAVE_C11 _Bool inuse = false; if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) { #else @@ -335,7 +403,7 @@ int exec_blas(BLASLONG num, blas_queue_t *queue){ break; } -#pragma omp parallel for schedule(OMP_SCHED) +#pragma omp parallel for num_threads(num) schedule(OMP_SCHED) for (i = 0; i < num; i ++) { #ifndef USE_SIMPLE_THREADED_LEVEL3 @@ -345,7 +413,7 @@ int exec_blas(BLASLONG num, blas_queue_t *queue){ exec_threads(&queue[i], buf_index); } -#if __STDC_VERSION__ >= 201112L +#ifdef HAVE_C11 atomic_store(&blas_buffer_inuse[buf_index], false); #else blas_buffer_inuse[buf_index] = false; diff --git a/driver/others/blas_server_win32.c b/driver/others/blas_server_win32.c index bace54a23..42f289441 100644 --- a/driver/others/blas_server_win32.c +++ b/driver/others/blas_server_win32.c @@ -77,7 +77,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ if (!(mode & BLAS_COMPLEX)){ #ifdef EXPRECISION - if (mode & BLAS_XDOUBLE){ + if ((mode & BLAS_PREC) == BLAS_XDOUBLE){ /* REAL / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, @@ -90,7 +90,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> c, args -> ldc, sb); } else #endif - if (mode & BLAS_DOUBLE){ + if ((mode & BLAS_PREC) == BLAS_DOUBLE){ /* REAL / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, @@ -101,7 +101,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } else { + } else if ((mode & BLAS_PREC) == BLAS_SINGLE){ /* REAL / Single */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, @@ -112,10 +112,47 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); +#ifdef BUILD_BFLOAT16 + } else if ((mode & BLAS_PREC) == BLAS_BFLOAT16){ + /* REAL / BFLOAT16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, bfloat16, + 
bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, + bfloat16 *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((bfloat16 *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); + } else if ((mode & BLAS_PREC) == BLAS_STOBF16){ + /* REAL / BLAS_STOBF16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, + float *, BLASLONG, bfloat16 *, BLASLONG, + float *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((float *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); + } else if ((mode & BLAS_PREC) == BLAS_DTOBF16){ + /* REAL / BLAS_DTOBF16 */ + void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, + double *, BLASLONG, bfloat16 *, BLASLONG, + double *, BLASLONG, void *) = func; + + afunc(args -> m, args -> n, args -> k, + ((double *)args -> alpha)[0], + args -> a, args -> lda, + args -> b, args -> ldb, + args -> c, args -> ldc, sb); +#endif + } else { + /* REAL / Other types in future */ } } else { #ifdef EXPRECISION - if (mode & BLAS_XDOUBLE){ + if ((mode & BLAS_PREC) == BLAS_XDOUBLE){ /* COMPLEX / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, @@ -129,7 +166,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> c, args -> ldc, sb); } else #endif - if (mode & BLAS_DOUBLE){ + if ((mode & BLAS_PREC) == BLAS_DOUBLE){ /* COMPLEX / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double, double *, BLASLONG, double *, BLASLONG, @@ -141,7 +178,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } else { + } else if ((mode & BLAS_PREC) == BLAS_SINGLE) { /* COMPLEX / Single */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float, float *, BLASLONG, float *, BLASLONG, @@ -153,7 +190,9 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); - } + } else { + /* COMPLEX / Other types in future */ + } } } @@ -233,32 +272,36 @@ static DWORD WINAPI blas_thread_server(void *arg){ if (sb == NULL) { if (!(queue -> mode & BLAS_COMPLEX)){ #ifdef EXPRECISION - if (queue -> mode & BLAS_XDOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif - if (queue -> mode & BLAS_DOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){ sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); - } else { + } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) { sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); + } else { + /* Other types in future */ } } else { #ifdef EXPRECISION - if (queue -> mode & BLAS_XDOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif - if (queue -> mode & BLAS_DOUBLE){ + if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){ sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); - } else { + } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) { sb = (void *)(((BLASLONG)sa + 
((CGEMM_P * CGEMM_Q * 2 * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); + } else { + /* Other types in future */ } } queue->sb=sb; @@ -433,12 +476,15 @@ int exec_blas(BLASLONG num, blas_queue_t *queue){ routine = queue -> routine; - if (!(queue -> mode & BLAS_LEGACY)) { + if (queue -> mode & BLAS_LEGACY) { + legacy_exec(routine, queue -> mode, queue -> args, queue -> sb); + } else + if (queue -> mode & BLAS_PTHREAD) { + void (*pthreadcompat)(void *) = queue -> routine; + (pthreadcompat)(queue -> args); + } else (routine)(queue -> args, queue -> range_m, queue -> range_n, queue -> sa, queue -> sb, 0); - } else { - legacy_exec(routine, queue -> mode, queue -> args, queue -> sb); - } if ((num > 1) && queue -> next) exec_blas_async_wait(num - 1, queue -> next); @@ -462,11 +508,15 @@ int BLASFUNC(blas_thread_shutdown)(void){ for(i = 0; i < blas_num_threads - 1; i++){ // Could also just use WaitForMultipleObjects - WaitForSingleObject(blas_threads[i], 5); //INFINITE); + DWORD wait_thread_value = WaitForSingleObject(blas_threads[i], 50); + #ifndef OS_WINDOWSSTORE -// TerminateThread is only available with WINAPI_DESKTOP and WINAPI_SYSTEM not WINAPI_APP in UWP - TerminateThread(blas_threads[i],0); + // TerminateThread is only available with WINAPI_DESKTOP and WINAPI_SYSTEM not WINAPI_APP in UWP + if (WAIT_OBJECT_0 != wait_thread_value) { + TerminateThread(blas_threads[i],0); + } #endif + CloseHandle(blas_threads[i]); } diff --git a/driver/others/dynamic.c b/driver/others/dynamic.c index 045fc65b8..7845d6951 100644 --- a/driver/others/dynamic.c +++ b/driver/others/dynamic.c @@ -207,6 +207,19 @@ extern gotoblas_t gotoblas_SKYLAKEX; #else #define gotoblas_SKYLAKEX gotoblas_PRESCOTT #endif +#ifdef DYN_COOPERLAKE +extern gotoblas_t gotoblas_COOPERLAKE; +#elif defined(DYN_SKYLAKEX) +#define gotoblas_COOPERLAKE gotoblas_SKYLAKEX +#elif defined(DYN_HASWELL) +#define gotoblas_COOPERLAKE gotoblas_HASWELL +#elif defined(DYN_SANDYBRIDGE) +#define gotoblas_COOPERLAKE gotoblas_SANDYBRIDGE +#elif defined(DYN_NEHALEM) +#define gotoblas_COOPERLAKE gotoblas_NEHALEM +#else +#define gotoblas_COOPERLAKE gotoblas_PRESCOTT +#endif #else // not DYNAMIC_LIST @@ -247,14 +260,17 @@ extern gotoblas_t gotoblas_EXCAVATOR; #ifdef NO_AVX2 #define gotoblas_HASWELL gotoblas_SANDYBRIDGE #define gotoblas_SKYLAKEX gotoblas_SANDYBRIDGE +#define gotoblas_COOPERLAKE gotoblas_SANDYBRIDGE #define gotoblas_ZEN gotoblas_SANDYBRIDGE #else extern gotoblas_t gotoblas_HASWELL; extern gotoblas_t gotoblas_ZEN; #ifndef NO_AVX512 extern gotoblas_t gotoblas_SKYLAKEX; +extern gotoblas_t gotoblas_COOPERLAKE; #else #define gotoblas_SKYLAKEX gotoblas_HASWELL +#define gotoblas_COOPERLAKE gotoblas_HASWELL #endif #endif #else @@ -262,6 +278,7 @@ extern gotoblas_t gotoblas_SKYLAKEX; #define gotoblas_SANDYBRIDGE gotoblas_NEHALEM #define gotoblas_HASWELL gotoblas_NEHALEM #define gotoblas_SKYLAKEX gotoblas_NEHALEM +#define gotoblas_COOPERLAKE gotoblas_NEHALEM #define gotoblas_BULLDOZER gotoblas_BARCELONA #define gotoblas_PILEDRIVER gotoblas_BARCELONA #define gotoblas_STEAMROLLER gotoblas_BARCELONA @@ -313,8 +330,8 @@ int support_avx2(){ if (!support_avx()) return 0; cpuid(7, &eax, &ebx, &ecx, &edx); - if((ebx & (1<<7)) != 0) - ret=1; //OS supports AVX2 + if((ebx & (1<<5)) != 0) + ret=1; //AVX2 flag is set return ret; #else return 0; @@ -329,13 +346,30 @@ int support_avx512(){ if (!support_avx()) return 0; cpuid(7, &eax, &ebx, &ecx, &edx); - if((ebx & (1<<7)) != 1){ - ret=0; //OS does not even support AVX2 + if((ebx & (1<<5)) == 0){ + ret=0; 
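For reference on the support_avx2() fix just above: in CPUID leaf 7, sub-leaf 0, EBX bit 5 reports AVX2 while bit 7 reports SMEP, so the old (1<<7) test answered the wrong question entirely. A minimal, self-contained illustration (not part of the patch), assuming GCC/Clang's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void) {
  unsigned int eax, ebx, ecx, edx;
  /* leaf 7, sub-leaf 0: structured extended feature flags */
  if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
    return 1;                                        /* leaf 7 unsupported */
  printf("AVX2 (EBX bit 5): %u\n", (ebx >> 5) & 1);
  printf("SMEP (EBX bit 7): %u\n", (ebx >> 7) & 1);  /* what the old code read */
  return 0;
}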
//cpu does not have avx2 flag } - if((ebx & (1<<31)) != 0){ + if((ebx & (1<<31)) != 0){ //AVX512VL flag is set xgetbv(0, &eax, &edx); if((eax & 0xe0) == 0xe0) - ret=1; //OS supports AVX512VL + ret=1; //OS supports saving zmm register + } + return ret; +#else + return 0; +#endif +} + +int support_avx512_bf16(){ +#if !defined(NO_AVX) && !defined(NO_AVX512) + int eax, ebx, ecx, edx; + int ret=0; + + if (!support_avx512()) + return 0; + cpuid_count(7, 1, &eax, &ebx, &ecx, &edx); + if((eax & 32) == 32){ + ret=1; // CPUID.7.1:EAX[bit 5] indicates whether avx512_bf16 supported or not } return ret; #else @@ -524,7 +558,10 @@ static gotoblas_t *get_coretype(void){ return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } } - if (model == 5) { + if (model == 5) { + // Intel Cooperlake + if(support_avx512_bf16()) + return &gotoblas_COOPERLAKE; // Intel Skylake X if (support_avx512()) return &gotoblas_SKYLAKEX; @@ -585,9 +622,29 @@ static gotoblas_t *get_coretype(void){ } } return NULL; + case 7: + if (model == 10) // Goldmont Plus + return &gotoblas_NEHALEM; + if (model == 14) { + // Ice Lake + if (support_avx512()) + return &gotoblas_SKYLAKEX; + if(support_avx2()){ + openblas_warning(FALLBACK_VERBOSE, HASWELL_FALLBACK); + return &gotoblas_HASWELL; + } + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { + openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); + return &gotoblas_NEHALEM; + } + } + return NULL; case 9: case 8: - if (model == 14 ) { // Kaby Lake + if (model == 14 ) { // Kaby Lake, Coffee Lake if(support_avx2()) return &gotoblas_HASWELL; if(support_avx()) { @@ -598,6 +655,31 @@ static gotoblas_t *get_coretype(void){ return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } } + case 10: + if (model == 5 || model == 6) { + if(support_avx2()) + return &gotoblas_HASWELL; + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { + openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); + return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. + } + } + if (model == 7) { + if (support_avx512()) + return &gotoblas_SKYLAKEX; + if(support_avx2()) + return &gotoblas_HASWELL; + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { + openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); + return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. + } + } return NULL; } case 0xf: @@ -612,7 +694,7 @@ static gotoblas_t *get_coretype(void){ cpuid(0x80000000, &eax, &ebx, &ecx, &edx); if ( (eax & 0xffff) >= 0x01) { cpuid(0x80000001, &eax, &ebx, &ecx, &edx); - if ((edx & (1 << 30)) == 0 || (edx & (1 << 31)) == 0) + if ((edx & (1 << 30)) == 0 || (edx & (1u << 31)) == 0) return NULL; } else @@ -624,7 +706,7 @@ static gotoblas_t *get_coretype(void){ if ((exfamily == 0) || (exfamily == 2)) { if (ecx & (1 << 0)) return &gotoblas_OPTERON_SSE3; else return &gotoblas_OPTERON; - } else if (exfamily == 5) { + } else if (exfamily == 5 || exfamily == 7) { return &gotoblas_BOBCAT; } else if (exfamily == 6) { if(model == 1){ @@ -678,7 +760,7 @@ static gotoblas_t *get_coretype(void){ } } } else if (exfamily == 8) { - if (model == 1 || model == 8) { + /* if (model == 1 || model == 8) */ { if(support_avx()) return &gotoblas_ZEN; else{ @@ -686,16 +768,24 @@ static gotoblas_t *get_coretype(void){ return &gotoblas_BARCELONA; //OS doesn't support AVX. Use old kernels. 
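The new support_avx512_bf16() above is the first sub-leaf query in this file: AVX512_BF16 is enumerated in CPUID.(EAX=7,ECX=1):EAX[5], which is why cpuid_count() is needed rather than plain cpuid(). A sketch of the same query with GCC/Clang's <cpuid.h> (illustrative only; as in the patch, real code should require support_avx512() first so the OS is known to save zmm state):

#include <cpuid.h>

static int has_avx512_bf16(void) {
  unsigned int eax, ebx, ecx, edx;
  /* leaf 7, sub-leaf 1: EAX bit 5 = AVX512_BF16 */
  if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
    return 0;
  return (eax >> 5) & 1;
}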
} } - } else if (exfamily == 9) { + } else if (exfamily == 9) { if(support_avx()) return &gotoblas_ZEN; else{ openblas_warning(FALLBACK_VERBOSE, BARCELONA_FALLBACK); return &gotoblas_BARCELONA; //OS doesn't support AVX. Use old kernels. - } + } + } else if (exfamily == 10) { + if(support_avx()) + return &gotoblas_ZEN; + else{ + openblas_warning(FALLBACK_VERBOSE, BARCELONA_FALLBACK); + return &gotoblas_BARCELONA; //OS doesn't support AVX. Use old kernels. + } }else { return &gotoblas_BARCELONA; } + } } @@ -734,7 +824,8 @@ static char *corename[] = { "Steamroller", "Excavator", "Zen", - "SkylakeX" + "SkylakeX", + "Cooperlake" }; char *gotoblas_corename(void) { @@ -744,18 +835,53 @@ char *gotoblas_corename(void) { if (gotoblas == &gotoblas_NORTHWOOD) return corename[ 3]; if (gotoblas == &gotoblas_PRESCOTT) return corename[ 4]; if (gotoblas == &gotoblas_BANIAS) return corename[ 5]; - if (gotoblas == &gotoblas_ATOM) return corename[ 6]; + if (gotoblas == &gotoblas_ATOM) +#ifdef DYNAMIC_OLDER + return corename[ 6]; +#else + return corename[10]; +#endif if (gotoblas == &gotoblas_CORE2) return corename[ 7]; - if (gotoblas == &gotoblas_PENRYN) return corename[ 8]; - if (gotoblas == &gotoblas_DUNNINGTON) return corename[ 9]; + if (gotoblas == &gotoblas_PENRYN) +#ifdef DYNAMIC_OLDER + return corename[ 8]; +#else + return corename[7]; +#endif + if (gotoblas == &gotoblas_DUNNINGTON) +#ifdef DYNAMIC_OLDER + return corename[ 9]; +#else + return corename[7]; +#endif if (gotoblas == &gotoblas_NEHALEM) return corename[10]; if (gotoblas == &gotoblas_ATHLON) return corename[11]; - if (gotoblas == &gotoblas_OPTERON_SSE3) return corename[12]; - if (gotoblas == &gotoblas_OPTERON) return corename[13]; + if (gotoblas == &gotoblas_OPTERON_SSE3) +#ifdef DYNAMIC_OLDER + return corename[12]; +#else + return corename[7]; +#endif + if (gotoblas == &gotoblas_OPTERON) +#ifdef DYNAMIC_OLDER + return corename[13]; +#else + return corename[7]; +#endif if (gotoblas == &gotoblas_BARCELONA) return corename[14]; - if (gotoblas == &gotoblas_NANO) return corename[15]; + if (gotoblas == &gotoblas_NANO) +#ifdef DYNAMIC_OLDER + return corename[15]; +#else + return corename[10]; +#endif if (gotoblas == &gotoblas_SANDYBRIDGE) return corename[16]; - if (gotoblas == &gotoblas_BOBCAT) return corename[17]; + if (gotoblas == &gotoblas_BOBCAT) +#ifdef DYNAMIC_OLDER + return corename[17]; +#else + return corename[7]; +#endif if (gotoblas == &gotoblas_BULLDOZER) return corename[18]; if (gotoblas == &gotoblas_PILEDRIVER) return corename[19]; if (gotoblas == &gotoblas_HASWELL) return corename[20]; @@ -763,10 +889,12 @@ char *gotoblas_corename(void) { if (gotoblas == &gotoblas_EXCAVATOR) return corename[22]; if (gotoblas == &gotoblas_ZEN) return corename[23]; if (gotoblas == &gotoblas_SKYLAKEX) return corename[24]; + if (gotoblas == &gotoblas_COOPERLAKE) return corename[25]; return corename[0]; } + static gotoblas_t *force_coretype(char *coretype){ int i ; @@ -792,6 +920,7 @@ static gotoblas_t *force_coretype(char *coretype){ switch (found) { + case 25: return (&gotoblas_COOPERLAKE); case 24: return (&gotoblas_SKYLAKEX); case 23: return (&gotoblas_ZEN); case 22: return (&gotoblas_EXCAVATOR); diff --git a/driver/others/dynamic_arm64.c b/driver/others/dynamic_arm64.c index b4ce6b67d..6c68ba98a 100644 --- a/driver/others/dynamic_arm64.c +++ b/driver/others/dynamic_arm64.c @@ -37,17 +37,85 @@ /*********************************************************************/ #include "common.h" +#if (defined OS_LINUX || defined OS_ANDROID) #include <asm/hwcap.h> #include <sys/auxv.h> 
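Background for the dynamic_arm64.c hunks that follow: when the kernel sets HWCAP_CPUID in the auxiliary vector, user space may issue mrs MIDR_EL1 (the kernel traps and emulates the read); otherwise the patch falls back to the sysfs midr_el1 file. Core selection then keys off two MIDR_EL1 fields, sketched here with illustrative helper names (not OpenBLAS symbols):

/* MIDR_EL1 layout per the Arm ARM: implementer [31:24], part number [15:4] */
static inline unsigned int midr_implementer(unsigned int midr) {
    return (midr >> 24) & 0xff;   /* 0x41 Arm, 0x43 Cavium, 0x48 HiSilicon, 0x50 Ampere, 0x51 Qualcomm */
}
static inline unsigned int midr_part(unsigned int midr) {
    return (midr >> 4) & 0xfff;   /* e.g. 0xd0c Neoverse N1, 0x0af ThunderX2 */
}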
+#endif extern gotoblas_t gotoblas_ARMV8; +#ifdef DYNAMIC_LIST +#ifdef DYN_CORTEXA53 +extern gotoblas_t gotoblas_CORTEXA53; +#else +#define gotoblas_CORTEXA53 gotoblas_ARMV8 +#endif +#ifdef DYN_CORTEXA57 +extern gotoblas_t gotoblas_CORTEXA57; +#else +#define gotoblas_CORTEXA57 gotoblas_ARMV8 +#endif +#ifdef DYN_CORTEXA72 +extern gotoblas_t gotoblas_CORTEXA72; +#else +#define gotoblas_CORTEXA72 gotoblas_ARMV8 +#endif +#ifdef DYN_CORTEXA73 +extern gotoblas_t gotoblas_CORTEXA73; +#else +#define gotoblas_CORTEXA73 gotoblas_ARMV8 +#endif +#ifdef DYN_FALKOR +extern gotoblas_t gotoblas_FALKOR; +#else +#define gotoblas_FALKOR gotoblas_ARMV8 +#endif +#ifdef DYN_TSV110 +extern gotoblas_t gotoblas_TSV110; +#else +#define gotoblas_TSV110 gotoblas_ARMV8 +#endif +#ifdef DYN_THUNDERX +extern gotoblas_t gotoblas_THUNDERX; +#else +#define gotoblas_THUNDERX gotoblas_ARMV8 +#endif +#ifdef DYN_THUNDERX2T99 +extern gotoblas_t gotoblas_THUNDERX2T99; +#else +#define gotoblas_THUNDERX2T99 gotoblas_ARMV8 +#endif +#ifdef DYN_THUNDERX3T110 +extern gotoblas_t gotoblas_THUNDERX3T110; +#else +#define gotoblas_THUNDERX3T110 gotoblas_ARMV8 +#endif +#ifdef DYN_EMAG8180 +extern gotoblas_t gotoblas_EMAG8180; +#else +#define gotoblas_EMAG8180 gotoblas_ARMV8 +#endif +#ifdef DYN_NEOVERSEN1 +extern gotoblas_t gotoblas_NEOVERSEN1; +#else +#define gotoblas_NEOVERSEN1 gotoblas_ARMV8 +#endif +#else +extern gotoblas_t gotoblas_CORTEXA53; extern gotoblas_t gotoblas_CORTEXA57; +extern gotoblas_t gotoblas_CORTEXA72; +extern gotoblas_t gotoblas_CORTEXA73; +extern gotoblas_t gotoblas_FALKOR; extern gotoblas_t gotoblas_THUNDERX; extern gotoblas_t gotoblas_THUNDERX2T99; +extern gotoblas_t gotoblas_TSV110; +extern gotoblas_t gotoblas_EMAG8180; +extern gotoblas_t gotoblas_NEOVERSEN1; +extern gotoblas_t gotoblas_THUNDERX3T110; +#endif extern void openblas_warning(int verbose, const char * msg); -#define NUM_CORETYPES 4 +#define NUM_CORETYPES 12 /* * In case asm/hwcap.h is outdated on the build system, make sure @@ -58,22 +126,38 @@ extern void openblas_warning(int verbose, const char * msg); #endif #define get_cpu_ftr(id, var) ({ \ - asm("mrs %0, "#id : "=r" (var)); \ + __asm__ ("mrs %0, "#id : "=r" (var)); \ }) static char *corename[] = { "armv8", + "cortexa53", "cortexa57", + "cortexa72", + "cortexa73", + "falkor", "thunderx", "thunderx2t99", + "tsv110", + "emag8180", + "neoversen1", + "thunderx3t110", "unknown" }; char *gotoblas_corename(void) { if (gotoblas == &gotoblas_ARMV8) return corename[ 0]; - if (gotoblas == &gotoblas_CORTEXA57) return corename[ 1]; - if (gotoblas == &gotoblas_THUNDERX) return corename[ 2]; - if (gotoblas == &gotoblas_THUNDERX2T99) return corename[ 3]; + if (gotoblas == &gotoblas_CORTEXA53) return corename[ 1]; + if (gotoblas == &gotoblas_CORTEXA57) return corename[ 2]; + if (gotoblas == &gotoblas_CORTEXA72) return corename[ 3]; + if (gotoblas == &gotoblas_CORTEXA73) return corename[ 4]; + if (gotoblas == &gotoblas_FALKOR) return corename[ 5]; + if (gotoblas == &gotoblas_THUNDERX) return corename[ 6]; + if (gotoblas == &gotoblas_THUNDERX2T99) return corename[ 7]; + if (gotoblas == &gotoblas_TSV110) return corename[ 8]; + if (gotoblas == &gotoblas_EMAG8180) return corename[ 9]; + if (gotoblas == &gotoblas_NEOVERSEN1) return corename[10]; + if (gotoblas == &gotoblas_THUNDERX3T110) return corename[11]; return corename[NUM_CORETYPES]; } @@ -94,9 +178,17 @@ static gotoblas_t *force_coretype(char *coretype) { switch (found) { case 0: return (&gotoblas_ARMV8); - case 1: return (&gotoblas_CORTEXA57); - case 2: return 
(&gotoblas_THUNDERX); - case 3: return (&gotoblas_THUNDERX2T99); + case 1: return (&gotoblas_CORTEXA53); + case 2: return (&gotoblas_CORTEXA57); + case 3: return (&gotoblas_CORTEXA72); + case 4: return (&gotoblas_CORTEXA73); + case 5: return (&gotoblas_FALKOR); + case 6: return (&gotoblas_THUNDERX); + case 7: return (&gotoblas_THUNDERX2T99); + case 8: return (&gotoblas_TSV110); + case 9: return (&gotoblas_EMAG8180); + case 10: return (&gotoblas_NEOVERSEN1); + case 11: return (&gotoblas_THUNDERX3T110); } snprintf(message, 128, "Core not found: %s\n", coretype); openblas_warning(1, message); @@ -105,15 +197,30 @@ static gotoblas_t *force_coretype(char *coretype) { static gotoblas_t *get_coretype(void) { int implementer, variant, part, arch, revision, midr_el1; - + char coremsg[128]; + +#if (!defined OS_LINUX && !defined OS_ANDROID) + return NULL; +#else + if (!(getauxval(AT_HWCAP) & HWCAP_CPUID)) { - char coremsg[128]; +#ifdef __linux + FILE *infile; + char buffer[512], *p, *cpu_part = NULL, *cpu_implementer = NULL; + p = (char *) NULL ; + infile = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1","r"); + if (!infile) return NULL; + fgets(buffer, sizeof(buffer), infile); + midr_el1=strtoul(buffer,NULL,16); + fclose(infile); +#else snprintf(coremsg, 128, "Kernel lacks cpuid feature support. Auto detection of core type failed !!!\n"); openblas_warning(1, coremsg); return NULL; +#endif + } else { + get_cpu_ftr(MIDR_EL1, midr_el1); } - - get_cpu_ftr(MIDR_EL1, midr_el1); /* * MIDR_EL1 * @@ -130,10 +237,16 @@ static gotoblas_t *get_coretype(void) { case 0x41: // ARM switch (part) { - case 0xd07: // Cortex A57 - case 0xd08: // Cortex A72 case 0xd03: // Cortex A53 + return &gotoblas_CORTEXA53; + case 0xd07: // Cortex A57 return &gotoblas_CORTEXA57; + case 0xd08: // Cortex A72 + return &gotoblas_CORTEXA72; + case 0xd09: // Cortex A73 + return &gotoblas_CORTEXA73; + case 0xd0c: // Neoverse N1 + return &gotoblas_NEOVERSEN1; } break; case 0x42: // Broadcom @@ -150,10 +263,37 @@ static gotoblas_t *get_coretype(void) { return &gotoblas_THUNDERX; case 0x0af: // ThunderX2 return &gotoblas_THUNDERX2T99; + case 0x0b8: // ThunderX3 + return &gotoblas_THUNDERX3T110; + } + break; + case 0x48: // HiSilicon + switch (part) + { + case 0xd01: // tsv110 + return &gotoblas_TSV110; + } + break; + case 0x50: // Ampere + switch (part) + { + case 0x000: // Skylark/EMAG8180 + return &gotoblas_EMAG8180; } break; + case 0x51: // Qualcomm + switch (part) + { + case 0xc00: // Falkor + return &gotoblas_FALKOR; + } + break; + default: + snprintf(coremsg, 128, "Unknown CPU model - implementer %x part %x\n",implementer,part); + openblas_warning(1, coremsg); } return NULL; +#endif } void gotoblas_dynamic_init(void) { diff --git a/driver/others/dynamic_mips64.c b/driver/others/dynamic_mips64.c new file mode 100644 index 000000000..9fd19d739 --- /dev/null +++ b/driver/others/dynamic_mips64.c @@ -0,0 +1,230 @@ +/***************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**********************************************************************************/ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/wait.h> +#include <unistd.h> +#include <signal.h> +#include "common.h" + +extern gotoblas_t gotoblas_LOONGSON3R3; +extern gotoblas_t gotoblas_LOONGSON3R4; + +extern void openblas_warning(int verbose, const char * msg); + +#define NUM_CORETYPES 2 + +static char *corename[] = { + "loongson3r3", + "loongson3r4", + "UNKNOWN" +}; + +char *gotoblas_corename(void) { + if (gotoblas == &gotoblas_LOONGSON3R3) return corename[0]; + if (gotoblas == &gotoblas_LOONGSON3R4) return corename[1]; + return corename[NUM_CORETYPES]; +} + +static gotoblas_t *force_coretype(char *coretype) { + int i; + int found = -1; + char message[128]; + + for ( i=0 ; i < NUM_CORETYPES; i++) + { + if (!strncasecmp(coretype, corename[i], 20)) + { + found = i; + break; + } + } + + switch (found) + { + case 0: return (&gotoblas_LOONGSON3R3); + case 1: return (&gotoblas_LOONGSON3R4); + } + snprintf(message, 128, "Core not found: %s\n", coretype); + openblas_warning(1, message); + return NULL; +} + +#define MMI_MASK 0x00000010 +#define MSA_MASK 0x00000020 + +int fd[2]; +int support_cpucfg; + +static void handler(int signum) +{ + close(fd[1]); + exit(1); +} + +/* Brief : Function to check if cpucfg supported on loongson + * Return: 1 supported + * 0 not supported + */ +static int cpucfg_test(void) { + pid_t pid; + int status = 0; + + support_cpucfg = 0; + pipe(fd); + pid = fork(); + if (pid == 0) { /* Subprocess */ + struct sigaction act; + close(fd[0]); + /* Set signal action for SIGILL. */ + act.sa_handler = handler; + sigaction(SIGILL,&act,NULL); + + /* Execute cpucfg in subprocess. 
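cpucfg_test() probes for the Loongson cpucfg instruction by executing it in a fork()ed child, so a SIGILL on an older core kills only the child. A self-contained sketch of that probe pattern (insn_supported and try_insn are illustrative names; the real function above also reports the result back through a pipe):

#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

static int insn_supported(void (*try_insn)(void)) {
  pid_t pid = fork();
  if (pid < 0) return 0;            /* be conservative if fork fails */
  if (pid == 0) {                   /* child */
    signal(SIGILL, SIG_DFL);        /* default action: terminate on illegal insn */
    try_insn();                     /* dies here if the CPU lacks the instruction */
    _exit(0);                       /* clean exit means it executed */
  }
  int status;
  if (waitpid(pid, &status, 0) < 0) return 0;
  return WIFEXITED(status) && WEXITSTATUS(status) == 0;
}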
*/ + __asm__ volatile( + ".insn \n\t" + ".word (0xc8080118) \n\t" + ::: + ); + support_cpucfg = 1; + write(fd[1],&support_cpucfg,sizeof(support_cpucfg)); + close(fd[1]); + exit(0); + } else if (pid > 0){ /* Parent process*/ + close(fd[1]); + if ((waitpid(pid,&status,0) <= 0) || + (read(fd[0],&support_cpucfg,sizeof(support_cpucfg)) <= 0)) + support_cpucfg = 0; + close(fd[0]); + } else { + support_cpucfg = 0; + } + + return support_cpucfg; +} + +static gotoblas_t *get_coretype_from_cpucfg(void) { + int flag = 0; + __asm__ volatile( + ".insn \n\t" + "dli $8, 0x01 \n\t" + ".word (0xc9084918) \n\t" + "usw $9, 0x00(%0) \n\t" + : + : "r"(&flag) + : "memory" + ); + if (flag & MSA_MASK) + return (&gotoblas_LOONGSON3R4); + if (flag & MMI_MASK) + return (&gotoblas_LOONGSON3R3); + return NULL; +} + +static gotoblas_t *get_coretype_from_cpuinfo(void) { +#ifdef linux + FILE *infile; + char buffer[512], *p; + + p = (char *)NULL; + //Check model name for Loongson3 + infile = fopen("/proc/cpuinfo", "r"); + while (fgets(buffer, sizeof(buffer), infile)){ + if (!strncmp("model name", buffer, 10)){ + p = strchr(buffer, ':') + 2; + break; + } + } + fclose(infile); + if(p != NULL){ + if (strstr(p, "Loongson-3A3000") || strstr(p, "Loongson-3B3000")) + return (&gotoblas_LOONGSON3R3); + else if(strstr(p, "Loongson-3A4000") || strstr(p, "Loongson-3B4000")) + return (&gotoblas_LOONGSON3R4); + else + return NULL; + } +#endif + return NULL; +} + +static gotoblas_t *get_coretype(void) { + int ret = 0; + + ret = cpucfg_test(); + if (ret == 1) + return get_coretype_from_cpucfg(); + else + return get_coretype_from_cpuinfo(); +} + +void gotoblas_dynamic_init(void) { + char coremsg[128]; + char coren[22]; + char *p; + + if (gotoblas) return; + + p = getenv("OPENBLAS_CORETYPE"); + if ( p ) + { + gotoblas = force_coretype(p); + } + else + { + gotoblas = get_coretype(); + } + + if (gotoblas == NULL) + { + snprintf(coremsg, 128, "Falling back to loongson3r3 core\n"); + openblas_warning(1, coremsg); + gotoblas = &gotoblas_LOONGSON3R3; + } + + if (gotoblas && gotoblas->init) { + strncpy(coren, gotoblas_corename(), 20); + sprintf(coremsg, "Core: %s\n", coren); + openblas_warning(2, coremsg); + gotoblas -> init(); + } else { + openblas_warning(0, "OpenBLAS : Architecture Initialization failed. 
No initialization function found.\n"); + exit(1); + } + +} + +void gotoblas_dynamic_quit(void) { + gotoblas = NULL; +} diff --git a/driver/others/dynamic_power.c b/driver/others/dynamic_power.c index 0c4a87a5e..b8e5840a3 100644 --- a/driver/others/dynamic_power.c +++ b/driver/others/dynamic_power.c @@ -3,7 +3,16 @@ extern gotoblas_t gotoblas_POWER6; extern gotoblas_t gotoblas_POWER8; +#if (!defined __GNUC__) || ( __GNUC__ >= 6) extern gotoblas_t gotoblas_POWER9; +#endif +//#if (!defined __GNUC__) || ( __GNUC__ >= 11) \ +// || (__GNUC__ == 10 && __GNUC_MINOR__ >= 2) +//#define HAVE_P10_SUPPORT 1 +//#endif +#ifdef HAVE_P10_SUPPORT +extern gotoblas_t gotoblas_POWER10; +#endif extern void openblas_warning(int verbose, const char *msg); @@ -11,26 +20,192 @@ static char *corename[] = { "unknown", "POWER6", "POWER8", - "POWER9" + "POWER9", + "POWER10" }; #define NUM_CORETYPES 4 char *gotoblas_corename(void) { +#ifndef C_PGI if (gotoblas == &gotoblas_POWER6) return corename[1]; +#endif if (gotoblas == &gotoblas_POWER8) return corename[2]; +#if (!defined __GNUC__) || ( __GNUC__ >= 6) if (gotoblas == &gotoblas_POWER9) return corename[3]; +#endif +#ifdef HAVE_P10_SUPPORT + if (gotoblas == &gotoblas_POWER10) return corename[4]; +#endif return corename[0]; } +#ifdef C_PGI +/* + * NV HPC compilers do not yet implement __builtin_cpu_is(). + * Fake a version here for use in the CPU detection code below. + * + * Strategy here is to first check the CPU to see what it actually is, + * and then test the input to see if what the CPU actually is matches + * what was requested. + */ + +#include + +/* + * Define POWER processor version table. + * + * NOTE NV HPC SDK compilers only support POWER8 and POWER9 at this time + */ + +#define CPU_UNKNOWN 0 +#define CPU_POWER5 5 +#define CPU_POWER6 6 +#define CPU_POWER8 8 +#define CPU_POWER9 9 +#define CPU_POWER10 10 + +static struct { + uint32_t pvr_mask; + uint32_t pvr_value; + const char* cpu_name; + uint32_t cpu_type; +} pvrPOWER [] = { + + { /* POWER6 in P5+ mode; 2.04-compliant processor */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000001, + .cpu_name = "POWER5+", + .cpu_type = CPU_POWER5, + }, + + { /* Power6 aka POWER6X*/ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003e0000, + .cpu_name = "POWER6 (raw)", + .cpu_type = CPU_POWER6, + }, + + { /* Power7 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003f0000, + .cpu_name = "POWER7 (raw)", + .cpu_type = CPU_POWER6, + }, + + { /* Power7+ */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004A0000, + .cpu_name = "POWER7+ (raw)", + .cpu_type = CPU_POWER6, + }, + + { /* Power8E */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004b0000, + .cpu_name = "POWER8E (raw)", + .cpu_type = CPU_POWER8, + }, + + { /* Power8NVL */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004c0000, + .cpu_name = "POWER8NVL (raw)", + .cpu_type = CPU_POWER8, + }, + + { /* Power8 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004d0000, + .cpu_name = "POWER8 (raw)", + .cpu_type = CPU_POWER8, + }, + + { /* Power9 DD2.0 */ + .pvr_mask = 0xffffefff, + .pvr_value = 0x004e0200, + .cpu_name = "POWER9 (raw)", + .cpu_type = CPU_POWER9, + }, + + { /* Power9 DD 2.1 */ + .pvr_mask = 0xffffefff, + .pvr_value = 0x004e0201, + .cpu_name = "POWER9 (raw)", + .cpu_type = CPU_POWER9, + }, + + { /* Power9 DD2.2 or later */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004e0000, + .cpu_name = "POWER9 (raw)", + .cpu_type = CPU_POWER9, + }, + + { /* Power10 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00800000, + .cpu_name = "POWER10 (raw)", + .cpu_type = CPU_POWER10, + }, + + { /* End 
of table, pvr_mask and pvr_value must be zero */ + .pvr_mask = 0x0, + .pvr_value = 0x0, + .cpu_name = "Unknown", + .cpu_type = CPU_UNKNOWN, + }, +}; + +static int __builtin_cpu_is(const char *cpu) { + int i; + uint32_t pvr; + uint32_t cpu_type; + + asm("mfpvr %0" : "=r"(pvr)); + + for (i = 0 ; i < sizeof pvrPOWER / sizeof *pvrPOWER ; ++i) { + if ((pvr & pvrPOWER[i].pvr_mask) == pvrPOWER[i].pvr_value) { + break; + } + } + +#if defined(DEBUG) + printf("%s: returning CPU=%s, cpu_type=%p\n", __func__, + pvrPOWER[i].cpu_name, pvrPOWER[i].cpu_type); +#endif + cpu_type = pvrPOWER[i].cpu_type; + + if (!strcmp(cpu, "power8")) + return cpu_type == CPU_POWER8; + if (!strcmp(cpu, "power9")) + return cpu_type == CPU_POWER9; + return 0; +} + +#endif /* C_PGI */ + static gotoblas_t *get_coretype(void) { +#ifndef C_PGI if (__builtin_cpu_is("power6") || __builtin_cpu_is("power6x")) return &gotoblas_POWER6; +#endif if (__builtin_cpu_is("power8")) return &gotoblas_POWER8; +#if (!defined __GNUC__) || ( __GNUC__ >= 6) if (__builtin_cpu_is("power9")) return &gotoblas_POWER9; +#endif +#ifdef HAVE_P10_SUPPORT + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")) + return &gotoblas_POWER10; +#endif + /* Fall back to the POWER9 implementation if the toolchain is too old or the MMA feature is not set */ +#if (!defined __GNUC__) || ( __GNUC__ >= 11) || (__GNUC__ == 10 && __GNUC_MINOR__ >= 2) + if (__builtin_cpu_is("power10")) + return &gotoblas_POWER9; +#endif return NULL; } @@ -51,9 +226,16 @@ static gotoblas_t *force_coretype(char * coretype) { switch (found) { +#ifndef C_PGI case 1: return (&gotoblas_POWER6); +#endif case 2: return (&gotoblas_POWER8); +#if (!defined __GNUC__) || ( __GNUC__ >= 6) case 3: return (&gotoblas_POWER9); +#endif +#ifdef HAVE_P10_SUPPORT + case 4: return (&gotoblas_POWER10); +#endif default: return NULL; } snprintf(message, 128, "Core not found: %s\n", coretype); diff --git a/driver/others/dynamic_zarch.c b/driver/others/dynamic_zarch.c new file mode 100644 index 000000000..bf5eab9b2 --- /dev/null +++ b/driver/others/dynamic_zarch.c @@ -0,0 +1,181 @@ +#include "common.h" +#include <stdlib.h> + +// Guard the use of getauxval() on glibc version >= 2.16 +#ifdef __GLIBC__ +#include <features.h> +#if __GLIBC_PREREQ(2, 16) +#include <sys/auxv.h> +#define HAVE_GETAUXVAL 1 + +static unsigned long get_hwcap(void) +{ + unsigned long hwcap = getauxval(AT_HWCAP); + char *maskenv; + + // honor requests for not using specific CPU features in LD_HWCAP_MASK + maskenv = getenv("LD_HWCAP_MASK"); + if (maskenv) + hwcap &= strtoul(maskenv, NULL, 0); + + return hwcap; + // note that a missing auxval is interpreted as no capabilities + // available, which is safe. +} + +#else // __GLIBC_PREREQ(2, 16) +#warn "Cannot detect SIMD support in Z13 or newer architectures since glibc is older than 2.16" + +static unsigned long get_hwcap(void) { + // treat missing support for getauxval() as no capabilities available, + // which is safe. 
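get_hwcap() above deliberately honors LD_HWCAP_MASK so users can veto CPU features the kernel advertises. A small standalone demonstration of the same pattern (assumes Linux with glibc >= 2.16 for getauxval):

#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>

int main(void) {
  unsigned long hwcap = getauxval(AT_HWCAP);
  const char *mask = getenv("LD_HWCAP_MASK");
  if (mask)
    hwcap &= strtoul(mask, NULL, 0);   /* drop vetoed capability bits */
  printf("effective hwcap: %#lx\n", hwcap);
  return 0;
}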
+ return 0; +} +#endif // __GLIBC_PREREQ(2, 16) +#endif // __GLIBC + +extern gotoblas_t gotoblas_ZARCH_GENERIC; +#ifdef DYN_Z13 +extern gotoblas_t gotoblas_Z13; +#endif +#ifdef DYN_Z14 +extern gotoblas_t gotoblas_Z14; +#endif + +#define NUM_CORETYPES 4 + +extern void openblas_warning(int verbose, const char* msg); + +static char* corename[] = { + "unknown", + "Z13", + "Z14", + "ZARCH_GENERIC", +}; + +char* gotoblas_corename(void) { +#ifdef DYN_Z13 + if (gotoblas == &gotoblas_Z13) return corename[1]; +#endif +#ifdef DYN_Z14 + if (gotoblas == &gotoblas_Z14) return corename[2]; +#endif + if (gotoblas == &gotoblas_ZARCH_GENERIC) return corename[3]; + + return corename[0]; +} + +#ifndef HWCAP_S390_VXE +#define HWCAP_S390_VXE 8192 +#endif + +/** + * Detect the fitting set of kernels by retrieving the CPU features supported by + * OS from the auxiliary value AT_HWCAP and choosing the set of kernels + * ("coretype") that exploits most of the features and can be compiled with the + * available gcc version. + * Note that we cannot use vector registers on a z13 or newer unless supported + * by the OS kernel (which needs to handle them properly during context switch). + */ +static gotoblas_t* get_coretype(void) { + + unsigned long hwcap __attribute__((unused)) = get_hwcap(); + +#ifdef DYN_Z14 + // z14 and z15 systems: exploit Vector Facility (SIMD) and + // Vector-Enhancements Facility 1 (float SIMD instructions), if present. + if ((hwcap & HWCAP_S390_VX) && (hwcap & HWCAP_S390_VXE)) + return &gotoblas_Z14; +#endif + +#ifdef DYN_Z13 + // z13: Vector Facility (SIMD for double) + if (hwcap & HWCAP_S390_VX) + return &gotoblas_Z13; +#endif + + // fallback in case of missing compiler support, systems before z13, or + // when the OS does not advertise support for the Vector Facility (e.g., + // missing support in the OS kernel) + return &gotoblas_ZARCH_GENERIC; +} + +static gotoblas_t* force_coretype(char* coretype) { + + int i; + int found = -1; + char message[128]; + + for (i = 0; i < NUM_CORETYPES; i++) + { + if (!strncasecmp(coretype, corename[i], 20)) + { + found = i; + break; + } + } + + if (found == 1) { +#ifdef DYN_Z13 + return &gotoblas_Z13; +#else + openblas_warning(1, "Z13 support not compiled in"); + return NULL; +#endif + } else if (found == 2) { +#ifdef DYN_Z14 + return &gotoblas_Z14; +#else + openblas_warning(1, "Z14 support not compiled in"); + return NULL; +#endif + } else if (found == 3) { + return &gotoblas_ZARCH_GENERIC; + } + + snprintf(message, 128, "Core not found: %s\n", coretype); + openblas_warning(1, message); + return NULL; +} + +void gotoblas_dynamic_init(void) { + + char coremsg[128]; + char coren[22]; + char* p; + + + if (gotoblas) return; + + p = getenv("OPENBLAS_CORETYPE"); + if (p) + { + gotoblas = force_coretype(p); + } + else + { + gotoblas = get_coretype(); + } + + if (gotoblas == NULL) + { + snprintf(coremsg, 128, "Failed to detect system, falling back to generic z support.\n"); + openblas_warning(1, coremsg); + gotoblas = &gotoblas_ZARCH_GENERIC; + } + + if (gotoblas && gotoblas->init) { + strncpy(coren, gotoblas_corename(), 20); + sprintf(coremsg, "Core: %s\n", coren); + openblas_warning(2, coremsg); + gotoblas->init(); + } + else { + openblas_warning(0, "OpenBLAS : Architecture Initialization failed. 
No initialization function found.\n"); + exit(1); + } +} + +void gotoblas_dynamic_quit(void) { + gotoblas = NULL; +} diff --git a/driver/others/init.c b/driver/others/init.c index 0aad9c407..a29dce971 100644 --- a/driver/others/init.c +++ b/driver/others/init.c @@ -857,7 +857,14 @@ void gotoblas_affinity_init(void) { common -> shmid = pshmid; if (common -> magic != SH_MAGIC) { + +#if defined(__GLIBC_PREREQ) +#if __GLIBC_PREREQ(2, 7) cpu_set_t *cpusetp; +#else + cpu_set_t cpuset; +#endif +#endif int nums; int ret; @@ -890,7 +897,7 @@ void gotoblas_affinity_init(void) { } CPU_FREE(cpusetp); #else - ret = sched_getaffinity(0,sizeof(cpu_set_t), cpusetp); + ret = sched_getaffinity(0,sizeof(cpu_set_t), &cpuset); if (ret!=0) { common->num_procs = nums; } else { @@ -898,11 +905,11 @@ void gotoblas_affinity_init(void) { int i; int n = 0; for (i=0;i<nums;i++) if (CPU_ISSET(i,&cpuset)) n++; common->num_procs = n; } #else - common->num_procs = CPU_COUNT(sizeof(cpu_set_t),cpusetp); + common->num_procs = CPU_COUNT(&cpuset); } #endif diff --git a/driver/others/memory.c b/driver/others/memory.c index 02352b3ae..91d21a88e 100644 --- a/driver/others/memory.c +++ b/driver/others/memory.c @@ -80,13 +80,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #undef COMPILE_TLS #endif -#if defined(__GLIBC_PREREQ) +#if defined(__GLIBC_PREREQ) #if !__GLIBC_PREREQ(2,20) #undef COMPILE_TLS #endif #endif #endif +/* Memory buffer must fit two matrix subblocks of maximal size */ +#define XSTR(x) STR(x) +#define STR(x) #x +#if BUFFER_SIZE < (SGEMM_DEFAULT_P * SGEMM_DEFAULT_Q * 4 * 2) || \ + BUFFER_SIZE < (SGEMM_DEFAULT_P * SGEMM_DEFAULT_R * 4 * 2) || \ + BUFFER_SIZE < (SGEMM_DEFAULT_R * SGEMM_DEFAULT_Q * 4 * 2) +#warning BUFFER_SIZE is too small for P, Q, and R of SGEMM - large calculations may crash ! +#endif +#if BUFFER_SIZE < (DGEMM_DEFAULT_P * DGEMM_DEFAULT_Q * 8 * 2) || \ + BUFFER_SIZE < (DGEMM_DEFAULT_P * DGEMM_DEFAULT_R * 8 * 2) || \ + BUFFER_SIZE < (DGEMM_DEFAULT_R * DGEMM_DEFAULT_Q * 8 * 2) +#warning BUFFER_SIZE is too small for P, Q, and R of DGEMM - large calculations may crash ! +#endif +#if BUFFER_SIZE < (CGEMM_DEFAULT_P * CGEMM_DEFAULT_Q * 8 * 2) || \ + BUFFER_SIZE < (CGEMM_DEFAULT_P * CGEMM_DEFAULT_R * 8 * 2) || \ + BUFFER_SIZE < (CGEMM_DEFAULT_R * CGEMM_DEFAULT_Q * 8 * 2) +#warning BUFFER_SIZE is too small for P, Q, and R of CGEMM - large calculations may crash ! +#endif +#if BUFFER_SIZE < (ZGEMM_DEFAULT_P * ZGEMM_DEFAULT_Q * 16 * 2) || \ + BUFFER_SIZE < (ZGEMM_DEFAULT_P * ZGEMM_DEFAULT_R * 16 * 2) || \ + BUFFER_SIZE < (ZGEMM_DEFAULT_R * ZGEMM_DEFAULT_Q * 16 * 2) +#warning BUFFER_SIZE is too small for P, Q, and R of ZGEMM - large calculations may crash ! +#endif + #if defined(COMPILE_TLS) #include <errno.h> @@ -129,7 +153,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <kernel/OS.h> #endif -#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) +#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) #include <sys/sysctl.h> #include <sys/resource.h> #endif @@ -137,7 +161,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(OS_WINDOWS) && (defined(__MINGW32__) || defined(__MINGW64__)) #include <conio.h> #undef printf -#define printf _cprintf +#define printf _cprintf #endif #ifdef OS_LINUX @@ -166,14 +190,14 @@ 
#define CONSTRUCTOR __cdecl #define DESTRUCTOR __cdecl #elif (defined(OS_DARWIN) || defined(OS_SUNOS)) && defined(C_GCC) -#define CONSTRUCTOR __attribute__ ((constructor)) -#define DESTRUCTOR __attribute__ ((destructor)) +#define CONSTRUCTOR __attribute__ ((constructor)) +#define DESTRUCTOR __attribute__ ((destructor)) #elif __GNUC__ && INIT_PRIORITY && ((GCC_VERSION >= 40300) || (CLANG_VERSION >= 20900)) -#define CONSTRUCTOR __attribute__ ((constructor(101))) -#define DESTRUCTOR __attribute__ ((destructor(101))) +#define CONSTRUCTOR __attribute__ ((constructor(101))) +#define DESTRUCTOR __attribute__ ((destructor(101))) #else -#define CONSTRUCTOR __attribute__ ((constructor)) -#define DESTRUCTOR __attribute__ ((destructor)) +#define CONSTRUCTOR __attribute__ ((constructor)) +#define DESTRUCTOR __attribute__ ((destructor)) #endif #ifdef DYNAMIC_ARCH @@ -192,7 +216,7 @@ void goto_set_num_threads(int num_threads) {}; #else -#if defined(OS_LINUX) || defined(OS_SUNOS) || defined(OS_NETBSD) +#if defined(OS_LINUX) || defined(OS_SUNOS) #ifndef NO_AFFINITY int get_num_procs(void); #else @@ -248,7 +272,7 @@ int get_num_procs(void) { return nums; } ret = CPU_COUNT_S(size,cpusetp); - if (ret > 0 && ret < nums) nums = ret; + if (ret > 0 && ret < nums) nums = ret; CPU_FREE(cpusetp); return nums; } else { @@ -257,7 +281,7 @@ int get_num_procs(void) { return nums; } ret = CPU_COUNT(&cpuset); - if (ret > 0 && ret < nums) nums = ret; + if (ret > 0 && ret < nums) nums = ret; return nums; } #endif @@ -312,7 +336,7 @@ int get_num_procs(void) { #endif -#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) +#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) int get_num_procs(void) { @@ -404,7 +428,7 @@ extern int openblas_goto_num_threads_env(); extern int openblas_omp_num_threads_env(); int blas_get_cpu_number(void){ -#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) int max_num; #endif int blas_goto_num = 0; @@ -412,7 +436,7 @@ int blas_get_cpu_number(void){ if (blas_num_threads) return blas_num_threads; -#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) max_num = get_num_procs(); #endif @@ -436,7 +460,7 @@ int blas_get_cpu_number(void){ else if (blas_omp_num > 0) blas_num_threads = blas_omp_num; else blas_num_threads = MAX_CPU_NUMBER; -#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) if (blas_num_threads > max_num) blas_num_threads = max_num; #endif @@ -604,12 +628,12 @@ static void *alloc_mmap(void *address){ if (address){ map_address = mmap(address, - allocation_block_size, - MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0); + allocation_block_size, + MMAP_ACCESS, MMAP_POLICY | 
MAP_FIXED, -1, 0); } else { map_address = mmap(address, - allocation_block_size, - MMAP_ACCESS, MMAP_POLICY, -1, 0); + allocation_block_size, + MMAP_ACCESS, MMAP_POLICY, -1, 0); } STORE_RELEASE_FUNC(map_address, alloc_mmap_free); @@ -624,7 +648,7 @@ static void *alloc_mmap(void *address){ #else #define BENCH_ITERATION 4 -#define SCALING 2 +#define SCALING 2 static inline BLASULONG run_bench(BLASULONG address, BLASULONG size) { @@ -687,60 +711,60 @@ static void *alloc_mmap(void *address){ #endif map_address = mmap(NULL, allocation_block_size * SCALING, - MMAP_ACCESS, MMAP_POLICY, -1, 0); + MMAP_ACCESS, MMAP_POLICY, -1, 0); if (map_address != (void *)-1) { #ifdef OS_LINUX #ifdef DEBUG - int ret=0; - ret=my_mbind(map_address, allocation_block_size * SCALING, MPOL_PREFERRED, NULL, 0, 0); - if(ret==-1){ - int errsv=errno; - perror("OpenBLAS alloc_mmap:"); - printf("error code=%d,\tmap_address=%lx\n",errsv,map_address); - } + int ret=0; + ret=my_mbind(map_address, allocation_block_size * SCALING, MPOL_PREFERRED, NULL, 0, 0); + if(ret==-1){ + int errsv=errno; + perror("OpenBLAS alloc_mmap:"); + printf("error code=%d,\tmap_address=%lx\n",errsv,map_address); + } #else - my_mbind(map_address, allocation_block_size * SCALING, MPOL_PREFERRED, NULL, 0, 0); + my_mbind(map_address, allocation_block_size * SCALING, MPOL_PREFERRED, NULL, 0, 0); #endif #endif - allocsize = DGEMM_P * DGEMM_Q * sizeof(double); + allocsize = DGEMM_P * DGEMM_Q * sizeof(double); - start = (BLASULONG)map_address; - current = (SCALING - 1) * allocation_block_size; - original = current; + start = (BLASULONG)map_address; + current = (SCALING - 1) * allocation_block_size; + original = current; - while(current > 0 && current <= original) { - *(BLASLONG *)start = (BLASLONG)start + PAGESIZE; - start += PAGESIZE; - current -= PAGESIZE; - } + while(current > 0 && current <= original) { + *(BLASLONG *)start = (BLASLONG)start + PAGESIZE; + start += PAGESIZE; + current -= PAGESIZE; + } - *(BLASLONG *)(start - PAGESIZE) = (BLASULONG)map_address; + *(BLASLONG *)(start - PAGESIZE) = (BLASULONG)map_address; - start = (BLASULONG)map_address; + start = (BLASULONG)map_address; - best = (BLASULONG)-1; - best_address = map_address; + best = (BLASULONG)-1; + best_address = map_address; - while ((start + allocsize < (BLASULONG)map_address + (SCALING - 1) * allocation_block_size)) { + while ((start + allocsize < (BLASULONG)map_address + (SCALING - 1) * allocation_block_size)) { - current = run_bench(start, allocsize); + current = run_bench(start, allocsize); - if (best > current) { - best = current; - best_address = (void *)start; - } + if (best > current) { + best = current; + best_address = (void *)start; + } - start += PAGESIZE; + start += PAGESIZE; - } + } if ((BLASULONG)best_address > (BLASULONG)map_address) - munmap(map_address, (BLASULONG)best_address - (BLASULONG)map_address); + munmap(map_address, (BLASULONG)best_address - (BLASULONG)map_address); munmap((void *)((BLASULONG)best_address + allocation_block_size), (SCALING - 1) * allocation_block_size + (BLASULONG)map_address - (BLASULONG)best_address); @@ -822,7 +846,7 @@ static void *alloc_qalloc(void *address){ static void alloc_windows_free(struct alloc_t *alloc_info){ - VirtualFree(alloc_info, allocation_block_size, MEM_DECOMMIT); + VirtualFree(alloc_info, 0, MEM_RELEASE); } @@ -830,9 +854,9 @@ static void *alloc_windows(void *address){ void *map_address; map_address = VirtualAlloc(address, - allocation_block_size, - MEM_RESERVE | MEM_COMMIT, - PAGE_READWRITE); + allocation_block_size, + 
MEM_RESERVE | MEM_COMMIT, + PAGE_READWRITE); if (map_address == (void *)NULL) map_address = (void *)-1; @@ -873,9 +897,9 @@ static void *alloc_devicedirver(void *address){ } map_address = mmap(address, allocation_block_size, - PROT_READ | PROT_WRITE, - MAP_FILE | MAP_SHARED, - fd, 0); + PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, + fd, 0); STORE_RELEASE_FUNC_WITH_ATTR(map_address, alloc_devicedirver_free, fd); @@ -935,7 +959,7 @@ static void alloc_hugetlb_free(struct alloc_t *alloc_info){ #ifdef OS_WINDOWS - VirtualFree(alloc_info, allocation_block_size, MEM_LARGE_PAGES | MEM_DECOMMIT); + VirtualFree(alloc_info, 0, MEM_LARGE_PAGES | MEM_RELEASE); #endif @@ -950,12 +974,12 @@ static void *alloc_hugetlb(void *address){ shmid = shmget(IPC_PRIVATE, allocation_block_size, #ifdef OS_LINUX - SHM_HUGETLB | + SHM_HUGETLB | #endif #ifdef OS_AIX - SHM_LGPAGE | SHM_PIN | + SHM_LGPAGE | SHM_PIN | #endif - IPC_CREAT | SHM_R | SHM_W); + IPC_CREAT | SHM_R | SHM_W); if (shmid != -1) { map_address = (void *)shmat(shmid, address, SHM_RND); @@ -1002,9 +1026,9 @@ static void *alloc_hugetlb(void *address){ } map_address = (void *)VirtualAlloc(address, - allocation_block_size, - MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT, - PAGE_READWRITE); + allocation_block_size, + MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT, + PAGE_READWRITE); tp.Privileges[0].Attributes = 0; AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL); @@ -1054,9 +1078,9 @@ static void *alloc_hugetlbfile(void *address){ unlink(filename); map_address = mmap(address, allocation_block_size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fd, 0); + PROT_READ | PROT_WRITE, + MAP_SHARED, + fd, 0); STORE_RELEASE_FUNC_WITH_ATTR(map_address, alloc_hugetlbfile_free, fd); @@ -1071,7 +1095,7 @@ static BLASULONG base_address = 0UL; static BLASULONG base_address = BASE_ADDRESS; #endif -#if __STDC_VERSION__ >= 201112L +#ifdef HAVE_C11 static _Atomic int memory_initialized = 0; #else static volatile int memory_initialized = 0; @@ -1083,7 +1107,7 @@ static volatile int memory_initialized = 0; /* 1 : Level 2 functions */ /* 2 : Thread */ - static void blas_memory_cleanup(void* ptr){ +static void blas_memory_cleanup(void* ptr){ if (ptr) { struct alloc_t ** table = (struct alloc_t **)ptr; int pos; @@ -1217,29 +1241,29 @@ UNLOCK_COMMAND(&alloc_lock); func = &memoryalloc[0]; - while ((func != NULL) && (map_address == (void *) -1)) { + while ((*func != NULL) && (map_address == (void *) -1)) { - map_address = (*func)((void *)base_address); + map_address = (*func)((void *)base_address); #ifdef ALLOC_DEVICEDRIVER - if ((*func == alloc_devicedirver) && (map_address == (void *)-1)) { - fprintf(stderr, "OpenBLAS Warning ... Physically contiguous allocation failed.\n"); - } + if ((*func == alloc_devicedirver) && (map_address == (void *)-1)) { + fprintf(stderr, "OpenBLAS Warning ... Physically contiguous allocation failed.\n"); + } #endif #ifdef ALLOC_HUGETLBFILE - if ((*func == alloc_hugetlbfile) && (map_address == (void *)-1)) { + if ((*func == alloc_hugetlbfile) && (map_address == (void *)-1)) { #ifndef OS_WINDOWS - fprintf(stderr, "OpenBLAS Warning ... HugeTLB(File) allocation failed.\n"); + fprintf(stderr, "OpenBLAS Warning ... 
HugeTLB(File) allocation failed.\n"); #endif - } + } #endif #if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS) - if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1; + if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1; #endif - func ++; + func ++; } #ifdef DEBUG @@ -1353,7 +1377,7 @@ static BLASULONG init_lock = 0UL; #endif static void _touch_memory(blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, - void *sa, void *sb, BLASLONG pos) { + void *sa, void *sb, BLASLONG pos) { #if !defined(ARCH_POWER) && !defined(ARCH_SPARC) @@ -1483,11 +1507,11 @@ void CONSTRUCTOR gotoblas_init(void) { struct rlimit curlimit; if ( getrlimit(RLIMIT_STACK, &curlimit ) == 0 ) { - if ( curlimit.rlim_cur != curlimit.rlim_max ) - { - curlimit.rlim_cur = curlimit.rlim_max; - setrlimit(RLIMIT_STACK, &curlimit); - } + if ( curlimit.rlim_cur != curlimit.rlim_max ) + { + curlimit.rlim_cur = curlimit.rlim_max; + setrlimit(RLIMIT_STACK, &curlimit); + } } #endif @@ -1521,7 +1545,7 @@ void DESTRUCTOR gotoblas_quit(void) { TlsFree(local_storage_key); #else pthread_key_delete(local_storage_key); -#endif +#endif #endif #ifdef PROFILE @@ -1581,8 +1605,8 @@ BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReser */ static int on_process_term(void) { - gotoblas_quit(); - return 0; + gotoblas_quit(); + return 0; } #ifdef _WIN64 #pragma comment(linker, "/INCLUDE:_tls_used") @@ -1622,6 +1646,7 @@ void gotoblas_dummy_for_PGI(void) { gotoblas_init(); gotoblas_quit(); +#if __PGIC__ < 19 #if 0 asm ("\t.section\t.ctors,\"aw\",@progbits; .align 8; .quad gotoblas_init; .section .text"); asm ("\t.section\t.dtors,\"aw\",@progbits; .align 8; .quad gotoblas_quit; .section .text"); @@ -1629,6 +1654,7 @@ void gotoblas_dummy_for_PGI(void) { asm (".section .init,\"ax\"; call gotoblas_init@PLT; .section .text"); asm (".section .fini,\"ax\"; call gotoblas_quit@PLT; .section .text"); #endif +#endif } #endif @@ -1671,7 +1697,7 @@ void gotoblas_dummy_for_PGI(void) { #include #endif -#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) +#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) #include #include #endif @@ -1679,7 +1705,7 @@ void gotoblas_dummy_for_PGI(void) { #if defined(OS_WINDOWS) && (defined(__MINGW32__) || defined(__MINGW64__)) #include #undef printf -#define printf _cprintf +#define printf _cprintf #endif #ifdef OS_LINUX @@ -1708,14 +1734,14 @@ void gotoblas_dummy_for_PGI(void) { #define CONSTRUCTOR __cdecl #define DESTRUCTOR __cdecl #elif (defined(OS_DARWIN) || defined(OS_SUNOS)) && defined(C_GCC) -#define CONSTRUCTOR __attribute__ ((constructor)) -#define DESTRUCTOR __attribute__ ((destructor)) +#define CONSTRUCTOR __attribute__ ((constructor)) +#define DESTRUCTOR __attribute__ ((destructor)) #elif __GNUC__ && INIT_PRIORITY && ((GCC_VERSION >= 40300) || (CLANG_VERSION >= 20900)) -#define CONSTRUCTOR __attribute__ ((constructor(101))) -#define DESTRUCTOR __attribute__ ((destructor(101))) +#define CONSTRUCTOR __attribute__ ((constructor(101))) +#define DESTRUCTOR __attribute__ ((destructor(101))) #else -#define CONSTRUCTOR __attribute__ ((constructor)) -#define DESTRUCTOR __attribute__ ((destructor)) +#define CONSTRUCTOR __attribute__ ((constructor)) +#define DESTRUCTOR __attribute__ ((destructor)) #endif #ifdef DYNAMIC_ARCH @@ -1734,18 +1760,18 @@ void goto_set_num_threads(int num_threads) 
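One subtle fix in the memory.c hunks above deserves a note: memoryalloc[] is a NULL-terminated table of allocator function pointers, and the loop must test *func (the current entry) rather than func (the cursor into the array, which never becomes NULL). A minimal sketch of the corrected loop shape, with illustrative names that are not OpenBLAS symbols:

typedef void *(*alloc_fn)(void *);

static void *try_allocators(alloc_fn *tab, void *hint) {
  void *p = (void *)-1;                     /* (void *)-1 marks "not allocated yet" */
  while (*tab != NULL && p == (void *)-1)   /* stop at the NULL sentinel */
    p = (*tab++)(hint);
  return p;
}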
{}; #else -#if defined(OS_LINUX) || defined(OS_SUNOS) || defined(OS_NETBSD) +#if defined(OS_LINUX) || defined(OS_SUNOS) #ifndef NO_AFFINITY int get_num_procs(void); #else int get_num_procs(void) { static int nums = 0; + +#if defined(__GLIBC_PREREQ) cpu_set_t cpuset,*cpusetp; size_t size; int ret; - -#if defined(__GLIBC_PREREQ) #if !__GLIBC_PREREQ(2, 7) int i; #if !__GLIBC_PREREQ(2, 6) @@ -1791,7 +1817,7 @@ int get_num_procs(void) { return nums; } ret = CPU_COUNT_S(size,cpusetp); - if (ret > 0 && ret < nums) nums = ret; + if (ret > 0 && ret < nums) nums = ret; CPU_FREE(cpusetp); return nums; } else { @@ -1800,7 +1826,7 @@ int get_num_procs(void) { return nums; } ret = CPU_COUNT(&cpuset); - if (ret > 0 && ret < nums) nums = ret; + if (ret > 0 && ret < nums) nums = ret; return nums; } #endif @@ -1853,7 +1879,7 @@ int get_num_procs(void) { #endif -#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) +#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) int get_num_procs(void) { @@ -1943,7 +1969,7 @@ extern int openblas_goto_num_threads_env(); extern int openblas_omp_num_threads_env(); int blas_get_cpu_number(void){ -#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) int max_num; #endif int blas_goto_num = 0; @@ -1951,7 +1977,7 @@ int blas_get_cpu_number(void){ if (blas_num_threads) return blas_num_threads; -#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) max_num = get_num_procs(); #endif @@ -1975,7 +2001,7 @@ int blas_get_cpu_number(void){ else if (blas_omp_num > 0) blas_num_threads = blas_omp_num; else blas_num_threads = MAX_CPU_NUMBER; -#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) if (blas_num_threads > max_num) blas_num_threads = max_num; #endif @@ -2039,8 +2065,12 @@ static BLASULONG alloc_lock = 0UL; static void alloc_mmap_free(struct release_t *release){ +if (!release->address) return; + if (munmap(release -> address, BUFFER_SIZE)) { - printf("OpenBLAS : munmap failed\n"); + int errsv=errno; + perror("OpenBLAS : munmap failed:"); + printf("error code=%d,\trelease->address=%p\n",errsv,release->address); } } @@ -2053,24 +2083,30 @@ static void *alloc_mmap(void *address){ if (address){ map_address = mmap(address, - BUFFER_SIZE, - MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0); + BUFFER_SIZE, + MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0); } else { map_address = mmap(address, - BUFFER_SIZE, - MMAP_ACCESS, MMAP_POLICY, -1, 0); + BUFFER_SIZE, + MMAP_ACCESS, MMAP_POLICY, -1, 0); } if (map_address != (void *)-1) { -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) LOCK_COMMAND(&alloc_lock); -#endif +#endif 
release_info[release_pos].address = map_address; release_info[release_pos].func = alloc_mmap_free; release_pos ++; -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) UNLOCK_COMMAND(&alloc_lock); -#endif +#endif + } else { +#ifdef DEBUG + int errsv=errno; + perror("OpenBLAS : mmap failed:"); + printf("error code=%d,\tmap_address=%lx\n",errsv,map_address); +#endif } #ifdef OS_LINUX @@ -2083,7 +2119,7 @@ static void *alloc_mmap(void *address){ #else #define BENCH_ITERATION 4 -#define SCALING 2 +#define SCALING 2 static inline BLASULONG run_bench(BLASULONG address, BLASULONG size) { @@ -2146,59 +2182,65 @@ static void *alloc_mmap(void *address){ #endif map_address = mmap(NULL, BUFFER_SIZE * SCALING, - MMAP_ACCESS, MMAP_POLICY, -1, 0); + MMAP_ACCESS, MMAP_POLICY, -1, 0); if (map_address != (void *)-1) { #ifdef OS_LINUX #ifdef DEBUG - int ret=0; - ret=my_mbind(map_address, BUFFER_SIZE * SCALING, MPOL_PREFERRED, NULL, 0, 0); - if(ret==-1){ - int errsv=errno; - perror("OpenBLAS alloc_mmap:"); - printf("error code=%d,\tmap_address=%lx\n",errsv,map_address); - } + int ret=0; + ret=my_mbind(map_address, BUFFER_SIZE * SCALING, MPOL_PREFERRED, NULL, 0, 0); + if(ret==-1){ + int errsv=errno; + perror("OpenBLAS alloc_mmap:"); + printf("error code=%d,\tmap_address=%lx\n",errsv,map_address); + } #else - my_mbind(map_address, BUFFER_SIZE * SCALING, MPOL_PREFERRED, NULL, 0, 0); + my_mbind(map_address, BUFFER_SIZE * SCALING, MPOL_PREFERRED, NULL, 0, 0); #endif #endif - +#ifdef BUILD_DOUBLE allocsize = DGEMM_P * DGEMM_Q * sizeof(double); - +#elif defined(BUILD_COMPLEX16) + allocsize = ZGEMM_P * ZGEMM_Q * sizeof(double); +#elif defined(BUILD_COMPLEX) + allocsize = CGEMM_P * CGEMM_Q * sizeof(double); +#else + allocsize = SGEMM_P * SGEMM_Q * sizeof(double); +#endif start = (BLASULONG)map_address; current = (SCALING - 1) * BUFFER_SIZE; - while(current > 0) { - *(BLASLONG *)start = (BLASLONG)start + PAGESIZE; - start += PAGESIZE; - current -= PAGESIZE; - } + while(current > 0) { + *(BLASLONG *)start = (BLASLONG)start + PAGESIZE; + start += PAGESIZE; + current -= PAGESIZE; + } - *(BLASLONG *)(start - PAGESIZE) = (BLASULONG)map_address; + *(BLASLONG *)(start - PAGESIZE) = (BLASULONG)map_address; - start = (BLASULONG)map_address; + start = (BLASULONG)map_address; - best = (BLASULONG)-1; - best_address = map_address; + best = (BLASULONG)-1; + best_address = map_address; - while ((start + allocsize < (BLASULONG)map_address + (SCALING - 1) * BUFFER_SIZE)) { + while ((start + allocsize < (BLASULONG)map_address + (SCALING - 1) * BUFFER_SIZE)) { - current = run_bench(start, allocsize); + current = run_bench(start, allocsize); - if (best > current) { - best = current; - best_address = (void *)start; - } + if (best > current) { + best = current; + best_address = (void *)start; + } - start += PAGESIZE; + start += PAGESIZE; - } + } if ((BLASULONG)best_address > (BLASULONG)map_address) - munmap(map_address, (BLASULONG)best_address - (BLASULONG)map_address); + munmap(map_address, (BLASULONG)best_address - (BLASULONG)map_address); munmap((void *)((BLASULONG)best_address + BUFFER_SIZE), (SCALING - 1) * BUFFER_SIZE + (BLASULONG)map_address - (BLASULONG)best_address); @@ -2214,13 +2256,13 @@ static void *alloc_mmap(void *address){ #endif if (map_address != (void *)-1) { -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) LOCK_COMMAND(&alloc_lock); #endif release_info[release_pos].address = map_address; 
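/*
 * The error paths added above share one pattern: capture errno first, let
 * perror() print the symbolic message, then log the raw code and address
 * for bug reports. A generic sketch; report_map_error() is a hypothetical
 * helper, not an OpenBLAS function.
 */
#include <errno.h>
#include <stdio.h>

static void report_map_error(const char *what, void *addr) {
    int errsv = errno;      /* save before any other call can clobber it */
    perror(what);
    fprintf(stderr, "error code=%d,\taddress=%p\n", errsv, addr);
}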
release_info[release_pos].func = alloc_mmap_free; release_pos ++; -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) UNLOCK_COMMAND(&alloc_lock); #endif } @@ -2298,7 +2340,7 @@ static void *alloc_qalloc(void *address){ static void alloc_windows_free(struct release_t *release){ - VirtualFree(release -> address, BUFFER_SIZE, MEM_DECOMMIT); + VirtualFree(release -> address, 0, MEM_RELEASE); } @@ -2306,9 +2348,9 @@ static void *alloc_windows(void *address){ void *map_address; map_address = VirtualAlloc(address, - BUFFER_SIZE, - MEM_RESERVE | MEM_COMMIT, - PAGE_READWRITE); + BUFFER_SIZE, + MEM_RESERVE | MEM_COMMIT, + PAGE_READWRITE); if (map_address == (void *)NULL) map_address = (void *)-1; @@ -2352,9 +2394,9 @@ static void *alloc_devicedirver(void *address){ } map_address = mmap(address, BUFFER_SIZE, - PROT_READ | PROT_WRITE, - MAP_FILE | MAP_SHARED, - fd, 0); + PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, + fd, 0); if (map_address != (void *)-1) { release_info[release_pos].address = map_address; @@ -2420,7 +2462,7 @@ static void alloc_hugetlb_free(struct release_t *release){ #ifdef OS_WINDOWS - VirtualFree(release -> address, BUFFER_SIZE, MEM_LARGE_PAGES | MEM_DECOMMIT); + VirtualFree(release -> address, 0, MEM_LARGE_PAGES | MEM_RELEASE); #endif @@ -2435,12 +2477,12 @@ static void *alloc_hugetlb(void *address){ shmid = shmget(IPC_PRIVATE, BUFFER_SIZE, #ifdef OS_LINUX - SHM_HUGETLB | + SHM_HUGETLB | #endif #ifdef OS_AIX - SHM_LGPAGE | SHM_PIN | + SHM_LGPAGE | SHM_PIN | #endif - IPC_CREAT | SHM_R | SHM_W); + IPC_CREAT | SHM_R | SHM_W); if (shmid != -1) { map_address = (void *)shmat(shmid, address, SHM_RND); @@ -2475,7 +2517,7 @@ static void *alloc_hugetlb(void *address){ tp.PrivilegeCount = 1; tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; - + if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &tp.Privileges[0].Luid) != TRUE) { CloseHandle(hToken); return (void*)-1; @@ -2487,9 +2529,9 @@ static void *alloc_hugetlb(void *address){ } map_address = (void *)VirtualAlloc(address, - BUFFER_SIZE, - MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT, - PAGE_READWRITE); + BUFFER_SIZE, + MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT, + PAGE_READWRITE); tp.Privileges[0].Attributes = 0; AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL); @@ -2542,9 +2584,9 @@ static void *alloc_hugetlbfile(void *address){ unlink(filename); map_address = mmap(address, BUFFER_SIZE, - PROT_READ | PROT_WRITE, - MAP_SHARED, - fd, 0); + PROT_READ | PROT_WRITE, + MAP_SHARED, + fd, 0); if (map_address != (void *)-1) { release_info[release_pos].address = map_address; @@ -2681,7 +2723,7 @@ void *blas_memory_alloc(int procpos){ if (!memory[position].used && (memory[position].pos == mypos)) { #if defined(SMP) && !defined(USE_OPENMP) LOCK_COMMAND(&alloc_lock); -#else +#else blas_lock(&memory[position].lock); #endif if (!memory[position].used) goto allocation; @@ -2689,7 +2731,7 @@ void *blas_memory_alloc(int procpos){ UNLOCK_COMMAND(&alloc_lock); #else blas_unlock(&memory[position].lock); -#endif +#endif } position ++; @@ -2701,25 +2743,26 @@ void *blas_memory_alloc(int procpos){ position = 0; -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) LOCK_COMMAND(&alloc_lock); #endif do { -#if defined(USE_OPENMP) - if (!memory[position].used) { + RMB; +#if defined(USE_OPENMP) + if (!memory[position].used) { blas_lock(&memory[position].lock); #endif if (!memory[position].used) goto allocation; - + #if defined(USE_OPENMP) 
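/*
 * The recurring edit in this file widens every guard from
 * "SMP && !OPENMP" to "(SMP || USE_LOCKING) && !OPENMP", so a library built
 * single-threaded but with USE_LOCKING=1 still serializes its buffer
 * bookkeeping against concurrent callers. A self-contained sketch of the
 * pattern; NEED_ALLOC_LOCK and record_allocation() are hypothetical.
 */
#include <pthread.h>

#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP)
#define NEED_ALLOC_LOCK 1
#else
#define NEED_ALLOC_LOCK 0
#endif

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int buffers_in_use;      /* stands in for the release_info[] table */

static void record_allocation(void) {
#if NEED_ALLOC_LOCK
    pthread_mutex_lock(&table_lock);
#endif
    buffers_in_use++;           /* shared allocator state */
#if NEED_ALLOC_LOCK
    pthread_mutex_unlock(&table_lock);
#endif
}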
- blas_unlock(&memory[position].lock); + blas_unlock(&memory[position].lock); } #endif position ++; } while (position < NUM_BUFFERS); -#if defined(SMP) && !defined(USE_OPENMP) - UNLOCK_COMMAND(&alloc_lock); +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) + UNLOCK_COMMAND(&alloc_lock); #endif goto error; @@ -2730,10 +2773,10 @@ void *blas_memory_alloc(int procpos){ #endif memory[position].used = 1; -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) UNLOCK_COMMAND(&alloc_lock); #else - blas_unlock(&memory[position].lock); + blas_unlock(&memory[position].lock); #endif if (!memory[position].addr) { do { @@ -2747,27 +2790,27 @@ void *blas_memory_alloc(int procpos){ while ((func != NULL) && (map_address == (void *) -1)) { - map_address = (*func)((void *)base_address); + map_address = (*func)((void *)base_address); #ifdef ALLOC_DEVICEDRIVER - if ((*func == alloc_devicedirver) && (map_address == (void *)-1)) { - fprintf(stderr, "OpenBLAS Warning ... Physically contiguous allocation was failed.\n"); - } + if ((*func == alloc_devicedirver) && (map_address == (void *)-1)) { + fprintf(stderr, "OpenBLAS Warning ... Physically contiguous allocation was failed.\n"); + } #endif #ifdef ALLOC_HUGETLBFILE - if ((*func == alloc_hugetlbfile) && (map_address == (void *)-1)) { + if ((*func == alloc_hugetlbfile) && (map_address == (void *)-1)) { #ifndef OS_WINDOWS - fprintf(stderr, "OpenBLAS Warning ... HugeTLB(File) allocation was failed.\n"); + fprintf(stderr, "OpenBLAS Warning ... HugeTLB(File) allocation was failed.\n"); #endif - } + } #endif #if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS) - if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1; + if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1; #endif - func ++; + func ++; } #ifdef DEBUG @@ -2779,11 +2822,11 @@ void *blas_memory_alloc(int procpos){ } while ((BLASLONG)map_address == -1); -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) LOCK_COMMAND(&alloc_lock); -#endif +#endif memory[position].addr = map_address; -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) UNLOCK_COMMAND(&alloc_lock); #endif @@ -2819,7 +2862,7 @@ void *blas_memory_alloc(int procpos){ #ifdef DEBUG printf("Mapped : %p %3d\n\n", - (void *)memory[position].addr, position); + (void *)memory[position].addr, position); #endif return (void *)memory[position].addr; @@ -2839,15 +2882,16 @@ void blas_memory_free(void *free_area){ #endif position = 0; -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) LOCK_COMMAND(&alloc_lock); #endif while ((position < NUM_BUFFERS) && (memory[position].addr != free_area)) position++; - if (memory[position].addr != free_area) goto error; + if (position >= NUM_BUFFERS) goto error; #ifdef DEBUG + if (memory[position].addr != free_area) goto error; printf(" Position : %d\n", position); #endif @@ -2855,7 +2899,7 @@ void blas_memory_free(void *free_area){ WMB; memory[position].used = 0; -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) UNLOCK_COMMAND(&alloc_lock); #endif @@ -2872,7 +2916,7 @@ void blas_memory_free(void *free_area){ for (position = 0; position < NUM_BUFFERS; position++) printf("%4ld %p : %d\n", position, 
memory[position].addr, memory[position].used); #endif -#if defined(SMP) && !defined(USE_OPENMP) +#if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) UNLOCK_COMMAND(&alloc_lock); #endif return; @@ -2924,7 +2968,7 @@ void blas_shutdown(void){ #if defined(OS_LINUX) && !defined(NO_WARMUP) -#ifdef SMP +#if defined(SMP) || defined(USE_LOCKING) #if defined(USE_PTHREAD_LOCK) static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER; #elif defined(USE_PTHREAD_SPINLOCK) @@ -2935,7 +2979,7 @@ static BLASULONG init_lock = 0UL; #endif static void _touch_memory(blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, - void *sa, void *sb, BLASLONG pos) { + void *sa, void *sb, BLASLONG pos) { #if !defined(ARCH_POWER) && !defined(ARCH_SPARC) @@ -2949,7 +2993,7 @@ static void _touch_memory(blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, if (hot_alloc != 2) { #endif -#ifdef SMP +#if defined(SMP) || defined(USE_LOCKING) LOCK_COMMAND(&init_lock); #endif @@ -2959,7 +3003,7 @@ static void _touch_memory(blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, size -= PAGESIZE; } -#ifdef SMP +#if defined(SMP) || defined(USE_LOCKING) UNLOCK_COMMAND(&init_lock); #endif @@ -3062,15 +3106,15 @@ void CONSTRUCTOR gotoblas_init(void) { //#if defined(OS_LINUX) #if 0 - struct rlimit curlimit; - if ( getrlimit(RLIMIT_STACK, &curlimit ) == 0 ) - { - if ( curlimit.rlim_cur != curlimit.rlim_max ) - { - curlimit.rlim_cur = curlimit.rlim_max; - setrlimit(RLIMIT_STACK, &curlimit); - } - } + struct rlimit curlimit; + if ( getrlimit(RLIMIT_STACK, &curlimit ) == 0 ) + { + if ( curlimit.rlim_cur != curlimit.rlim_max ) + { + curlimit.rlim_cur = curlimit.rlim_max; + setrlimit(RLIMIT_STACK, &curlimit); + } + } #endif #ifdef SMP @@ -3152,8 +3196,8 @@ BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReser */ static int on_process_term(void) { - gotoblas_quit(); - return 0; + gotoblas_quit(); + return 0; } #ifdef _WIN64 #pragma comment(linker, "/INCLUDE:_tls_used") @@ -3192,7 +3236,7 @@ void gotoblas_dummy_for_PGI(void) { gotoblas_init(); gotoblas_quit(); - +#if __PGIC__ < 19 #if 0 asm ("\t.section\t.ctors,\"aw\",@progbits; .align 8; .quad gotoblas_init; .section .text"); asm ("\t.section\t.dtors,\"aw\",@progbits; .align 8; .quad gotoblas_quit; .section .text"); @@ -3200,6 +3244,7 @@ void gotoblas_dummy_for_PGI(void) { asm (".section .init,\"ax\"; call gotoblas_init@PLT; .section .text"); asm (".section .fini,\"ax\"; call gotoblas_quit@PLT; .section .text"); #endif +#endif } #endif diff --git a/driver/others/memory_qalloc.c b/driver/others/memory_qalloc.c index 17b7f5d60..6174d9b75 100644 --- a/driver/others/memory_qalloc.c +++ b/driver/others/memory_qalloc.c @@ -38,21 +38,29 @@ #include #include "common.h" +#ifdef OS_LINUX +#include +#include +#include +#include +#include +#include +#include +#endif -#ifndef SMP -#define blas_cpu_number 1 -#else - -int blas_cpu_number = 1; - -int blas_get_cpu_number(void){ +#ifdef OS_HAIKU +#include +#endif - return blas_cpu_number; -} +#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) +#include +#include #endif + #define FIXED_PAGESIZE 4096 + void *sa = NULL; void *sb = NULL; static double static_buffer[BUFFER_SIZE/sizeof(double)]; @@ -60,7 +68,7 @@ static double static_buffer[BUFFER_SIZE/sizeof(double)]; void *blas_memory_alloc(int numproc){ if (sa == NULL){ -#if 1 +#if 0 sa = (void *)qalloc(QFAST, BUFFER_SIZE); #else sa = (void *)malloc(BUFFER_SIZE); @@ -75,3 +83,296 @@ void 
blas_memory_free(void *free_area){ return; } + + +extern void openblas_warning(int verbose, const char * msg); + +#ifndef SMP + +#define blas_cpu_number 1 +#define blas_num_threads 1 + +/* Dummy Function */ +int goto_get_num_procs (void) { return 1;}; +void goto_set_num_threads(int num_threads) {}; + +#else + +#if defined(OS_LINUX) || defined(OS_SUNOS) +#ifndef NO_AFFINITY +int get_num_procs(void); +#else +int get_num_procs(void) { + + static int nums = 0; + cpu_set_t cpuset,*cpusetp; + size_t size; + int ret; + +#if defined(__GLIBC_PREREQ) +#if !__GLIBC_PREREQ(2, 7) + int i; +#if !__GLIBC_PREREQ(2, 6) + int n; +#endif +#endif +#endif + + if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF); +#if !defined(OS_LINUX) + return nums; +#endif + +/* +#if !defined(__GLIBC_PREREQ) + return nums; +#else + #if !__GLIBC_PREREQ(2, 3) + return nums; + #endif + + #if !__GLIBC_PREREQ(2, 7) + ret = sched_getaffinity(0,sizeof(cpuset), &cpuset); + if (ret!=0) return nums; + n=0; + #if !__GLIBC_PREREQ(2, 6) + for (i=0;i= CPU_SETSIZE) { + cpusetp = CPU_ALLOC(nums); + if (cpusetp == NULL) { + return nums; + } + size = CPU_ALLOC_SIZE(nums); + ret = sched_getaffinity(0,size,cpusetp); + if (ret!=0) { + CPU_FREE(cpusetp); + return nums; + } + ret = CPU_COUNT_S(size,cpusetp); + if (ret > 0 && ret < nums) nums = ret; + CPU_FREE(cpusetp); + return nums; + } else { + ret = sched_getaffinity(0,sizeof(cpuset),&cpuset); + if (ret!=0) { + return nums; + } + ret = CPU_COUNT(&cpuset); + if (ret > 0 && ret < nums) nums = ret; + return nums; + } + #endif +#endif +*/ + return 1; +} +#endif +#endif + +#ifdef OS_ANDROID +int get_num_procs(void) { + static int nums = 0; + if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF); + return nums; +} +#endif + +#ifdef OS_HAIKU +int get_num_procs(void) { + static int nums = 0; + if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF); + return nums; +} +#endif + +#ifdef OS_AIX +int get_num_procs(void) { + static int nums = 0; + if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF); + return nums; +} +#endif + +#ifdef OS_WINDOWS + +int get_num_procs(void) { + + static int nums = 0; + + if (nums == 0) { + + SYSTEM_INFO sysinfo; + + GetSystemInfo(&sysinfo); + + nums = sysinfo.dwNumberOfProcessors; + } + + return nums; +} + +#endif + +#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) + +int get_num_procs(void) { + + static int nums = 0; + + int m[2]; + size_t len; + + if (nums == 0) { + m[0] = CTL_HW; + m[1] = HW_NCPU; + len = sizeof(int); + sysctl(m, 2, &nums, &len, NULL, 0); + } + + return nums; +} + +#endif + +#if defined(OS_DARWIN) +int get_num_procs(void) { + static int nums = 0; + size_t len; + if (nums == 0){ + len = sizeof(int); + sysctlbyname("hw.physicalcpu", &nums, &len, NULL, 0); + } + return nums; +} +/* +void set_stack_limit(int limitMB){ + int result=0; + struct rlimit rl; + rlim_t StackSize; + + StackSize=limitMB*1024*1024; + result=getrlimit(RLIMIT_STACK, &rl); + if(result==0){ + if(rl.rlim_cur < StackSize){ + rl.rlim_cur=StackSize; + result=setrlimit(RLIMIT_STACK, &rl); + if(result !=0){ + fprintf(stderr, "OpenBLAS: set stack limit error =%d\n", result); + } + } + } +} +*/ +#endif + + +/* +OpenBLAS uses the numbers of CPU cores in multithreading. +It can be set by openblas_set_num_threads(int num_threads); +*/ +int blas_cpu_number = 0; +/* +The numbers of threads in the thread pool. +This value is equal or large than blas_cpu_number. This means some threads are sleep. 
+*/ +int blas_num_threads = 0; + +int goto_get_num_procs (void) { + return blas_cpu_number; +} + +void openblas_fork_handler() +{ + // This handler shuts down the OpenBLAS-managed PTHREAD pool when OpenBLAS is + // built with "make USE_OPENMP=0". + // Hanging can still happen when OpenBLAS is built against the libgomp + // implementation of OpenMP. The problem is tracked at: + // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60035 + // In the mean time build with USE_OPENMP=0 or link against another + // implementation of OpenMP. +#if !((defined(OS_WINDOWS) && !defined(OS_CYGWIN_NT)) || defined(OS_ANDROID)) && defined(SMP_SERVER) + int err; + err = pthread_atfork ((void (*)(void)) BLASFUNC(blas_thread_shutdown), NULL, NULL); + if(err != 0) + openblas_warning(0, "OpenBLAS Warning ... cannot install fork handler. You may meet hang after fork.\n"); +#endif +} + +extern int openblas_num_threads_env(); +extern int openblas_goto_num_threads_env(); +extern int openblas_omp_num_threads_env(); + +int blas_get_cpu_number(void){ +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) + int max_num; +#endif + int blas_goto_num = 0; + int blas_omp_num = 0; + + if (blas_num_threads) return blas_num_threads; + +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) + max_num = get_num_procs(); +#endif + + // blas_goto_num = 0; +#ifndef USE_OPENMP + blas_goto_num=openblas_num_threads_env(); + if (blas_goto_num < 0) blas_goto_num = 0; + + if (blas_goto_num == 0) { + blas_goto_num=openblas_goto_num_threads_env(); + if (blas_goto_num < 0) blas_goto_num = 0; + } + +#endif + + // blas_omp_num = 0; + blas_omp_num=openblas_omp_num_threads_env(); + if (blas_omp_num < 0) blas_omp_num = 0; + + if (blas_goto_num > 0) blas_num_threads = blas_goto_num; + else if (blas_omp_num > 0) blas_num_threads = blas_omp_num; + else blas_num_threads = MAX_CPU_NUMBER; + +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) + if (blas_num_threads > max_num) blas_num_threads = max_num; +#endif + + if (blas_num_threads > MAX_CPU_NUMBER) blas_num_threads = MAX_CPU_NUMBER; + +#ifdef DEBUG + printf( "Adjusted number of threads : %3d\n", blas_num_threads); +#endif + + blas_cpu_number = blas_num_threads; + + return blas_num_threads; +} +#endif + + +int openblas_get_num_procs(void) { +#ifndef SMP + return 1; +#else + return get_num_procs(); +#endif +} + +int openblas_get_num_threads(void) { +#ifndef SMP + return 1; +#else + // init blas_cpu_number if needed + blas_get_cpu_number(); + return blas_cpu_number; +#endif +} diff --git a/driver/others/openblas_get_config.c b/driver/others/openblas_get_config.c index 81648fb7c..7fefee33d 100644 --- a/driver/others/openblas_get_config.c +++ b/driver/others/openblas_get_config.c @@ -78,10 +78,10 @@ char tmpstr[20]; #ifdef DYNAMIC_ARCH strcat(tmp_config_str, gotoblas_corename()); #endif -if (openblas_get_parallel() == 0) - sprintf(tmpstr, " SINGLE_THREADED"); -else - snprintf(tmpstr,19," MAX_THREADS=%d",MAX_CPU_NUMBER); + if (openblas_get_parallel() == 0) + sprintf(tmpstr, " SINGLE_THREADED"); + else + snprintf(tmpstr,19," MAX_THREADS=%d",MAX_CPU_NUMBER); strcat(tmp_config_str, tmpstr); return tmp_config_str; } diff --git 
a/driver/others/parameter.c b/driver/others/parameter.c index 8bf7da78b..36da13369 100644 --- a/driver/others/parameter.c +++ b/driver/others/parameter.c @@ -62,6 +62,11 @@ BLASLONG gemm_offset_b = DEFAULT_GEMM_OFFSET_B; BLASLONG gemm_offset_b = GEMM_OFFSET_B; #endif +#if SBGEMM_P == sbgemm_p +BLASLONG sbgemm_p = DEFAULT_GEMM_P; +#else +BLASLONG sbgemm_p = SBGEMM_P; +#endif #if SGEMM_P == sgemm_p BLASLONG sgemm_p = DEFAULT_GEMM_P; #else @@ -83,6 +88,11 @@ BLASLONG zgemm_p = DEFAULT_GEMM_P; BLASLONG zgemm_p = ZGEMM_P; #endif +#if SBGEMM_Q == sbgemm_q +BLASLONG sbgemm_q = DEFAULT_GEMM_Q; +#else +BLASLONG sbgemm_q = SBGEMM_Q; +#endif #if SGEMM_Q == sgemm_q BLASLONG sgemm_q = DEFAULT_GEMM_Q; #else @@ -104,6 +114,11 @@ BLASLONG zgemm_q = DEFAULT_GEMM_Q; BLASLONG zgemm_q = ZGEMM_Q; #endif +#if SBGEMM_R == sbgemm_r +BLASLONG sbgemm_r = DEFAULT_GEMM_R; +#else +BLASLONG sbgemm_r = SBGEMM_R; +#endif #if SGEMM_R == sgemm_r BLASLONG sgemm_r = DEFAULT_GEMM_R; #else @@ -165,9 +180,10 @@ int get_L2_size(void){ int eax, ebx, ecx, edx; #if defined(ATHLON) || defined(OPTERON) || defined(BARCELONA) || defined(BOBCAT) || defined(BULLDOZER) || \ - defined(CORE_PRESCOTT) || defined(CORE_CORE2) || defined(PENRYN) || defined(DUNNINGTON) || \ - defined(CORE_NEHALEM) || defined(CORE_SANDYBRIDGE) || defined(ATOM) || defined(GENERIC) || \ - defined(PILEDRIVER) || defined(HASWELL) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined(ZEN) || defined(SKYLAKEX) + defined(CORE_PRESCOTT) || defined(CORE_CORE2) || defined(PENRYN) || defined(DUNNINGTON) || \ + defined(CORE_NEHALEM) || defined(CORE_SANDYBRIDGE) || defined(ATOM) || defined(GENERIC) || \ + defined(PILEDRIVER) || defined(HASWELL) || defined(STEAMROLLER) || defined(EXCAVATOR) || \ + defined(ZEN) || defined(SKYLAKEX) || defined(COOPERLAKE) cpuid(0x80000006, &eax, &ebx, &ecx, &edx); @@ -251,7 +267,9 @@ int get_L2_size(void){ void blas_set_parameter(void){ int factor; -#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(SANDYBRIDGE) || defined(NEHALEM) || defined(HASWELL) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined(ZEN) || defined(SKYLAKEX) +#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(SANDYBRIDGE) || defined(NEHALEM) || \ + defined(HASWELL) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined(ZEN) || \ + defined(SKYLAKEX) || defined(COOPERLAKE) int size = 16; #else int size = get_L2_size(); @@ -597,6 +615,7 @@ void blas_set_parameter(void){ size = BITMASK(cpuid3, 16, 0xff); + sbgemm_p = 192 * (size + 1); sgemm_p = 192 * (size + 1); dgemm_p = 96 * (size + 1); cgemm_p = 96 * (size + 1); @@ -610,6 +629,7 @@ void blas_set_parameter(void){ xgemm_p = 16 * (size + 1); #endif + sbgemm_r = (((BUFFER_SIZE - ((SBGEMM_P * SBGEMM_Q * 4 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (SBGEMM_Q * 4)) - 15) & ~15; sgemm_r = (((BUFFER_SIZE - ((SGEMM_P * SGEMM_Q * 4 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (SGEMM_Q * 4)) - 15) & ~15; dgemm_r = (((BUFFER_SIZE - ((DGEMM_P * DGEMM_Q * 8 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (DGEMM_Q * 8)) - 15) & ~15; cgemm_r = (((BUFFER_SIZE - ((CGEMM_P * CGEMM_Q * 8 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (CGEMM_Q * 8)) - 15) & ~15; @@ -697,7 +717,7 @@ void blas_set_parameter(void){ #if defined(ARCH_MIPS64) void blas_set_parameter(void){ -#if defined(LOONGSON3A) +#if defined(LOONGSON3R3) || defined(LOONGSON3R4) #ifdef SMP if(blas_num_threads == 1){ #endif @@ -711,20 +731,6 @@ void blas_set_parameter(void){ #endif #endif -#if defined(LOONGSON3B) -#ifdef SMP - if(blas_num_threads == 1 || 
blas_num_threads == 2){ -#endif - //single thread - dgemm_r = 640; -#ifdef SMP - }else{ - //multi thread - dgemm_r = 160; - } -#endif -#endif - } #endif diff --git a/exports/Makefile b/exports/Makefile index b1348bd4a..eec0593aa 100644 --- a/exports/Makefile +++ b/exports/Makefile @@ -30,6 +30,22 @@ ifndef BUILD_LAPACK_DEPRECATED BUILD_LAPACK_DEPRECATED = 0 endif +ifndef BUILD_BFLOAT16 +BUILD_BFLOAT16 = 0 +endif +ifndef BUILD_SINGLE +BUILD_SINGLE = 0 +endif +ifndef BUILD_DOUBLE +BUILD_DOUBLE = 0 +endif +ifndef BUILD_COMPLEX +BUILD_COMPLEX = 0 +endif +ifndef BUILD_COMPLEX16 +BUILD_COMPLEX16 = 0 +endif + ifeq ($(OSNAME), WINNT) ifeq ($(F_COMPILER), GFORTRAN) ifndef ONLY_CBLAS @@ -51,6 +67,10 @@ endif endif endif +ifeq ($(C_COMPILER), PGI) +EXTRALIB += -pgf90libs +endif + ifneq (,$(filter 1 2,$(NOFORTRAN))) FEXTRALIB = endif @@ -100,10 +120,14 @@ dll : ../$(LIBDLLNAME) -Wl,--whole-archive ../$(LIBNAME) -Wl,--no-whole-archive $(FEXTRALIB) $(EXTRALIB) $(LIBPREFIX).def : gensymbol - perl ./gensymbol win2k $(ARCH) dummy $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) > $(@F) + perl ./gensymbol win2k $(ARCH) dummy $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) $(BUILD_BFLOAT16) $(BUILD_SINGLE) $(BUILD_DOUBLE) $(BUILD_COMPLEX) $(BUILD_COMPLEX16) > $(@F) libgoto_hpl.def : gensymbol - perl ./gensymbol win2khpl $(ARCH) dummy $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) > $(@F) + perl ./gensymbol win2khpl $(ARCH) dummy $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) $(BUILD_BFLOAT16) $(BUILD_SINGLE) $(BUILD_DOUBLE) $(BUILD_COMPLEX) $(BUILD_COMPLEX16) > $(@F) + +ifeq ($(OSNAME), Darwin) +INTERNALNAME = $(LIBPREFIX).$(MAJOR_VERSION).dylib +endif ifeq (, $(SYMBOLPREFIX)$(SYMBOLSUFFIX)) $(LIBDYNNAME) : ../$(LIBNAME) osx.def @@ -114,15 +138,15 @@ $(LIBDYNNAME) : ../$(LIBNAME).osx.renamed osx.def endif ifneq (,$(filter 1 2,$(NOFORTRAN))) #only build without Fortran - $(CC) $(CFLAGS) $(LDFLAGS) -all_load -headerpad_max_install_names -install_name "$(CURDIR)/../$(LIBDYNNAME)" -dynamiclib -o ../$(LIBDYNNAME) $< -Wl,-exported_symbols_list,osx.def $(FEXTRALIB) + $(CC) $(CFLAGS) $(LDFLAGS) -all_load -headerpad_max_install_names -install_name "$(CURDIR)/../$(INTERNALNAME)" -dynamiclib -o ../$(LIBDYNNAME) $< -Wl,-exported_symbols_list,osx.def $(FEXTRALIB) else - $(FC) $(FFLAGS) $(LDFLAGS) -all_load -headerpad_max_install_names -install_name "$(CURDIR)/../$(LIBDYNNAME)" -dynamiclib -o ../$(LIBDYNNAME) $< -Wl,-exported_symbols_list,osx.def $(FEXTRALIB) + $(FC) $(FFLAGS) $(LDFLAGS) -all_load -headerpad_max_install_names -install_name "$(CURDIR)/../$(INTERNALNAME)" -dynamiclib -o ../$(LIBDYNNAME) $< -Wl,-exported_symbols_list,osx.def $(FEXTRALIB) endif dllinit.$(SUFFIX) : dllinit.c $(CC) $(CFLAGS) -c -o $(@F) -s $< -ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku)) +ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku FreeBSD DragonFly)) so : ../$(LIBSONAME) @@ -147,8 +171,12 @@ ifeq ($(F_COMPILER), INTEL) -Wl,--whole-archive $< -Wl,--no-whole-archive \ -Wl,-soname,$(INTERNALNAME) $(EXTRALIB) $(CC) $(CFLAGS) $(LDFLAGS) -w -o linktest linktest.c ../$(LIBSONAME) $(FEXTRALIB) && echo OK. 
+else ifeq ($(F_COMPILER), FLANG) + $(FC) $(FFLAGS) $(LDFLAGS) -shared -o ../$(LIBSONAME) \ + -Wl,--whole-archive $< -Wl,--no-whole-archive \ + -Wl,-soname,$(INTERNALNAME) $(EXTRALIB) + $(CC) $(CFLAGS) $(LDFLAGS) -w -o linktest linktest.c ../$(LIBSONAME) $(FEXTRALIB) && echo OK. else - ifneq ($(C_COMPILER), LSB) $(CC) $(CFLAGS) $(LDFLAGS) -shared -o ../$(LIBSONAME) \ -Wl,--whole-archive $< -Wl,--no-whole-archive \ @@ -167,7 +195,7 @@ endif endif #http://stackoverflow.com/questions/7656425/makefile-ifeq-logical-or -ifeq ($(OSNAME), $(filter $(OSNAME),FreeBSD OpenBSD NetBSD DragonFly)) +ifeq ($(OSNAME), $(filter $(OSNAME),OpenBSD NetBSD)) so : ../$(LIBSONAME) @@ -230,23 +258,23 @@ static : ../$(LIBNAME) rm -f goto.$(SUFFIX) osx.def : gensymbol ../Makefile.system ../getarch.c - perl ./gensymbol osx $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) > $(@F) + perl ./gensymbol osx $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) $(BUILD_BFLOAT16) $(BUILD_SINGLE) $(BUILD_DOUBLE) $(BUILD_COMPLEX) $(BUILD_COMPLEX16) > $(@F) aix.def : gensymbol ../Makefile.system ../getarch.c - perl ./gensymbol aix $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) > $(@F) + perl ./gensymbol aix $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) $(BUILD_BFLOAT16) $(BUILD_SINGLE) $(BUILD_DOUBLE) $(BUILD_COMPLEX) $(BUILD_COMPLEX16) > $(@F) objcopy.def : gensymbol ../Makefile.system ../getarch.c - perl ./gensymbol objcopy $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) > $(@F) + perl ./gensymbol objcopy $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) $(BUILD_BFLOAT16) $(BUILD_SINGLE) $(BUILD_DOUBLE) $(BUILD_COMPLEX) $(BUILD_COMPLEX16) > $(@F) objconv.def : gensymbol ../Makefile.system ../getarch.c - perl ./gensymbol objconv $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) > $(@F) + perl ./gensymbol objconv $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) $(BUILD_BFLOAT16) $(BUILD_SINGLE) $(BUILD_DOUBLE) $(BUILD_COMPLEX) $(BUILD_COMPLEX16) > $(@F) test : linktest.c $(CC) $(CFLAGS) $(LDFLAGS) -w -o linktest linktest.c ../$(LIBSONAME) -lm && echo OK. 
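/*
 * The "test" target above only checks that every exported symbol resolves:
 * gensymbol emits a linktest.c referencing each routine, the compiler links
 * it against the freshly built library, and the binary is discarded. A
 * hand-written miniature of the idea (the real file is generated and covers
 * every export; these three declarations are just examples):
 */
void sgemm_(void);
void dgemm_(void);
void xerbla_(void);

int main(void) {
    /* Taking each symbol's address forces the linker to resolve it
       without actually calling into the library. */
    void (*volatile refs[])(void) = { sgemm_, dgemm_, xerbla_ };
    return refs[0] == 0;    /* 0: all symbols resolved */
}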
rm -f linktest linktest.c : gensymbol ../Makefile.system ../getarch.c - perl ./gensymbol linktest $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) > linktest.c + perl ./gensymbol linktest $(ARCH) $(BU) $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) $(BUILD_BFLOAT16) $(BUILD_SINGLE) $(BUILD_DOUBLE) $(BUILD_COMPLEX) $(BUILD_COMPLEX16) > linktest.c clean :: @rm -f *.def *.dylib __.SYMDEF* *.renamed diff --git a/exports/dllinit.c b/exports/dllinit.c index 4a05c0e14..88f9af658 100644 --- a/exports/dllinit.c +++ b/exports/dllinit.c @@ -50,7 +50,10 @@ BOOL APIENTRY DllMain(HINSTANCE hInst, DWORD reason, LPVOID reserved) { gotoblas_init(); break; case DLL_PROCESS_DETACH: - gotoblas_quit(); + // If the process is about to exit, don't bother releasing any resources + // The kernel is much better at bulk releasing then. + if (!reserved) + gotoblas_quit(); break; case DLL_THREAD_ATTACH: break; diff --git a/exports/gensymbol b/exports/gensymbol index 21a1b703d..857a17a9e 100644 --- a/exports/gensymbol +++ b/exports/gensymbol @@ -16,73 +16,86 @@ # 2017/08/01 Saar # removed blas_thread_shutdown_ # -@blasobjs = ( - caxpy,ccopy,cdotc,cdotu,cgbmv,cgemm,cgemv,cgerc,cgeru, - chbmv,chemm,chemv,cher2,cher2k,cher,cherk, - chpmv,chpr2,chpr,crotg,cscal,csrot,csscal,cswap, +@blasobjsc = ( + caxpy,caxpby,ccopy,cdotc,cdotu,cgbmv,cgemm,cgemv,cgerc,cgeru, + chbmv,chemm,chemv,cher2,cher2k,cher,cherk,scabs1,scamax, + chpmv,chpr2,chpr,crotg,cscal,csrot,csscal,cswap,scamin,scasum,scnrm2, csymm,csyr2k,csyrk,ctbmv,ctbsv,ctpmv,ctpsv,ctrmm,ctrmv,ctrsm, - ctrsv, - damax,damin,dasum,daxpy,dcabs1,dcopy,ddot,dgbmv,dgemm, + ctrsv,icamax,icamin,cimatcopy,comatcopy,cgeadd,scsum); + +@blasobjsd = ( + damax,damin,dasum,daxpy,daxpby,dcabs1,dcopy,ddot,dgbmv,dgemm, dgemv,dger,dmax,dmin,dnrm2,drot,drotg,drotm,drotmg,dsbmv, - dscal,dsdot,dspmv,dspr2, + dscal,dsdot,dspmv,dspr2,dimatcopy,domatcopy, dspr,dswap,dsymm,dsymv,dsyr2,dsyr2k,dsyr,dsyrk,dtbmv,dtbsv, - dtpmv,dtpsv,dtrmm,dtrmv,dtrsm,dtrsv,dzamax,dzamin,dzasum,dznrm2, - icamax,icamin,idamax,idamin,idmax,idmin,isamax,isamin,ismax,ismin, - izamax,izamin,lsame,samax,samin,sasum,saxpy,scabs1,scamax, - scamin,scasum,scnrm2,scopy,sdot,sdsdot,sgbmv,sgemm,sgemv,sger, - smax,smin,snrm2, + dtpmv,dtpsv,dtrmm,dtrmv,dtrsm,dtrsv, + idamax,idamin,idmax,idmin,dgeadd,dsum); + +@blasobjss = ( + isamax,isamin,ismax,ismin, + samax,samin,sasum,saxpy, saxpby, + scopy,sdot,sdsdot,sgbmv,sgemm,sgemv,sger, + smax,smin,snrm2,simatcopy,somatcopy, srot,srotg,srotm,srotmg,ssbmv,sscal,sspmv,sspr2,sspr,sswap, ssymm,ssymv,ssyr2,ssyr2k,ssyr,ssyrk,stbmv,stbsv,stpmv,stpsv, - strmm,strmv,strsm,strsv,zaxpy,zcopy,zdotc,zdotu,zdrot, + strmm,strmv,strsm,strsv, sgeadd,ssum); + +@blasobjsz = ( + izamax,izamin,, + zaxpy,zaxpby,zcopy,zdotc,zdotu,zdrot, zdscal,zgbmv,zgemm,zgemv,zgerc,zgeru, zhbmv,zhemm,zhemv,zher2,zher2k,zher,zherk,zhpmv,zhpr2, zhpr,zrotg,zscal,zswap,zsymm,zsyr2k,zsyrk,ztbmv, ztbsv,ztpmv,ztpsv,ztrmm,ztrmv,ztrsm,ztrsv, - xerbla, - saxpby,daxpby,caxpby,zaxpby, - sgeadd,dgeadd,cgeadd,zgeadd, - somatcopy, - simatcopy, - domatcopy, - dimatcopy, - comatcopy, - cimatcopy, - zomatcopy, - zimatcopy, -); + zomatcopy, zimatcopy,dzamax,dzamin,dzasum,dznrm2, + zgeadd, dzsum); -@cblasobjs = ( +@blasobjs = (lsame, xerbla); +@bfblasobjs = (sbgemm, sbgemv, sbdot, sbstobf16, sbdtobf16, sbf16tos, dbf16tod); 
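/*
 * The dllinit.c hunk above relies on a documented DllMain contract: on
 * DLL_PROCESS_DETACH, lpReserved is NULL only for FreeLibrary(); it is
 * non-NULL when the whole process is terminating, in which case cleanup is
 * pointless and can deadlock on threads the loader has already killed.
 * A minimal sketch; my_cleanup() is hypothetical.
 */
#ifdef _WIN32
#include <windows.h>

static void my_cleanup(void) { /* release pools, stop helper threads, ... */ }

BOOL WINAPI DllMain(HINSTANCE hInst, DWORD reason, LPVOID reserved) {
    if (reason == DLL_PROCESS_DETACH && reserved == NULL)
        my_cleanup();   /* FreeLibrary: the process lives on, so tidy up */
    /* reserved != NULL: process exit; the kernel bulk-releases resources */
    return TRUE;
}
#endif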
+@cblasobjsc = ( cblas_caxpy, cblas_ccopy, cblas_cdotc, cblas_cdotu, cblas_cgbmv, cblas_cgemm, cblas_cgemv, cblas_cgerc, cblas_cgeru, cblas_chbmv, cblas_chemm, cblas_chemv, cblas_cher2, cblas_cher2k, - cblas_cher, cblas_cherk, cblas_chpmv, cblas_chpr2, cblas_chpr, cblas_cscal, - cblas_csscal, cblas_cswap, cblas_csymm, cblas_csyr2k, cblas_csyrk, cblas_ctbmv, - cblas_ctbsv, cblas_ctpmv, cblas_ctpsv, cblas_ctrmm, cblas_ctrmv, cblas_ctrsm, cblas_ctrsv, + cblas_cher, cblas_cherk, cblas_chpmv, cblas_chpr2, cblas_chpr, cblas_cscal, cblas_caxpby, + cblas_csscal, cblas_cswap, cblas_csymm, cblas_csyr2k, cblas_csyrk, cblas_ctbmv, cblas_cgeadd, + cblas_ctbsv, cblas_ctpmv, cblas_ctpsv, cblas_ctrmm, cblas_ctrmv, cblas_ctrsm, cblas_ctrsv, + cblas_scnrm2, cblas_scasum, + cblas_icamax, cblas_icamin, cblas_icmin, cblas_icmax, cblas_scsum,cblas_cimatcopy,cblas_comatcopy + ); +@cblasobjsd = ( cblas_dasum, cblas_daxpy, cblas_dcopy, cblas_ddot, cblas_dgbmv, cblas_dgemm, cblas_dgemv, cblas_dger, cblas_dnrm2, cblas_drot, cblas_drotg, cblas_drotm, cblas_drotmg, cblas_dsbmv, cblas_dscal, cblas_dsdot, cblas_dspmv, cblas_dspr2, cblas_dspr, cblas_dswap, cblas_dsymm, cblas_dsymv, cblas_dsyr2, cblas_dsyr2k, cblas_dsyr, cblas_dsyrk, cblas_dtbmv, cblas_dtbsv, cblas_dtpmv, cblas_dtpsv, - cblas_dtrmm, cblas_dtrmv, cblas_dtrsm, cblas_dtrsv, cblas_dzasum, - cblas_dznrm2, cblas_icamax, cblas_idamax, - cblas_isamax, cblas_izamax, - cblas_sasum, cblas_saxpy, - cblas_scasum, cblas_scnrm2, cblas_scopy, cblas_sdot, cblas_sdsdot, cblas_sgbmv, cblas_sgemm, + cblas_dtrmm, cblas_dtrmv, cblas_dtrsm, cblas_dtrsv, cblas_daxpby, cblas_dgeadd, + cblas_idamax, cblas_idamin, cblas_idmin, cblas_idmax, cblas_dsum,cblas_dimatcopy,cblas_domatcopy + ); + +@cblasobjss = ( + cblas_sasum, cblas_saxpy, cblas_saxpby, + cblas_scopy, cblas_sdot, cblas_sdsdot, cblas_sgbmv, cblas_sgemm, cblas_sgemv, cblas_sger, cblas_snrm2, cblas_srot, cblas_srotg, cblas_srotm, cblas_srotmg, cblas_ssbmv, cblas_sscal, cblas_sspmv, cblas_sspr2, cblas_sspr, cblas_sswap, cblas_ssymm, cblas_ssymv, cblas_ssyr2, cblas_ssyr2k, cblas_ssyr, cblas_ssyrk, cblas_stbmv, cblas_stbsv, cblas_stpmv, cblas_stpsv, cblas_strmm, cblas_strmv, cblas_strsm, - cblas_strsv, cblas_zaxpy, cblas_zcopy, cblas_zdotc, cblas_zdotu, cblas_zdscal, + cblas_strsv, cblas_sgeadd, + cblas_isamax, cblas_isamin, cblas_ismin, cblas_ismax, cblas_ssum,cblas_simatcopy,cblas_somatcopy + ); +@cblasobjsz = ( + cblas_dzasum, cblas_dznrm2, cblas_zaxpy, cblas_zcopy, cblas_zdotc, cblas_zdotu, cblas_zdscal, cblas_zgbmv, cblas_zgemm, cblas_zgemv, cblas_zgerc, cblas_zgeru, cblas_zhbmv, cblas_zhemm, cblas_zhemv, cblas_zher2, cblas_zher2k, cblas_zher, cblas_zherk, cblas_zhpmv, cblas_zhpr2, cblas_zhpr, cblas_zscal, cblas_zswap, cblas_zsymm, cblas_zsyr2k, cblas_zsyrk, cblas_ztbmv, cblas_ztbsv, cblas_ztpmv, cblas_ztpsv, cblas_ztrmm, cblas_ztrmv, cblas_ztrsm, cblas_ztrsv, cblas_cdotc_sub, cblas_cdotu_sub, cblas_zdotc_sub, cblas_zdotu_sub, - cblas_saxpby,cblas_daxpby,cblas_caxpby,cblas_zaxpby, - cblas_somatcopy, cblas_domatcopy, cblas_comatcopy, cblas_zomatcopy, - cblas_simatcopy, cblas_dimatcopy, cblas_cimatcopy, cblas_zimatcopy, - cblas_sgeadd, cblas_dgeadd,cblas_cgeadd, cblas_zgeadd + cblas_zaxpby, cblas_zgeadd, + cblas_izamax, cblas_izamin, cblas_izmin, cblas_izmax, cblas_dzsum,cblas_zimatcopy,cblas_zomatcopy ); +@cblasobjs = ( cblas_xerbla ); + +@bfcblasobjs = (cblas_sbgemm, cblas_sbgemv, cblas_sbdot, cblas_sbstobf16, cblas_sbdtobf16, cblas_sbf16tos, cblas_dbf16tod); + @exblasobjs = ( 
qamax,qamin,qasum,qaxpy,qcabs1,qcopy,qdot,qgbmv,qgemm, qgemv,qger,qmax,qmin, @@ -100,12 +113,22 @@ # xdrot,xrotg, ); -@gemm3mobjs = ( - cgemm3m,zgemm3m + @gemm3mobjs=(); + + @cblasgemm3mobjs=(); + +@gemm3mobjsc = ( + cgemm3m, +); +@gemm3mobjsz = ( + zgemm3m ); -@cblasgemm3mobjs = ( - cblas_cgemm3m,cblas_zgemm3m +@cblasgemm3mobjsc = ( + cblas_cgemm3m +); +@cblasgemm3mobjsz = ( + cblas_zgemm3m ); @@ -128,22 +151,68 @@ @misc_underscore_objs = ( ); -@lapackobjs = ( +@lapackobjss = ( # These routines are provided by OpenBLAS. - sgesv, dgesv, cgesv, zgesv, - sgetf2, dgetf2, cgetf2, zgetf2, - sgetrf, dgetrf, cgetrf, zgetrf, - slaswp, dlaswp, claswp, zlaswp, - sgetrs, dgetrs, cgetrs, zgetrs, - slauu2, dlauu2, clauu2, zlauu2, - slauum, dlauum, clauum, zlauum, - spotf2, dpotf2, cpotf2, zpotf2, - spotrf, dpotrf, cpotrf, zpotrf, - strti2, dtrti2, ctrti2, ztrti2, - strtri, dtrtri, ctrtri, ztrtri, - spotri, dpotri, cpotri, zpotri, + sgesv, + sgetf2, + sgetrf, + slaswp, + sgetrs, + slauu2, + slauum, + spotf2, + spotrf, + strti2, + strtri, + spotri, +); + +@lapackobjsd = ( + dgesv, + dgetf2, + dgetrf, + dlaswp, + dgetrs, + dlauu2, + dlauum, + dpotf2, + dpotrf, + dtrti2, + dtrtri, + dpotri, +); + +@lapackobjsc = ( +cgesv, +cgetf2, +cgetrf, +claswp, +cgetrs, +clauu2, +clauum, +cpotf2, +cpotrf, +ctrti2, +ctrtri, +cpotri, +); + +@lapackobjsz = ( +zgesv, +zgetf2, +zgetrf, +zlaswp, +zgetrs, +zlauu2, +zlauum, +zpotf2, +zpotrf, +ztrti2, +ztrtri, +zpotri, ); + @lapackobjs2 = ( # These routines are provided by LAPACK (reference implementation). # @@ -159,7 +228,9 @@ ilaenv, ieeeck, lsamen, iparmq, ilaprec, ilatrans, ilauplo, iladiag, ilaver, slamch, slamc3, - +); + +@lapackobjs2sc = ( # SCLAUX -- Auxiliary routines called from both REAL and COMPLEX. # excluded: second_$(TIMER) sbdsdc, @@ -177,7 +248,9 @@ slasr, slasrt, slassq, slasv2, spttrf, sstebz, sstedc, ssteqr, ssterf, slaisnan, sisnan, slartgp, slartgs, +); +@lapackobjs2dz = ( # DZLAUX -- Auxiliary routines called from both DOUBLE and COMPLEX*16. # excluded: dsecnd_$(TIMER) dbdsdc, @@ -196,7 +269,9 @@ dsteqr, dsterf, dlaisnan, disnan, dlartgp, dlartgs, dlamch, dlamc3, +); +@lapackobjs2s = ( # SLASRC -- Single precision real LAPACK routines # already provided by @lapackobjs: # sgesv, sgetf2, slaswp, slauu2, slauum, spotf2, spotri, strti2, strtri @@ -259,7 +334,9 @@ sorbdb5, sorbdb6, sorcsd, sorcsd2by1, sgeqrt, sgeqrt2, sgeqrt3, sgemqrt, stpqrt, stpqrt2, stpmqrt, stprfb, +); +@lapackobjs2ds = ( # DSLASRC -- Double-single mixed precision real routines called from # single, single-extra and double precision real LAPACK # routines (i.e. from SLASRC, SXLASRC, DLASRC). @@ -267,7 +344,9 @@ # already provided by @lapackobjs: # sgetrs, spotrf, sgetrf spotrs, +); +@lapackobjs2c = ( # CLASRC -- Single precision complex LAPACK routines # already provided by @blasobjs: # already provided by @lapackobjs: @@ -335,7 +414,8 @@ cunbdb5, cunbdb6, cuncsd, cuncsd2by1, cgeqrt, cgeqrt2, cgeqrt3, cgemqrt, ctpqrt, ctpqrt2, ctpmqrt, ctprfb, - +); +@lapackobjs2zc = ( # ZCLASRC -- Double-single mixed precision complex routines called from # single, single-extra and double precision complex LAPACK # routines (i.e. from CLASRC, CXLASRC, ZLASRC). 
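/*
 * The gensymbol rewrite above splits every symbol table by precision
 * (s/d/c/z plus the new bfloat16 group) so BUILD_SINGLE, BUILD_DOUBLE,
 * BUILD_COMPLEX, BUILD_COMPLEX16 and BUILD_BFLOAT16 can each export only
 * their own routines. The C sources in this same diff gate code the same
 * way (see the BUILD_DOUBLE/BUILD_COMPLEX16 allocsize chain in memory.c).
 * An illustrative sketch of that compile-time selection, with simplified
 * element sizes:
 */
#include <stddef.h>

static size_t gemm_workspace_bytes(size_t p, size_t q) {
#if defined(BUILD_DOUBLE)
    return p * q * sizeof(double);          /* double real */
#elif defined(BUILD_COMPLEX16)
    return p * q * 2 * sizeof(double);      /* double complex */
#elif defined(BUILD_COMPLEX)
    return p * q * 2 * sizeof(float);       /* single complex */
#else
    return p * q * sizeof(float);           /* single real fallback */
#endif
}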
@@ -343,7 +423,9 @@ # already provided by @lapackobjs: # cgetrs, cpotrf, cgetrf cpotrs, +); +@lapackobjs2d = ( # DLASRC -- Double precision real LAPACK routines # already provided by @lapackobjs: # dgesv, dgetf2, dgetrs, dlaswp, dlauu2, dlauum, dpotf2, dpotrf, dpotri, @@ -408,7 +490,8 @@ dorbdb5, dorbdb6, dorcsd, dorcsd2by1, dgeqrt, dgeqrt2, dgeqrt3, dgemqrt, dtpqrt, dtpqrt2, dtpmqrt, dtprfb, - +); +@lapackobjs2z = ( # ZLASRC -- Double precision complex LAPACK routines # already provided by @blasobjs: # already provided by @lapackobjs: @@ -482,8 +565,10 @@ zunbdb5, zunbdb6, zuncsd, zuncsd2by1, zgeqrt, zgeqrt2, zgeqrt3, zgemqrt, ztpqrt, ztpqrt2, ztpmqrt, ztprfb, +); # functions added for lapack-3.6.0 +@lapackobjs2c = ( @lapackobjs2c, cgejsv, cgesvdx, cgesvj, @@ -518,6 +603,8 @@ cspr2, csyr2, cunm22, +); +@lapackobjs2d = (@lapackobjs2d, dbdsvdx, dgesvdx, dgetrf2, @@ -549,6 +636,8 @@ dorm22, dpotrf2, dsecnd, + ); + @lapackobjs2s = (@lapackobjs2s, sbdsvdx, second, sgesvdx, @@ -580,6 +669,8 @@ slatmt, sorm22, spotrf2, + ); + @lapackobjs2z = (@lapackobjs2z, zgejsv, zgesvdx, zgesvj, @@ -614,23 +705,10 @@ zspr2, zsyr2, zunm22, - +); # functions added for lapack-3.7.0 - +@lapackobjs2s = (@lapackobjs2s, slarfy, - slasyf_rk, - ssyconvf_rook, - ssytf2_rk, - ssytrf_rk, - ssytrs_3, - ssytri_3, - ssytri_3x, - ssycon_3, - ssysv_rk, - slasyf_aa, - ssysv_aa, - ssytrf_aa, - ssytrs_aa, strevc3, sgelqt, sgelqt3, @@ -647,33 +725,10 @@ stplqt, stplqt2, stpmlqt, - ssytrd_2stage, - ssytrd_sy2sb, - ssytrd_sb2st, - ssb2st_kernels, - ssyevd_2stage, - ssyev_2stage, - ssyevx_2stage, - ssyevr_2stage, - ssbev_2stage, - ssbevx_2stage, - ssbevd_2stage, - ssygv_2stage, + ); + @lapackobjs2d = (@lapackobjs2d, dlarfy, - dlasyf_rk, dsyconvf, - dsyconvf_rook, - dsytf2_rk, - dsytrf_rk, - dsytrs_3, - dsytri_3, - dsytri_3x, - dsycon_3, - dsysv_rk, - dlasyf_aa, - dsysv_aa, - dsytrf_aa, - dsytrs_aa, dtrevc3, dgelqt, dgelqt3, @@ -690,45 +745,10 @@ dtplqt, dtplqt2, dtpmlqt, - dsytrd_2stage, - dsytrd_sy2sb, - dsytrd_sb2st, - dsb2st_kernels, - dsyevd_2stage, - dsyev_2stage, - dsyevx_2stage, - dsyevr_2stage, - dsbev_2stage, - dsbevx_2stage, - dsbevd_2stage, - dsygv_2stage, - chetf2_rk, - chetrf_rk, - chetri_3, - chetri_3x, - chetrs_3, - checon_3, - chesv_rk, - chesv_aa, - chetrf_aa, - chetrs_aa, - clahef_aa, - clahef_rk, + ); + @lapackobjs2c = (@lapackobjs2c, clarfy, - clasyf_rk, - clasyf_aa, csyconvf, - csyconvf_rook, - csytf2_rk, - csytrf_rk, - csytrf_aa, - csytrs_3, - csytrs_aa, - csytri_3, - csytri_3x, - csycon_3, - csysv_rk, - csysv_aa, ctrevc3, cgelqt, cgelqt3, @@ -745,45 +765,10 @@ ctplqt, ctplqt2, ctpmlqt, - chetrd_2stage, - chetrd_he2hb, - chetrd_hb2st, - chb2st_kernels, - cheevd_2stage, - cheev_2stage, - cheevx_2stage, - cheevr_2stage, - chbev_2stage, - chbevx_2stage, - chbevd_2stage, - chegv_2stage, - zhetf2_rk, - zhetrf_rk, - zhetri_3, - zhetri_3x, - zhetrs_3, - zhecon_3, - zhesv_rk, - zhesv_aa, - zhetrf_aa, - zhetrs_aa, - zlahef_aa, - zlahef_rk, + ); + @lapackobjs2z = (@lapackobjs2z, zlarfy, - zlasyf_rk, - zlasyf_aa, zsyconvf, - zsyconvf_rook, - zsytrs_aa, - zsytf2_rk, - zsytrf_rk, - zsytrf_aa, - zsytrs_3, - zsytri_3, - zsytri_3x, - zsycon_3, - zsysv_rk, - zsysv_aa, ztrevc3, ztplqt, ztplqt2, @@ -800,43 +785,34 @@ zlaswlq, zlamswlq, zgemlq, - zhetrd_2stage, - zhetrd_he2hb, - zhetrd_hb2st, - zhb2st_kernels, - zheevd_2stage, - zheev_2stage, - zheevx_2stage, - zheevr_2stage, - zhbev_2stage, - zhbevx_2stage, - zhbevd_2stage, - zhegv_2stage, - sladiv1, - dladiv1, + ); + @lapackobjs2s = (@lapackobjs2s, + sladiv1); + @lapackobjs2d = 
(@lapackobjs2d, + dladiv1); + @lapackobjs = (@lapackobjs, iparam2stage, - # functions added for lapack-3.8.0 - ilaenv2stage, - ssysv_aa_2stage, - ssytrf_aa_2stage, - ssytrs_aa_2stage, - chesv_aa_2stage, - chetrf_aa_2stage, - chetrs_aa_2stage, - csysv_aa_2stage, - csytrf_aa_2stage, - csytrs_aa_2stage, - dsysv_aa_2stage, - dsytrf_aa_2stage, - dsytrs_aa_2stage, - zhesv_aa_2stage, - zhetrf_aa_2stage, - zhetrs_aa_2stage, - zsysv_aa_2stage, - zsytrf_aa_2stage, - zsytrs_aa_2stage + ); + # functions added for lapack-3.9.0 +@lapackobjs2c = (@lapackobjs2c, + cgesvdq, + cungtsqr + ); +@lapackobjs2d = (@lapackobjs2d, + dcombssq, + dgesvdq, + dorgtsqr, + ); +@lapackobjs2s = (@lapackobjs2s, + scombssq, + sgesvdq, + sorgtsqr, + ); +@lapackobjs2z = (@lapackobjs2z, + zgesvdq, + zungtsqr ); @lapack_extendedprecision_objs = ( @@ -844,36 +820,73 @@ dlagsy, dsysvxx, sporfsx, slatms, zlatms, zherfsx, csysvxx, ); -@lapack_deprecated_objs = ( - cgegs, cggsvd, ctzrqf, dgeqpf, dlatzm, sgelsx, slahrd, zgegv, zggsvp, - cgegv, cggsvp, dgegs, dggsvd, dtzrqf, sgeqpf, slatzm, zgelsx, zlahrd, - cgelsx, clahrd, dgegv, dggsvp, sgegs, sggsvd, stzrqf, zgeqpf, zlatzm, - cgeqpf, clatzm, dgelsx, dlahrd, sgegv, sggsvp, zgegs, zggsvd, ztzrqf, -); - -@lapacke_deprecated_objs = ( +@lapack_deprecated_objsc = ( + cgegs, cggsvd, + cgegv, cggsvp, + cgelsx, clahrd, + cgeqpf, clatzm, + ctzrqf, + ); +@lapack_deprecated_objsd = ( + dgegs, dgeqpf, + dgegv, dggsvd, + dgelsx, dggsvp, + dlahrd, + dlatzm, dtzrqf); + +@lapack_deprecated_objss = ( + sgelsx, + sgegs, + sgegv, + sgeqpf, + sggsvd, + sggsvp, + slahrd, + slatzm, + stzrqf + ); + +@lapack_deprecated_objsz = ( + zgegs, + zgegv, + zgelsx, + zgeqpf, + zggsvd, + zggsvp, + zlahrd, + zlatzm, + ztzrqf + ); + +@lapacke_deprecated_objsc = ( LAPACKE_cggsvp, LAPACKE_cggsvp_work, - LAPACKE_dggsvp, - LAPACKE_dggsvp_work, - LAPACKE_sggsvp, - LAPACKE_sggsvp_work, - LAPACKE_zggsvp, - LAPACKE_zggsvp_work, LAPACKE_cggsvd, LAPACKE_cggsvd_work, - LAPACKE_dggsvd, - LAPACKE_dggsvd_work, - LAPACKE_sggsvd, - LAPACKE_sggsvd_work, - LAPACKE_zggsvd, - LAPACKE_zggsvd_work, LAPACKE_cgeqpf, LAPACKE_cgeqpf_work, +); +@lapacke_deprecated_objsd = ( + LAPACKE_dggsvp, + LAPACKE_dggsvp_work, + LAPACKE_dggsvd, + LAPACKE_dggsvd_work, LAPACKE_dgeqpf, LAPACKE_dgeqpf_work, +); +@lapacke_deprecated_objss = ( + LAPACKE_sggsvp, + LAPACKE_sggsvp_work, + LAPACKE_sggsvd, + LAPACKE_sggsvd_work, LAPACKE_sgeqpf, LAPACKE_sgeqpf_work, +); +@lapacke_deprecated_objsz = ( + LAPACKE_zggsvp, + LAPACKE_zggsvp_work, + LAPACKE_zggsvd, + LAPACKE_zggsvd_work, LAPACKE_zgeqpf, LAPACKE_zgeqpf_work, ); @@ -890,6 +903,15 @@ # exported since the respective LAPACK routines are not built by default. 
# @(OBJ) from `lapack-3.4.1/lapacke/utils/Makefile` + LAPACKE_lsame, + LAPACKE_ilaver, + LAPACKE_xerbla, + lapack_make_complex_float, + lapack_make_complex_double, + LAPACKE_get_nancheck, + LAPACKE_set_nancheck, +); +@lapackeobjsc = ( LAPACKE_cgb_nancheck, LAPACKE_cgb_trans, LAPACKE_cge_nancheck, @@ -928,118 +950,6 @@ LAPACKE_ctp_trans, LAPACKE_ctr_nancheck, LAPACKE_ctr_trans, - LAPACKE_dgb_nancheck, - LAPACKE_dgb_trans, - LAPACKE_dge_nancheck, - LAPACKE_dge_trans, - LAPACKE_dgg_nancheck, - LAPACKE_dgg_trans, - LAPACKE_dgt_nancheck, - LAPACKE_dhs_nancheck, - LAPACKE_dhs_trans, - LAPACKE_d_nancheck, - LAPACKE_dpb_nancheck, - LAPACKE_dpb_trans, - LAPACKE_dpf_nancheck, - LAPACKE_dpf_trans, - LAPACKE_dpo_nancheck, - LAPACKE_dpo_trans, - LAPACKE_dpp_nancheck, - LAPACKE_dpp_trans, - LAPACKE_dpt_nancheck, - LAPACKE_dsb_nancheck, - LAPACKE_dsb_trans, - LAPACKE_dsp_nancheck, - LAPACKE_dsp_trans, - LAPACKE_dst_nancheck, - LAPACKE_dsy_nancheck, - LAPACKE_dsy_trans, - LAPACKE_dtb_nancheck, - LAPACKE_dtb_trans, - LAPACKE_dtf_nancheck, - LAPACKE_dtf_trans, - LAPACKE_dtp_nancheck, - LAPACKE_dtp_trans, - LAPACKE_dtr_nancheck, - LAPACKE_dtr_trans, - LAPACKE_lsame, - LAPACKE_sgb_nancheck, - LAPACKE_sgb_trans, - LAPACKE_sge_nancheck, - LAPACKE_sge_trans, - LAPACKE_sgg_nancheck, - LAPACKE_sgg_trans, - LAPACKE_sgt_nancheck, - LAPACKE_shs_nancheck, - LAPACKE_shs_trans, - LAPACKE_s_nancheck, - LAPACKE_spb_nancheck, - LAPACKE_spb_trans, - LAPACKE_spf_nancheck, - LAPACKE_spf_trans, - LAPACKE_spo_nancheck, - LAPACKE_spo_trans, - LAPACKE_spp_nancheck, - LAPACKE_spp_trans, - LAPACKE_spt_nancheck, - LAPACKE_ssb_nancheck, - LAPACKE_ssb_trans, - LAPACKE_ssp_nancheck, - LAPACKE_ssp_trans, - LAPACKE_sst_nancheck, - LAPACKE_ssy_nancheck, - LAPACKE_ssy_trans, - LAPACKE_stb_nancheck, - LAPACKE_stb_trans, - LAPACKE_stf_nancheck, - LAPACKE_stf_trans, - LAPACKE_stp_nancheck, - LAPACKE_stp_trans, - LAPACKE_str_nancheck, - LAPACKE_str_trans, - LAPACKE_xerbla, - LAPACKE_zgb_nancheck, - LAPACKE_zgb_trans, - LAPACKE_zge_nancheck, - LAPACKE_zge_trans, - LAPACKE_zgg_nancheck, - LAPACKE_zgg_trans, - LAPACKE_zgt_nancheck, - LAPACKE_zhb_nancheck, - LAPACKE_zhb_trans, - LAPACKE_zhe_nancheck, - LAPACKE_zhe_trans, - LAPACKE_zhp_nancheck, - LAPACKE_zhp_trans, - LAPACKE_zhs_nancheck, - LAPACKE_zhs_trans, - LAPACKE_z_nancheck, - LAPACKE_zpb_nancheck, - LAPACKE_zpb_trans, - LAPACKE_zpf_nancheck, - LAPACKE_zpf_trans, - LAPACKE_zpo_nancheck, - LAPACKE_zpo_trans, - LAPACKE_zpp_nancheck, - LAPACKE_zpp_trans, - LAPACKE_zpt_nancheck, - LAPACKE_zsp_nancheck, - LAPACKE_zsp_trans, - LAPACKE_zst_nancheck, - LAPACKE_zsy_nancheck, - LAPACKE_zsy_trans, - LAPACKE_ztb_nancheck, - LAPACKE_ztb_trans, - LAPACKE_ztf_nancheck, - LAPACKE_ztf_trans, - LAPACKE_ztp_nancheck, - LAPACKE_ztp_trans, - LAPACKE_ztr_nancheck, - LAPACKE_ztr_trans, - lapack_make_complex_float, - lapack_make_complex_double, - - # @(SRC_OBJ) from `lapack-3.5.0/lapacke/src/Makefile` LAPACKE_cbbcsd, LAPACKE_cbbcsd_work, LAPACKE_cbdsqr, @@ -1532,76 +1442,232 @@ LAPACKE_cupgtr_work, LAPACKE_cupmtr, LAPACKE_cupmtr_work, - LAPACKE_dbbcsd, - LAPACKE_dbbcsd_work, - LAPACKE_dbdsdc, - LAPACKE_dbdsdc_work, - LAPACKE_dbdsqr, - LAPACKE_dbdsqr_work, - LAPACKE_ddisna, - LAPACKE_ddisna_work, - LAPACKE_dgbbrd, - LAPACKE_dgbbrd_work, - LAPACKE_dgbcon, - LAPACKE_dgbcon_work, - LAPACKE_dgbequ, - LAPACKE_dgbequ_work, - LAPACKE_dgbequb, - LAPACKE_dgbequb_work, - LAPACKE_dgbrfs, - LAPACKE_dgbrfs_work, - LAPACKE_dgbsv, - LAPACKE_dgbsv_work, - LAPACKE_dgbsvx, - LAPACKE_dgbsvx_work, - LAPACKE_dgbtrf, - 
LAPACKE_dgbtrf_work, - LAPACKE_dgbtrs, - LAPACKE_dgbtrs_work, - LAPACKE_dgebak, - LAPACKE_dgebak_work, - LAPACKE_dgebal, - LAPACKE_dgebal_work, - LAPACKE_dgebrd, - LAPACKE_dgebrd_work, - LAPACKE_dgecon, - LAPACKE_dgecon_work, - LAPACKE_dgeequ, - LAPACKE_dgeequ_work, - LAPACKE_dgeequb, - LAPACKE_dgeequb_work, - LAPACKE_dgees, - LAPACKE_dgees_work, - LAPACKE_dgeesx, - LAPACKE_dgeesx_work, - LAPACKE_dgeev, - LAPACKE_dgeev_work, - LAPACKE_dgeevx, - LAPACKE_dgeevx_work, - LAPACKE_dgehrd, - LAPACKE_dgehrd_work, - LAPACKE_dgejsv, - LAPACKE_dgejsv_work, - LAPACKE_dgelq2, - LAPACKE_dgelq2_work, - LAPACKE_dgelqf, - LAPACKE_dgelqf_work, - LAPACKE_dgels, - LAPACKE_dgels_work, - LAPACKE_dgelsd, - LAPACKE_dgelsd_work, - LAPACKE_dgelss, - LAPACKE_dgelss_work, - LAPACKE_dgelsy, - LAPACKE_dgelsy_work, - LAPACKE_dgemqrt, - LAPACKE_dgemqrt_work, - LAPACKE_dgeqlf, - LAPACKE_dgeqlf_work, - LAPACKE_dgeqp3, - LAPACKE_dgeqp3_work, - LAPACKE_dgeqr2, - LAPACKE_dgeqr2_work, + LAPACKE_csyr, + LAPACKE_csyr_work, + LAPACKE_clatms, + LAPACKE_clatms_work, + LAPACKE_clagge, + LAPACKE_clagge_work, + LAPACKE_claghe, + LAPACKE_claghe_work, + LAPACKE_clagsy, + LAPACKE_clagsy_work, + LAPACKE_cgejsv, + LAPACKE_cgejsv_work, + LAPACKE_cgesvdx, + LAPACKE_cgesvdx_work, + LAPACKE_cgesvj, + LAPACKE_cgesvj_work, + LAPACKE_cgetrf2, + LAPACKE_cgetrf2_work, + LAPACKE_cgges3, + LAPACKE_cgges3_work, + LAPACKE_cggev3, + LAPACKE_cggev3_work, + LAPACKE_cgghd3, + LAPACKE_cgghd3_work, + LAPACKE_cggsvd3, + LAPACKE_cggsvd3_work, + LAPACKE_cggsvp3, + LAPACKE_cggsvp3_work, + LAPACKE_chetrf_rook, + LAPACKE_chetrf_rook_work, + LAPACKE_chetrs_rook, + LAPACKE_chetrs_rook_work, + LAPACKE_clapmt, + LAPACKE_clapmt_work, + LAPACKE_clascl, + LAPACKE_clascl_work, + LAPACKE_cpotrf2, + LAPACKE_cpotrf2_work, + LAPACKE_csytrf_rook, + LAPACKE_csytrf_rook_work, + LAPACKE_csytrs_rook, + LAPACKE_csytrs_rook_work, + LAPACKE_cuncsd2by1, + LAPACKE_cuncsd2by1_work, + LAPACKE_cgelq, + LAPACKE_cgelq_work, + LAPACKE_cgemlq, + LAPACKE_cgemlq_work, + LAPACKE_cgemqr, + LAPACKE_cgemqr_work, + LAPACKE_cgeqr, + LAPACKE_cgeqr_work, + LAPACKE_cgetsls, + LAPACKE_cgetsls_work, + LAPACKE_chbev_2stage, + LAPACKE_chbev_2stage_work, + LAPACKE_chbevd_2stage, + LAPACKE_chbevd_2stage_work, + LAPACKE_chbevx_2stage, + LAPACKE_chbevx_2stage_work, + LAPACKE_checon_3, + LAPACKE_checon_3_work, + LAPACKE_cheev_2stage, + LAPACKE_cheev_2stage_work, + LAPACKE_cheevd_2stage, + LAPACKE_cheevd_2stage_work, + LAPACKE_cheevr_2stage, + LAPACKE_cheevr_2stage_work, + LAPACKE_cheevx_2stage, + LAPACKE_cheevx_2stage_work, + LAPACKE_chegv_2stage, + LAPACKE_chegv_2stage_work, + LAPACKE_chesv_aa, + LAPACKE_chesv_aa_work, + LAPACKE_chesv_rk, + LAPACKE_chesv_rk_work, + LAPACKE_chetrf_aa, + LAPACKE_chetrf_aa_work, + LAPACKE_chetrf_rk, + LAPACKE_chetrf_rk_work, + LAPACKE_chetri_3, + LAPACKE_chetri_3_work, + LAPACKE_chetrs_aa, + LAPACKE_chetrs_aa_work, + LAPACKE_chetrs_3, + LAPACKE_chetrs_3_work, + LAPACKE_csycon_3, + LAPACKE_csycon_3_work, + LAPACKE_csysv_aa, + LAPACKE_csysv_aa_work, + LAPACKE_csysv_rk, + LAPACKE_csysv_rk_work, + LAPACKE_csytrf_aa, + LAPACKE_csytrf_aa_work, + LAPACKE_csytrf_rk, + LAPACKE_csytrf_rk_work, + LAPACKE_csytri_3, + LAPACKE_csytri_3_work, + LAPACKE_csytrs_aa, + LAPACKE_csytrs_aa_work, + LAPACKE_csytrs_3, + LAPACKE_csytrs_3_work, + LAPACKE_chesv_aa_2stage, + LAPACKE_chesv_aa_2stage_work, + LAPACKE_chetrf_aa_2stage, + LAPACKE_chetrf_aa_2stage_work, + LAPACKE_chetrs_aa_2stage, + LAPACKE_chetrs_aa_2stage_work, + LAPACKE_clacrm, + LAPACKE_clacrm_work, + LAPACKE_clarcm, + LAPACKE_clarcm_work, + 
LAPACKE_classq, + LAPACKE_classq_work, + LAPACKE_csysv_aa_2stage, + LAPACKE_csysv_aa_2stage_work, + LAPACKE_csytrf_aa_2stage, + LAPACKE_csytrf_aa_2stage_work, + LAPACKE_csytrs_aa_2stage, + LAPACKE_csytrs_aa_2stage_work, +); +@lapackeobjsd = ( + LAPACKE_dgb_nancheck, + LAPACKE_dgb_trans, + LAPACKE_dge_nancheck, + LAPACKE_dge_trans, + LAPACKE_dgg_nancheck, + LAPACKE_dgg_trans, + LAPACKE_dgt_nancheck, + LAPACKE_dhs_nancheck, + LAPACKE_dhs_trans, + LAPACKE_d_nancheck, + LAPACKE_dpb_nancheck, + LAPACKE_dpb_trans, + LAPACKE_dpf_nancheck, + LAPACKE_dpf_trans, + LAPACKE_dpo_nancheck, + LAPACKE_dpo_trans, + LAPACKE_dpp_nancheck, + LAPACKE_dpp_trans, + LAPACKE_dpt_nancheck, + LAPACKE_dsb_nancheck, + LAPACKE_dsb_trans, + LAPACKE_dsp_nancheck, + LAPACKE_dsp_trans, + LAPACKE_dst_nancheck, + LAPACKE_dsy_nancheck, + LAPACKE_dsy_trans, + LAPACKE_dtb_nancheck, + LAPACKE_dtb_trans, + LAPACKE_dtf_nancheck, + LAPACKE_dtf_trans, + LAPACKE_dtp_nancheck, + LAPACKE_dtp_trans, + LAPACKE_dtr_nancheck, + LAPACKE_dtr_trans, + LAPACKE_dbbcsd, + LAPACKE_dbbcsd_work, + LAPACKE_dbdsdc, + LAPACKE_dbdsdc_work, + LAPACKE_dbdsqr, + LAPACKE_dbdsqr_work, + LAPACKE_ddisna, + LAPACKE_ddisna_work, + LAPACKE_dgbbrd, + LAPACKE_dgbbrd_work, + LAPACKE_dgbcon, + LAPACKE_dgbcon_work, + LAPACKE_dgbequ, + LAPACKE_dgbequ_work, + LAPACKE_dgbequb, + LAPACKE_dgbequb_work, + LAPACKE_dgbrfs, + LAPACKE_dgbrfs_work, + LAPACKE_dgbsv, + LAPACKE_dgbsv_work, + LAPACKE_dgbsvx, + LAPACKE_dgbsvx_work, + LAPACKE_dgbtrf, + LAPACKE_dgbtrf_work, + LAPACKE_dgbtrs, + LAPACKE_dgbtrs_work, + LAPACKE_dgebak, + LAPACKE_dgebak_work, + LAPACKE_dgebal, + LAPACKE_dgebal_work, + LAPACKE_dgebrd, + LAPACKE_dgebrd_work, + LAPACKE_dgecon, + LAPACKE_dgecon_work, + LAPACKE_dgeequ, + LAPACKE_dgeequ_work, + LAPACKE_dgeequb, + LAPACKE_dgeequb_work, + LAPACKE_dgees, + LAPACKE_dgees_work, + LAPACKE_dgeesx, + LAPACKE_dgeesx_work, + LAPACKE_dgeev, + LAPACKE_dgeev_work, + LAPACKE_dgeevx, + LAPACKE_dgeevx_work, + LAPACKE_dgehrd, + LAPACKE_dgehrd_work, + LAPACKE_dgejsv, + LAPACKE_dgejsv_work, + LAPACKE_dgelq2, + LAPACKE_dgelq2_work, + LAPACKE_dgelqf, + LAPACKE_dgelqf_work, + LAPACKE_dgels, + LAPACKE_dgels_work, + LAPACKE_dgelsd, + LAPACKE_dgelsd_work, + LAPACKE_dgelss, + LAPACKE_dgelss_work, + LAPACKE_dgelsy, + LAPACKE_dgelsy_work, + LAPACKE_dgemqrt, + LAPACKE_dgemqrt_work, + LAPACKE_dgeqlf, + LAPACKE_dgeqlf_work, + LAPACKE_dgeqp3, + LAPACKE_dgeqp3_work, + LAPACKE_dgeqr2, + LAPACKE_dgeqr2_work, LAPACKE_dgeqrf, LAPACKE_dgeqrf_work, LAPACKE_dgeqrfp, @@ -2016,37 +2082,161 @@ LAPACKE_dtrttp_work, LAPACKE_dtzrzf, LAPACKE_dtzrzf_work, - LAPACKE_sbbcsd, - LAPACKE_sbbcsd_work, - LAPACKE_sbdsdc, - LAPACKE_sbdsdc_work, - LAPACKE_sbdsqr, - LAPACKE_sbdsqr_work, - LAPACKE_sdisna, - LAPACKE_sdisna_work, - LAPACKE_sgbbrd, - LAPACKE_sgbbrd_work, - LAPACKE_sgbcon, - LAPACKE_sgbcon_work, - LAPACKE_sgbequ, - LAPACKE_sgbequ_work, - LAPACKE_sgbequb, - LAPACKE_sgbequb_work, - LAPACKE_sgbrfs, - LAPACKE_sgbrfs_work, - LAPACKE_sgbsv, - LAPACKE_sgbsv_work, - LAPACKE_sgbsvx, - LAPACKE_sgbsvx_work, - LAPACKE_sgbtrf, - LAPACKE_sgbtrf_work, - LAPACKE_sgbtrs, - LAPACKE_sgbtrs_work, - LAPACKE_sgebak, - LAPACKE_sgebak_work, - LAPACKE_sgebal, - LAPACKE_sgebal_work, - LAPACKE_sgebrd, + LAPACKE_dlatms, + LAPACKE_dlatms_work, + LAPACKE_dlagge, + LAPACKE_dlagge_work, + LAPACKE_dlagsy, + LAPACKE_dlagsy_work, + LAPACKE_dbdsvdx, + LAPACKE_dbdsvdx_work, + LAPACKE_dgesvdx, + LAPACKE_dgesvdx_work, + LAPACKE_dgetrf2, + LAPACKE_dgetrf2_work, + LAPACKE_dgges3, + LAPACKE_dgges3_work, + LAPACKE_dggev3, + LAPACKE_dggev3_work, + 
LAPACKE_dgghd3, + LAPACKE_dgghd3_work, + LAPACKE_dggsvd3, + LAPACKE_dggsvd3_work, + LAPACKE_dggsvp3, + LAPACKE_dggsvp3_work, + LAPACKE_dlapmt, + LAPACKE_dlapmt_work, + LAPACKE_dlascl, + LAPACKE_dlascl_work, + LAPACKE_dorcsd2by1, + LAPACKE_dorcsd2by1_work, + LAPACKE_dpotrf2, + LAPACKE_dpotrf2_work, + LAPACKE_dsytrf_rook, + LAPACKE_dsytrf_rook_work, + LAPACKE_dsytrs_rook, + LAPACKE_dsytrs_rook_work, + LAPACKE_dgelq, + LAPACKE_dgelq_work, + LAPACKE_dgemlq, + LAPACKE_dgemlq_work, + LAPACKE_dgemqr, + LAPACKE_dgemqr_work, + LAPACKE_dgeqr, + LAPACKE_dgeqr_work, + LAPACKE_dgetsls, + LAPACKE_dgetsls_work, + LAPACKE_dsbev_2stage, + LAPACKE_dsbev_2stage_work, + LAPACKE_dsbevd_2stage, + LAPACKE_dsbevd_2stage_work, + LAPACKE_dsbevx_2stage, + LAPACKE_dsbevx_2stage_work, + LAPACKE_dsycon_3, + LAPACKE_dsycon_3_work, + LAPACKE_dsyev_2stage, + LAPACKE_dsyev_2stage_work, + LAPACKE_dsyevd_2stage, + LAPACKE_dsyevd_2stage_work, + LAPACKE_dsyevr_2stage, + LAPACKE_dsyevr_2stage_work, + LAPACKE_dsyevx_2stage, + LAPACKE_dsyevx_2stage_work, + LAPACKE_dsygv_2stage, + LAPACKE_dsygv_2stage_work, + LAPACKE_dsysv_aa, + LAPACKE_dsysv_aa_work, + LAPACKE_dsysv_rk, + LAPACKE_dsysv_rk_work, + LAPACKE_dsytrf_aa, + LAPACKE_dsytrf_aa_work, + LAPACKE_dsytrf_rk, + LAPACKE_dsytrf_rk_work, + LAPACKE_dsytri_3, + LAPACKE_dsytri_3_work, + LAPACKE_dsytrs_aa, + LAPACKE_dsytrs_aa_work, + LAPACKE_dsytrs_3, + LAPACKE_dsytrs_3_work, + LAPACKE_dlassq, + LAPACKE_dlassq_work, + LAPACKE_dsysv_aa_2stage, + LAPACKE_dsysv_aa_2stage_work, + LAPACKE_dsytrf_aa_2stage, + LAPACKE_dsytrf_aa_2stage_work, + LAPACKE_dsytrs_aa_2stage, + LAPACKE_dsytrs_aa_2stage_work, + LAPACKE_dgesvdq, + LAPACKE_dgesvdq_work, + LAPACKE_slag2d, + LAPACKE_slag2d_work, +); +@lapackeobjss = ( + LAPACKE_sgb_nancheck, + LAPACKE_sgb_trans, + LAPACKE_sge_nancheck, + LAPACKE_sge_trans, + LAPACKE_sgg_nancheck, + LAPACKE_sgg_trans, + LAPACKE_sgt_nancheck, + LAPACKE_shs_nancheck, + LAPACKE_shs_trans, + LAPACKE_s_nancheck, + LAPACKE_spb_nancheck, + LAPACKE_spb_trans, + LAPACKE_spf_nancheck, + LAPACKE_spf_trans, + LAPACKE_spo_nancheck, + LAPACKE_spo_trans, + LAPACKE_spp_nancheck, + LAPACKE_spp_trans, + LAPACKE_spt_nancheck, + LAPACKE_ssb_nancheck, + LAPACKE_ssb_trans, + LAPACKE_ssp_nancheck, + LAPACKE_ssp_trans, + LAPACKE_sst_nancheck, + LAPACKE_ssy_nancheck, + LAPACKE_ssy_trans, + LAPACKE_stb_nancheck, + LAPACKE_stb_trans, + LAPACKE_stf_nancheck, + LAPACKE_stf_trans, + LAPACKE_stp_nancheck, + LAPACKE_stp_trans, + LAPACKE_str_nancheck, + LAPACKE_str_trans, + LAPACKE_sbbcsd, + LAPACKE_sbbcsd_work, + LAPACKE_sbdsdc, + LAPACKE_sbdsdc_work, + LAPACKE_sbdsqr, + LAPACKE_sbdsqr_work, + LAPACKE_sdisna, + LAPACKE_sdisna_work, + LAPACKE_sgbbrd, + LAPACKE_sgbbrd_work, + LAPACKE_sgbcon, + LAPACKE_sgbcon_work, + LAPACKE_sgbequ, + LAPACKE_sgbequ_work, + LAPACKE_sgbequb, + LAPACKE_sgbequb_work, + LAPACKE_sgbrfs, + LAPACKE_sgbrfs_work, + LAPACKE_sgbsv, + LAPACKE_sgbsv_work, + LAPACKE_sgbsvx, + LAPACKE_sgbsvx_work, + LAPACKE_sgbtrf, + LAPACKE_sgbtrf_work, + LAPACKE_sgbtrs, + LAPACKE_sgbtrs_work, + LAPACKE_sgebak, + LAPACKE_sgebak_work, + LAPACKE_sgebal, + LAPACKE_sgebal_work, + LAPACKE_sgebrd, LAPACKE_sgebrd_work, LAPACKE_sgecon, LAPACKE_sgecon_work, @@ -2162,8 +2352,6 @@ LAPACKE_slacn2_work, LAPACKE_slacpy, LAPACKE_slacpy_work, - LAPACKE_slag2d, - LAPACKE_slag2d_work, LAPACKE_slamch, LAPACKE_slamch_work, LAPACKE_slange, @@ -2494,108 +2682,236 @@ LAPACKE_strttp_work, LAPACKE_stzrzf, LAPACKE_stzrzf_work, - LAPACKE_zbbcsd, - LAPACKE_zbbcsd_work, - LAPACKE_zbdsqr, - LAPACKE_zbdsqr_work, - LAPACKE_zcgesv, - 
LAPACKE_zcgesv_work, - LAPACKE_zcposv, - LAPACKE_zcposv_work, - LAPACKE_zgbbrd, - LAPACKE_zgbbrd_work, - LAPACKE_zgbcon, - LAPACKE_zgbcon_work, - LAPACKE_zgbequ, - LAPACKE_zgbequ_work, - LAPACKE_zgbequb, - LAPACKE_zgbequb_work, - LAPACKE_zgbrfs, - LAPACKE_zgbrfs_work, - LAPACKE_zgbsv, - LAPACKE_zgbsv_work, - LAPACKE_zgbsvx, - LAPACKE_zgbsvx_work, - LAPACKE_zgbtrf, - LAPACKE_zgbtrf_work, - LAPACKE_zgbtrs, - LAPACKE_zgbtrs_work, - LAPACKE_zgebak, - LAPACKE_zgebak_work, - LAPACKE_zgebal, - LAPACKE_zgebal_work, - LAPACKE_zgebrd, - LAPACKE_zgebrd_work, - LAPACKE_zgecon, - LAPACKE_zgecon_work, - LAPACKE_zgeequ, - LAPACKE_zgeequ_work, - LAPACKE_zgeequb, - LAPACKE_zgeequb_work, - LAPACKE_zgees, - LAPACKE_zgees_work, - LAPACKE_zgeesx, - LAPACKE_zgeesx_work, - LAPACKE_zgeev, - LAPACKE_zgeev_work, - LAPACKE_zgeevx, - LAPACKE_zgeevx_work, - LAPACKE_zgehrd, - LAPACKE_zgehrd_work, - LAPACKE_zgelq2, - LAPACKE_zgelq2_work, - LAPACKE_zgelqf, - LAPACKE_zgelqf_work, - LAPACKE_zgels, - LAPACKE_zgels_work, - LAPACKE_zgelsd, - LAPACKE_zgelsd_work, - LAPACKE_zgelss, - LAPACKE_zgelss_work, - LAPACKE_zgelsy, - LAPACKE_zgelsy_work, - LAPACKE_zgemqrt, - LAPACKE_zgemqrt_work, - LAPACKE_zgeqlf, - LAPACKE_zgeqlf_work, - LAPACKE_zgeqp3, - LAPACKE_zgeqp3_work, - LAPACKE_zgeqr2, - LAPACKE_zgeqr2_work, - LAPACKE_zgeqrf, - LAPACKE_zgeqrf_work, - LAPACKE_zgeqrfp, - LAPACKE_zgeqrfp_work, - LAPACKE_zgeqrt, - LAPACKE_zgeqrt2, - LAPACKE_zgeqrt2_work, - LAPACKE_zgeqrt3, - LAPACKE_zgeqrt3_work, - LAPACKE_zgeqrt_work, - LAPACKE_zgerfs, - LAPACKE_zgerfs_work, - LAPACKE_zgerqf, - LAPACKE_zgerqf_work, - LAPACKE_zgesdd, - LAPACKE_zgesdd_work, - LAPACKE_zgesv, - LAPACKE_zgesv_work, - LAPACKE_zgesvd, - LAPACKE_zgesvd_work, - LAPACKE_zgesvx, - LAPACKE_zgesvx_work, - LAPACKE_zgetf2, - LAPACKE_zgetf2_work, - LAPACKE_zgetrf, - LAPACKE_zgetrf_work, - LAPACKE_zgetri, - LAPACKE_zgetri_work, - LAPACKE_zgetrs, - LAPACKE_zgetrs_work, - LAPACKE_zggbak, - LAPACKE_zggbak_work, - LAPACKE_zggbal, - LAPACKE_zggbal_work, + LAPACKE_slatms, + LAPACKE_slatms_work, + LAPACKE_slagge, + LAPACKE_slagge_work, + LAPACKE_slagsy, + LAPACKE_slagsy_work, + LAPACKE_sbdsvdx, + LAPACKE_sbdsvdx_work, + LAPACKE_sgesvdx, + LAPACKE_sgesvdx_work, + LAPACKE_sgetrf2, + LAPACKE_sgetrf2_work, + LAPACKE_sgges3, + LAPACKE_sgges3_work, + LAPACKE_sggev3, + LAPACKE_sggev3_work, + LAPACKE_sgghd3, + LAPACKE_sgghd3_work, + LAPACKE_sggsvd3, + LAPACKE_sggsvd3_work, + LAPACKE_sggsvp3, + LAPACKE_sggsvp3_work, + LAPACKE_slapmt, + LAPACKE_slapmt_work, + LAPACKE_slascl, + LAPACKE_slascl_work, + LAPACKE_sorcsd2by1, + LAPACKE_sorcsd2by1_work, + LAPACKE_spotrf2, + LAPACKE_spotrf2_work, + LAPACKE_ssytrf_rook, + LAPACKE_ssytrf_rook_work, + LAPACKE_ssytrs_rook, + LAPACKE_ssytrs_rook_work, + LAPACKE_stpqrt, + LAPACKE_stpqrt_work, + LAPACKE_sgelq, + LAPACKE_sgelq_work, + LAPACKE_sgemlq, + LAPACKE_sgemlq_work, + LAPACKE_sgemqr, + LAPACKE_sgemqr_work, + LAPACKE_sgeqr, + LAPACKE_sgeqr_work, + LAPACKE_sgetsls, + LAPACKE_sgetsls_work, + LAPACKE_ssbev_2stage, + LAPACKE_ssbev_2stage_work, + LAPACKE_ssbevd_2stage, + LAPACKE_ssbevd_2stage_work, + LAPACKE_ssbevx_2stage, + LAPACKE_ssbevx_2stage_work, + LAPACKE_ssycon_3, + LAPACKE_ssycon_3_work, + LAPACKE_ssyev_2stage, + LAPACKE_ssyev_2stage_work, + LAPACKE_ssyevd_2stage, + LAPACKE_ssyevd_2stage_work, + LAPACKE_ssyevr_2stage, + LAPACKE_ssyevr_2stage_work, + LAPACKE_ssyevx_2stage, + LAPACKE_ssyevx_2stage_work, + LAPACKE_ssygv_2stage, + LAPACKE_ssygv_2stage_work, + LAPACKE_ssysv_aa, + LAPACKE_ssysv_aa_work, + LAPACKE_ssysv_rk, + LAPACKE_ssysv_rk_work, + 
LAPACKE_ssytrf_aa, + LAPACKE_ssytrf_aa_work, + LAPACKE_ssytrf_rk, + LAPACKE_ssytrf_rk_work, + LAPACKE_ssytri_3, + LAPACKE_ssytri_3_work, + LAPACKE_ssytrs_aa, + LAPACKE_ssytrs_aa_work, + LAPACKE_ssytrs_3, + LAPACKE_ssytrs_3_work, + LAPACKE_slassq, + LAPACKE_slassq_work, + LAPACKE_ssysv_aa_2stage, + LAPACKE_ssysv_aa_2stage_work, + LAPACKE_ssytrf_aa_2stage, + LAPACKE_ssytrf_aa_2stage_work, + LAPACKE_ssytrs_aa_2stage, + LAPACKE_ssytrs_aa_2stage_work, + LAPACKE_sgesvdq, + LAPACKE_sgesvdq_work, +); +@lapackeobjsz = ( + LAPACKE_zgb_nancheck, + LAPACKE_zgb_trans, + LAPACKE_zge_nancheck, + LAPACKE_zge_trans, + LAPACKE_zgg_nancheck, + LAPACKE_zgg_trans, + LAPACKE_zgt_nancheck, + LAPACKE_zhb_nancheck, + LAPACKE_zhb_trans, + LAPACKE_zhe_nancheck, + LAPACKE_zhe_trans, + LAPACKE_zhp_nancheck, + LAPACKE_zhp_trans, + LAPACKE_zhs_nancheck, + LAPACKE_zhs_trans, + LAPACKE_z_nancheck, + LAPACKE_zpb_nancheck, + LAPACKE_zpb_trans, + LAPACKE_zpf_nancheck, + LAPACKE_zpf_trans, + LAPACKE_zpo_nancheck, + LAPACKE_zpo_trans, + LAPACKE_zpp_nancheck, + LAPACKE_zpp_trans, + LAPACKE_zpt_nancheck, + LAPACKE_zsp_nancheck, + LAPACKE_zsp_trans, + LAPACKE_zst_nancheck, + LAPACKE_zsy_nancheck, + LAPACKE_zsy_trans, + LAPACKE_ztb_nancheck, + LAPACKE_ztb_trans, + LAPACKE_ztf_nancheck, + LAPACKE_ztf_trans, + LAPACKE_ztp_nancheck, + LAPACKE_ztp_trans, + LAPACKE_ztr_nancheck, + LAPACKE_ztr_trans, + LAPACKE_zbbcsd, + LAPACKE_zbbcsd_work, + LAPACKE_zbdsqr, + LAPACKE_zbdsqr_work, + LAPACKE_zcgesv, + LAPACKE_zcgesv_work, + LAPACKE_zcposv, + LAPACKE_zcposv_work, + LAPACKE_zgbbrd, + LAPACKE_zgbbrd_work, + LAPACKE_zgbcon, + LAPACKE_zgbcon_work, + LAPACKE_zgbequ, + LAPACKE_zgbequ_work, + LAPACKE_zgbequb, + LAPACKE_zgbequb_work, + LAPACKE_zgbrfs, + LAPACKE_zgbrfs_work, + LAPACKE_zgbsv, + LAPACKE_zgbsv_work, + LAPACKE_zgbsvx, + LAPACKE_zgbsvx_work, + LAPACKE_zgbtrf, + LAPACKE_zgbtrf_work, + LAPACKE_zgbtrs, + LAPACKE_zgbtrs_work, + LAPACKE_zgebak, + LAPACKE_zgebak_work, + LAPACKE_zgebal, + LAPACKE_zgebal_work, + LAPACKE_zgebrd, + LAPACKE_zgebrd_work, + LAPACKE_zgecon, + LAPACKE_zgecon_work, + LAPACKE_zgeequ, + LAPACKE_zgeequ_work, + LAPACKE_zgeequb, + LAPACKE_zgeequb_work, + LAPACKE_zgees, + LAPACKE_zgees_work, + LAPACKE_zgeesx, + LAPACKE_zgeesx_work, + LAPACKE_zgeev, + LAPACKE_zgeev_work, + LAPACKE_zgeevx, + LAPACKE_zgeevx_work, + LAPACKE_zgehrd, + LAPACKE_zgehrd_work, + LAPACKE_zgelq2, + LAPACKE_zgelq2_work, + LAPACKE_zgelqf, + LAPACKE_zgelqf_work, + LAPACKE_zgels, + LAPACKE_zgels_work, + LAPACKE_zgelsd, + LAPACKE_zgelsd_work, + LAPACKE_zgelss, + LAPACKE_zgelss_work, + LAPACKE_zgelsy, + LAPACKE_zgelsy_work, + LAPACKE_zgemqrt, + LAPACKE_zgemqrt_work, + LAPACKE_zgeqlf, + LAPACKE_zgeqlf_work, + LAPACKE_zgeqp3, + LAPACKE_zgeqp3_work, + LAPACKE_zgeqr2, + LAPACKE_zgeqr2_work, + LAPACKE_zgeqrf, + LAPACKE_zgeqrf_work, + LAPACKE_zgeqrfp, + LAPACKE_zgeqrfp_work, + LAPACKE_zgeqrt, + LAPACKE_zgeqrt2, + LAPACKE_zgeqrt2_work, + LAPACKE_zgeqrt3, + LAPACKE_zgeqrt3_work, + LAPACKE_zgeqrt_work, + LAPACKE_zgerfs, + LAPACKE_zgerfs_work, + LAPACKE_zgerqf, + LAPACKE_zgerqf_work, + LAPACKE_zgesdd, + LAPACKE_zgesdd_work, + LAPACKE_zgesv, + LAPACKE_zgesv_work, + LAPACKE_zgesvd, + LAPACKE_zgesvd_work, + LAPACKE_zgesvx, + LAPACKE_zgesvx_work, + LAPACKE_zgetf2, + LAPACKE_zgetf2_work, + LAPACKE_zgetrf, + LAPACKE_zgetrf_work, + LAPACKE_zgetri, + LAPACKE_zgetri_work, + LAPACKE_zgetrs, + LAPACKE_zgetrs_work, + LAPACKE_zggbak, + LAPACKE_zggbak_work, + LAPACKE_zggbal, + LAPACKE_zggbal_work, LAPACKE_zgges, LAPACKE_zgges_work, LAPACKE_zggesx, @@ -2991,11 +3307,7 @@ 
LAPACKE_zupmtr, LAPACKE_zupmtr_work, LAPACKE_zsyr, - LAPACKE_csyr, LAPACKE_zsyr_work, - LAPACKE_csyr_work, - LAPACKE_ilaver, - ## @(SRCX_OBJ) from `lapack-3.4.1/lapacke/src/Makefile` ## Not exported: requires LAPACKE_EXTENDED to be set and depends on the ## corresponding LAPACK extended precision routines. @@ -3075,128 +3387,15 @@ ## @(MATGEN_OBJ) from `lapack-3.4.1/lapacke/src/Makefile` ## Not exported: requires LAPACKE_TESTING to be set and depends on libtmg ## (see `lapack-3.4.1/TESTING/MATGEN`). - LAPACKE_clatms, - LAPACKE_clatms_work, - LAPACKE_dlatms, - LAPACKE_dlatms_work, - LAPACKE_slatms, - LAPACKE_slatms_work, LAPACKE_zlatms, LAPACKE_zlatms_work, - LAPACKE_clagge, - LAPACKE_clagge_work, - LAPACKE_dlagge, - LAPACKE_dlagge_work, - LAPACKE_slagge, - LAPACKE_slagge_work, LAPACKE_zlagge, LAPACKE_zlagge_work, - LAPACKE_claghe, - LAPACKE_claghe_work, LAPACKE_zlaghe, LAPACKE_zlaghe_work, - LAPACKE_clagsy, - LAPACKE_clagsy_work, - LAPACKE_dlagsy, - LAPACKE_dlagsy_work, - LAPACKE_slagsy, - LAPACKE_slagsy_work, LAPACKE_zlagsy, LAPACKE_zlagsy_work, ## new function from lapack-3.6.0 - - LAPACKE_cgejsv, - LAPACKE_cgejsv_work, - LAPACKE_cgesvdx, - LAPACKE_cgesvdx_work, - LAPACKE_cgesvj, - LAPACKE_cgesvj_work, - LAPACKE_cgetrf2, - LAPACKE_cgetrf2_work, - LAPACKE_cgges3, - LAPACKE_cgges3_work, - LAPACKE_cggev3, - LAPACKE_cggev3_work, - LAPACKE_cgghd3, - LAPACKE_cgghd3_work, - LAPACKE_cggsvd3, - LAPACKE_cggsvd3_work, - LAPACKE_cggsvp3, - LAPACKE_cggsvp3_work, - LAPACKE_chetrf_rook, - LAPACKE_chetrf_rook_work, - LAPACKE_chetrs_rook, - LAPACKE_chetrs_rook_work, - LAPACKE_clapmt, - LAPACKE_clapmt_work, - LAPACKE_clascl, - LAPACKE_clascl_work, - LAPACKE_cpotrf2, - LAPACKE_cpotrf2_work, - LAPACKE_csytrf_rook, - LAPACKE_csytrf_rook_work, - LAPACKE_csytrs_rook, - LAPACKE_csytrs_rook_work, - LAPACKE_cuncsd2by1, - LAPACKE_cuncsd2by1_work, - LAPACKE_dbdsvdx, - LAPACKE_dbdsvdx_work, - LAPACKE_dgesvdx, - LAPACKE_dgesvdx_work, - LAPACKE_dgetrf2, - LAPACKE_dgetrf2_work, - LAPACKE_dgges3, - LAPACKE_dgges3_work, - LAPACKE_dggev3, - LAPACKE_dggev3_work, - LAPACKE_dgghd3, - LAPACKE_dgghd3_work, - LAPACKE_dggsvd3, - LAPACKE_dggsvd3_work, - LAPACKE_dggsvp3, - LAPACKE_dggsvp3_work, - LAPACKE_dlapmt, - LAPACKE_dlapmt_work, - LAPACKE_dlascl, - LAPACKE_dlascl_work, - LAPACKE_dorcsd2by1, - LAPACKE_dorcsd2by1_work, - LAPACKE_dpotrf2, - LAPACKE_dpotrf2_work, - LAPACKE_dsytrf_rook, - LAPACKE_dsytrf_rook_work, - LAPACKE_dsytrs_rook, - LAPACKE_dsytrs_rook_work, - LAPACKE_sbdsvdx, - LAPACKE_sbdsvdx_work, - LAPACKE_sgesvdx, - LAPACKE_sgesvdx_work, - LAPACKE_sgetrf2, - LAPACKE_sgetrf2_work, - LAPACKE_sgges3, - LAPACKE_sgges3_work, - LAPACKE_sggev3, - LAPACKE_sggev3_work, - LAPACKE_sgghd3, - LAPACKE_sgghd3_work, - LAPACKE_sggsvd3, - LAPACKE_sggsvd3_work, - LAPACKE_sggsvp3, - LAPACKE_sggsvp3_work, - LAPACKE_slapmt, - LAPACKE_slapmt_work, - LAPACKE_slascl, - LAPACKE_slascl_work, - LAPACKE_sorcsd2by1, - LAPACKE_sorcsd2by1_work, - LAPACKE_spotrf2, - LAPACKE_spotrf2_work, - LAPACKE_ssytrf_rook, - LAPACKE_ssytrf_rook_work, - LAPACKE_ssytrs_rook, - LAPACKE_ssytrs_rook_work, - LAPACKE_stpqrt, - LAPACKE_stpqrt_work, LAPACKE_zgejsv, LAPACKE_zgejsv_work, LAPACKE_zgesvdx, @@ -3233,148 +3432,6 @@ LAPACKE_zuncsd2by1_work, ## new function from lapack-3.7.0 - LAPACKE_cgelq, - LAPACKE_cgelq_work, - LAPACKE_cgemlq, - LAPACKE_cgemlq_work, - LAPACKE_cgemqr, - LAPACKE_cgemqr_work, - LAPACKE_cgeqr, - LAPACKE_cgeqr_work, - LAPACKE_cgetsls, - LAPACKE_cgetsls_work, - LAPACKE_chbev_2stage, - LAPACKE_chbev_2stage_work, - LAPACKE_chbevd_2stage, - 
LAPACKE_chbevd_2stage_work, - LAPACKE_chbevx_2stage, - LAPACKE_chbevx_2stage_work, - LAPACKE_checon_3, - LAPACKE_checon_3_work, - LAPACKE_cheev_2stage, - LAPACKE_cheev_2stage_work, - LAPACKE_cheevd_2stage, - LAPACKE_cheevd_2stage_work, - LAPACKE_cheevr_2stage, - LAPACKE_cheevr_2stage_work, - LAPACKE_cheevx_2stage, - LAPACKE_cheevx_2stage_work, - LAPACKE_chegv_2stage, - LAPACKE_chegv_2stage_work, - LAPACKE_chesv_aa, - LAPACKE_chesv_aa_work, - LAPACKE_chesv_rk, - LAPACKE_chesv_rk_work, - LAPACKE_chetrf_aa, - LAPACKE_chetrf_aa_work, - LAPACKE_chetrf_rk, - LAPACKE_chetrf_rk_work, - LAPACKE_chetri_3, - LAPACKE_chetri_3_work, - LAPACKE_chetrs_aa, - LAPACKE_chetrs_aa_work, - LAPACKE_chetrs_3, - LAPACKE_chetrs_3_work, - LAPACKE_csycon_3, - LAPACKE_csycon_3_work, - LAPACKE_csysv_aa, - LAPACKE_csysv_aa_work, - LAPACKE_csysv_rk, - LAPACKE_csysv_rk_work, - LAPACKE_csytrf_aa, - LAPACKE_csytrf_aa_work, - LAPACKE_csytrf_rk, - LAPACKE_csytrf_rk_work, - LAPACKE_csytri_3, - LAPACKE_csytri_3_work, - LAPACKE_csytrs_aa, - LAPACKE_csytrs_aa_work, - LAPACKE_csytrs_3, - LAPACKE_csytrs_3_work, - LAPACKE_dgelq, - LAPACKE_dgelq_work, - LAPACKE_dgemlq, - LAPACKE_dgemlq_work, - LAPACKE_dgemqr, - LAPACKE_dgemqr_work, - LAPACKE_dgeqr, - LAPACKE_dgeqr_work, - LAPACKE_dgetsls, - LAPACKE_dgetsls_work, - LAPACKE_dsbev_2stage, - LAPACKE_dsbev_2stage_work, - LAPACKE_dsbevd_2stage, - LAPACKE_dsbevd_2stage_work, - LAPACKE_dsbevx_2stage, - LAPACKE_dsbevx_2stage_work, - LAPACKE_dsycon_3, - LAPACKE_dsycon_3_work, - LAPACKE_dsyev_2stage, - LAPACKE_dsyev_2stage_work, - LAPACKE_dsyevd_2stage, - LAPACKE_dsyevd_2stage_work, - LAPACKE_dsyevr_2stage, - LAPACKE_dsyevr_2stage_work, - LAPACKE_dsyevx_2stage, - LAPACKE_dsyevx_2stage_work, - LAPACKE_dsygv_2stage, - LAPACKE_dsygv_2stage_work, - LAPACKE_dsysv_aa, - LAPACKE_dsysv_aa_work, - LAPACKE_dsysv_rk, - LAPACKE_dsysv_rk_work, - LAPACKE_dsytrf_aa, - LAPACKE_dsytrf_aa_work, - LAPACKE_dsytrf_rk, - LAPACKE_dsytrf_rk_work, - LAPACKE_dsytri_3, - LAPACKE_dsytri_3_work, - LAPACKE_dsytrs_aa, - LAPACKE_dsytrs_aa_work, - LAPACKE_dsytrs_3, - LAPACKE_dsytrs_3_work, - LAPACKE_sgelq, - LAPACKE_sgelq_work, - LAPACKE_sgemlq, - LAPACKE_sgemlq_work, - LAPACKE_sgemqr, - LAPACKE_sgemqr_work, - LAPACKE_sgeqr, - LAPACKE_sgeqr_work, - LAPACKE_sgetsls, - LAPACKE_sgetsls_work, - LAPACKE_ssbev_2stage, - LAPACKE_ssbev_2stage_work, - LAPACKE_ssbevd_2stage, - LAPACKE_ssbevd_2stage_work, - LAPACKE_ssbevx_2stage, - LAPACKE_ssbevx_2stage_work, - LAPACKE_ssycon_3, - LAPACKE_ssycon_3_work, - LAPACKE_ssyev_2stage, - LAPACKE_ssyev_2stage_work, - LAPACKE_ssyevd_2stage, - LAPACKE_ssyevd_2stage_work, - LAPACKE_ssyevr_2stage, - LAPACKE_ssyevr_2stage_work, - LAPACKE_ssyevx_2stage, - LAPACKE_ssyevx_2stage_work, - LAPACKE_ssygv_2stage, - LAPACKE_ssygv_2stage_work, - LAPACKE_ssysv_aa, - LAPACKE_ssysv_aa_work, - LAPACKE_ssysv_rk, - LAPACKE_ssysv_rk_work, - LAPACKE_ssytrf_aa, - LAPACKE_ssytrf_aa_work, - LAPACKE_ssytrf_rk, - LAPACKE_ssytrf_rk_work, - LAPACKE_ssytri_3, - LAPACKE_ssytri_3_work, - LAPACKE_ssytrs_aa, - LAPACKE_ssytrs_aa_work, - LAPACKE_ssytrs_3, - LAPACKE_ssytrs_3_work, LAPACKE_zgelq, LAPACKE_zgelq_work, LAPACKE_zgemlq, @@ -3435,42 +3492,6 @@ LAPACKE_zsytrs_3_work, ## new function from lapack-3.8.0 - LAPACKE_chesv_aa_2stage, - LAPACKE_chesv_aa_2stage_work, - LAPACKE_chetrf_aa_2stage, - LAPACKE_chetrf_aa_2stage_work, - LAPACKE_chetrs_aa_2stage, - LAPACKE_chetrs_aa_2stage_work, - LAPACKE_clacrm, - LAPACKE_clacrm_work, - LAPACKE_clarcm, - LAPACKE_clarcm_work, - LAPACKE_classq, - LAPACKE_classq_work, - LAPACKE_csysv_aa_2stage, 
- LAPACKE_csysv_aa_2stage_work, - LAPACKE_csytrf_aa_2stage, - LAPACKE_csytrf_aa_2stage_work, - LAPACKE_csytrs_aa_2stage, - LAPACKE_csytrs_aa_2stage_work, - LAPACKE_dlassq, - LAPACKE_dlassq_work, - LAPACKE_dsysv_aa_2stage, - LAPACKE_dsysv_aa_2stage_work, - LAPACKE_dsytrf_aa_2stage, - LAPACKE_dsytrf_aa_2stage_work, - LAPACKE_dsytrs_aa_2stage, - LAPACKE_dsytrs_aa_2stage_work, - LAPACKE_get_nancheck, - LAPACKE_set_nancheck, - LAPACKE_slassq, - LAPACKE_slassq_work, - LAPACKE_ssysv_aa_2stage, - LAPACKE_ssysv_aa_2stage_work, - LAPACKE_ssytrf_aa_2stage, - LAPACKE_ssytrf_aa_2stage_work, - LAPACKE_ssytrs_aa_2stage, - LAPACKE_ssytrs_aa_2stage_work, LAPACKE_zhesv_aa_2stage, LAPACKE_zhesv_aa_2stage_work, LAPACKE_zhetrf_aa_2stage, @@ -3489,26 +3510,97 @@ LAPACKE_zsytrf_aa_2stage_work, LAPACKE_zsytrs_aa_2stage, LAPACKE_zsytrs_aa_2stage_work, + # new functions from 3.9.0 + LAPACKE_zgesvdq, + LAPACKE_zgesvdq_work ); #These function may need 2 underscores. @lapack_embeded_underscore_objs=( - xerbla_array, chla_transtype, slasyf_rook, + xerbla_array, chla_transtype, + ); +@lapack_embeded_underscore_objs_s=( + slasyf_rook, ssytf2_rook, ssytrf_rook, ssytrs_rook, ssytri_rook, ssycon_rook, ssysv_rook, + slasyf_rk, ssyconvf_rook, ssytf2_rk, + ssytrf_rk, ssytrs_3, ssytri_3, + ssytri_3x, ssycon_3, ssysv_rk, + slasyf_aa, ssysv_aa, ssytrf_aa, + ssytrs_aa, ssytrd_2stage, ssytrd_sy2sb, + ssytrd_sb2st, ssb2st_kernels, ssyevd_2stage, + ssyev_2stage, ssyevx_2stage, ssyevr_2stage, + ssbev_2stage, ssbevx_2stage, ssbevd_2stage, + ssygv_2stage, + ssysv_aa_2stage, ssytrf_aa_2stage, + ssytrs_aa_2stage, + slaorhr_col_getrfnp, slaorhr_col_getrfnp2, sorhr_col, +); +@lapack_embeded_underscore_objs_c=( chetf2_rook, chetrf_rook, chetri_rook, chetrs_rook, checon_rook, chesv_rook, clahef_rook, clasyf_rook, csytf2_rook, csytrf_rook, csytrs_rook, csytri_rook, csycon_rook, csysv_rook, + chetf2_rk, + chetrf_rk, chetri_3, chetri_3x, + chetrs_3, checon_3, chesv_rk, + chesv_aa, chetrf_aa, chetrs_aa, + clahef_aa, clahef_rk, clasyf_rk, + clasyf_aa, csytf2_rk, csytrf_rk, + csytrf_aa, csytrs_3, csytrs_aa, + csytri_3, csytri_3x, csycon_3, + csysv_rk, csysv_aa, csyconvf_rook, + chetrd_2stage, chetrd_he2hb, chetrd_hb2st, + chb2st_kernels, cheevd_2stage, cheev_2stage, + cheevx_2stage, cheevr_2stage, chbev_2stage, + chbevx_2stage, chbevd_2stage, chegv_2stage, + chesv_aa_2stage, + chetrf_aa_2stage, chetrs_aa_2stage, + csysv_aa_2stage, csytrf_aa_2stage, + csytrs_aa_2stage, + claunhr_col_getrfnp, claunhr_col_getrfnp2, cunhr_col, +); +@lapack_embeded_underscore_objs_d=( dlasyf_rook, dsytf2_rook, dsytrf_rook, dsytrs_rook, dsytri_rook, dsycon_rook, dsysv_rook, + dlasyf_rk, dsyconvf_rook, + dsytf2_rk, dsytrf_rk, dsytrs_3, + dsytri_3, dsytri_3x, dsycon_3, + dsysv_rk, dlasyf_aa, dsysv_aa, + dsytrf_aa, dsytrs_aa, dsytrd_2stage, + dsytrd_sy2sb, dsytrd_sb2st, dsb2st_kernels, + dsyevd_2stage, dsyev_2stage, dsyevx_2stage, + dsyevr_2stage, dsbev_2stage, dsbevx_2stage, + dsbevd_2stage, dsygv_2stage, + dsysv_aa_2stage, + dsytrf_aa_2stage, dsytrs_aa_2stage, + dlaorhr_col_getrfnp, dlaorhr_col_getrfnp2, dorhr_col, +); +@lapack_embeded_underscore_objs_z=( zhetf2_rook, zhetrf_rook, zhetri_rook, zhetrs_rook, zhecon_rook, zhesv_rook, zlahef_rook, zlasyf_rook, zsytf2_rook, zsytrf_rook, zsytrs_rook, zsytri_rook, zsycon_rook, zsysv_rook, + zhetf2_rk, zhetrf_rk, zhetri_3, + zhetri_3x, zhetrs_3, zhecon_3, + zhesv_rk, zhesv_aa, zhetrf_aa, + zhetrs_aa, zlahef_aa, zlahef_rk, + zlasyf_rk, zlasyf_aa, zsyconvf_rook, + zsytrs_aa, zsytf2_rk, zsytrf_rk, + zsytrf_aa, zsytrs_3, zsytri_3, + 
zsytri_3x, zsycon_3, zsysv_rk, + zsysv_aa, zhetrd_2stage, zhetrd_he2hb, + zhetrd_hb2st, zhb2st_kernels, zheevd_2stage, + zheev_2stage, zheevx_2stage, zheevr_2stage, + zhbev_2stage, zhbevx_2stage, zhbevd_2stage, + zhegv_2stage, + zhesv_aa_2stage, zhetrf_aa_2stage, + zhetrs_aa_2stage, zsysv_aa_2stage, + zsytrf_aa_2stage, zsytrs_aa_2stage, + zlaunhr_col_getrfnp, zlaunhr_col_getrfnp2, zunhr_col ); @@ -3516,6 +3608,68 @@ use File::Spec; use File::Basename; my $dirname = File::Spec->catfile(dirname(dirname(File::Spec->rel2abs(__FILE__))), "lapack-netlib"); +if ($ARGV[12] == 1) { + @blasobjs = (@blasobjs, @bfblasobjs); + @cblasobjs = (@cblasobjs, @bfcblasobjs); +} +if ($ARGV[13] == 1) { + @blasobjs = (@blasobjs, @blasobjss); + @cblasobjs = (@cblasobjs, @cblasobjss); + @lapackobjs = (@lapackobjs, @lapackobjss); + @lapackobjs2 = (@lapackobjs2, @lapackobjs2s); + @lapackobjs2 = (@lapackobjs2, @lapackobjs2sc); + @lapackobjs2 = (@lapackobjs2, @lapackobjs2ds); + @lapack_deprecated_objs = (@lapack_deprecated_objs, @lapack_deprecated_objss); + @lapacke_deprecated_objs = (@lapacke_deprecated_objs, @lapacke_deprecated_objss); + @lapack_embeded_underscore_objs = (@lapack_embeded_underscore_objs, @lapack_embeded_underscore_objs_s); + @lapackeobjs = (@lapackeobjs, @lapackeobjss); +} +if ($ARGV[14] == 1) { + @blasobjs = (@blasobjs, @blasobjsd); + @cblasobjs = (@cblasobjs, @cblasobjsd); + @lapackobjs = (@lapackobjs, @lapackobjsd); + if ($ARGV[13] == 0) { + @lapackobjs2 = (@lapackobjs2, @lapackobjs2ds); + } + @lapackobjs2 = (@lapackobjs2, @lapackobjs2d, @lapackobjs2dz); + @lapack_deprecated_objs = (@lapack_deprecated_objs, @lapack_deprecated_objsd); + @lapacke_deprecated_objs = (@lapacke_deprecated_objs, @lapacke_deprecated_objsd); + @lapack_embeded_underscore_objs = (@lapack_embeded_underscore_objs, @lapack_embeded_underscore_objs_d); + @lapackeobjs = (@lapackeobjs, @lapackeobjsd); +} +if ($ARGV[15] == 1) { + @blasobjs = (@blasobjs, @blasobjsc); + @cblasobjs = (@cblasobjs, @cblasobjsc); + @gemm3mobjs = (@gemm3mobjs, @gemm3mobjsc); + @cblasgemm3mobjs = (@cblasgemm3mobjs, @cblasgemm3mobjsc); + @lapackobjs = (@lapackobjs, @lapackobjsc); + @lapackobjs2 = (@lapackobjs2, @lapackobjs2c, @lapackobjs2zc); + if ($ARGV[13] == 0) { + @lapackobjs2 = (@lapackobjs2, @lapackobjs2sc); + } + @lapack_deprecated_objs = (@lapack_deprecated_objs, @lapack_deprecated_objsc); + @lapacke_deprecated_objs = (@lapacke_deprecated_objs, @lapacke_deprecated_objsc); + @lapack_embeded_underscore_objs = (@lapack_embeded_underscore_objs, @lapack_embeded_underscore_objs_c); + @lapackeobjs = (@lapackeobjs, @lapackeobjsc); +} +if ($ARGV[16] == 1) { + @blasobjs = (@blasobjs, @blasobjsz); + @cblasobjs = (@cblasobjs, @cblasobjsz); + @gemm3mobjs = (@gemm3mobjs, @gemm3mobjsz); + @cblasgemm3mobjs = (@cblasgemm3mobjs, @cblasgemm3mobjsz); + @lapackobjs = (@lapackobjs, @lapackobjsz); + @lapackobjs2 = (@lapackobjs2, @lapackobjs2z); + if ($ARGV[15] == 0) { + @lapackobjs2 = (@lapackobjs2, @lapackobjs2zc); + } + if ($ARGV[14] == 0) { + @lapackobjs2 = (@lapackobjs2, @lapackobjs2dz); + } + @lapack_deprecated_objs = (@lapack_deprecated_objs, @lapack_deprecated_objsz); + @lapacke_deprecated_objs = (@lapacke_deprecated_objs, @lapacke_deprecated_objsz); + @lapack_embeded_underscore_objs = (@lapack_embeded_underscore_objs, @lapack_embeded_underscore_objs_z); + @lapackeobjs = (@lapackeobjs, @lapackeobjsz); +} if ($ARGV[8] == 1) { #ONLY_CBLAS=1 @underscore_objs = (@misc_underscore_objs); @@ -3556,9 +3710,12 @@ if ($ARGV[1] eq "x86") { @underscore_objs = (@underscore_objs, 
@gemm3mobjs); if ($ARGV[1] eq "ia64") { @underscore_objs = (@underscore_objs, @gemm3mobjs); }; if ($ARGV[1] eq "MIPS") { @underscore_objs = (@underscore_objs, @gemm3mobjs); }; - if ($ARGV[4] == 0) { @no_underscore_objs = (@cblasobjs, @misc_no_underscore_objs); + if ($ARGV[1] eq "x86_64") { @no_underscore_objs = (@no_underscore_objs, @cblasgemm3mobjs); }; + if ($ARGV[1] eq "x86") { @no_underscore_objs = (@no_underscore_objs, @cblasgemm3mobjs); }; + if ($ARGV[1] eq "ia64") { @no_underscore_objs = (@no_underscore_objs, @cblasgemm3mobjs); }; + if ($ARGV[1] eq "MIPS") { @no_underscore_objs = (@no_underscore_objs, @cblasgemm3mobjs); }; }else{ #NO_CBLAS=1 @no_underscore_objs = (@misc_no_underscore_objs); diff --git a/f_check b/f_check index b05db85bd..e9aca4ff9 100644 --- a/f_check +++ b/f_check @@ -19,7 +19,7 @@ $nofortran = 0; $compiler = join(" ", @ARGV); $compiler_bin = shift(@ARGV); - + # f77 is too ambiguous $compiler = "" if $compiler eq "f77"; @@ -32,8 +32,8 @@ if ($compiler eq "") { "xlf95", "xlf90", "xlf", "ppuf77", "ppuf95", "ppuf90", "ppuxlf", "pathf90", "pathf95", - "pgf95", "pgf90", "pgf77", - "flang", + "pgf95", "pgf90", "pgf77", "pgfortran", "nvfortran", + "flang", "egfortran", "ifort"); OUTER: @@ -64,14 +64,18 @@ if ($compiler eq "") { if (!$?) { $data = `$compiler -O2 -S ftest.f > /dev/null 2>&1 && cat ftest.s && rm -f ftest.s`; - if ($data =~ /zhoge_/) { $bu = "_"; } - if ($data =~ /GNU/) { + if ($data =~ /Fujitsu/) { + + $vendor = FUJITSU; + $openmp = "-Kopenmp"; - $data =~ /(\d)\.(\d).(\d)/; + } elsif ($data =~ /GNU/ || $data =~ /GCC/ ) { + + $data =~ /(\d+)\.(\d+).(\d+)/; $major = $1; $minor = $2; @@ -82,6 +86,9 @@ if ($compiler eq "") { if ($compiler =~ /flang/) { $vendor = FLANG; $openmp = "-fopenmp"; + } elsif ($compiler =~ /pgf/ || $compiler =~ /nvf/) { + $vendor = PGI; + $openmp = "-mp"; } else { $vendor = G77; $openmp = ""; @@ -115,7 +122,7 @@ if ($compiler eq "") { $openmp = "-mp"; } - if ($data =~ /PGF/) { + if ($data =~ /PGF/ || $data =~ /NVF/) { $vendor = PGI; $openmp = "-mp"; } @@ -130,6 +137,11 @@ if ($compiler eq "") { if ($data =~ / zho_ge__/) { $need2bu = 1; } + if ($vendor =~ /G95/) { + if ($ENV{NO_LAPACKE} != 1) { + $need2bu = ""; + } + } } if ($vendor eq "") { @@ -164,7 +176,7 @@ if ($compiler eq "") { $openmp = "-mp"; } - if ($compiler =~ /pgf/) { + if ($compiler =~ /pgf/ || $compiler =~ /nvf/) { $vendor = PGI; $bu = "_"; $openmp = "-mp"; @@ -277,6 +289,8 @@ $linker_a = ""; if ($link ne "") { $link =~ s/\-Y\sP\,/\-Y/g; + + $link =~ s/\-R\s*/\-rpath\@/g; $link =~ s/\-rpath\s+/\-rpath\@/g; @@ -315,6 +329,9 @@ if ($link ne "") { $flags =~ s/\@/\,/g; $linker_L .= "-Wl,". $flags . " " ; } + if ($flags =~ /-lgomp/ && $CC =~ /clang/) { + $flags = "-lomp"; + } if ( ($flags =~ /^\-l/) @@ -327,6 +344,8 @@ if ($link ne "") { && ($flags !~ /kernel32/) && ($flags !~ /advapi32/) && ($flags !~ /shell32/) + && ($flags !~ /omp/ || ($vendor !~ /PGI/ && $vendor !~ /FUJITSU/ && $flags =~ /omp/)) + && ($flags !~ /[0-9]+/ || ($vendor == FUJITSU && $flags =~ /^-lfj90/)) && ($flags !~ /^\-l$/) ) { $linker_l .= $flags . " "; diff --git a/getarch.c b/getarch.c index 4d960356c..f48944f36 100644 --- a/getarch.c +++ b/getarch.c @@ -82,7 +82,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifdef OS_WINDOWS #include #endif -#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__APPLE__) +#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || defined(__APPLE__) #include #include #endif @@ -90,11 +90,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include #endif +#if defined(AIX) +#include +#endif +#if defined(__x86_64__) || defined(_M_X64) #if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6)) #else +#ifndef NO_AVX512 #define NO_AVX512 #endif +#endif +#endif /* #define FORCE_P2 */ /* #define FORCE_KATMAI */ /* #define FORCE_COPPERMINE */ @@ -133,8 +140,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* #define FORCE_PPC440FP2 */ /* #define FORCE_CELL */ /* #define FORCE_SICORTEX */ -/* #define FORCE_LOONGSON3A */ -/* #define FORCE_LOONGSON3B */ +/* #define FORCE_LOONGSON3R3 */ +/* #define FORCE_LOONGSON3R4 */ /* #define FORCE_I6400 */ /* #define FORCE_P6600 */ /* #define FORCE_P5600 */ @@ -319,16 +326,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define FORCE #define FORCE_INTEL #define ARCHITECTURE "X86" +#ifdef NO_AVX2 +#define SUBARCHITECTURE "SANDYBRIDGE" +#define ARCHCONFIG "-DSANDYBRIDGE " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX" +#define LIBNAME "sandybridge" +#define CORENAME "SANDYBRIDGE" +#else #define SUBARCHITECTURE "HASWELL" #define ARCHCONFIG "-DHASWELL " \ "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \ "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX " \ - "-DFMA3" + "-DHAVE_AVX2 -DHAVE_FMA3 -DFMA3" #define LIBNAME "haswell" #define CORENAME "HASWELL" #endif +#endif #ifdef FORCE_SKYLAKEX #ifdef NO_AVX512 @@ -341,7 +359,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX " \ - "-DFMA3" + "-DHAVE_AVX2 -DHAVE_FMA3 -DFMA3" #define LIBNAME "haswell" #define CORENAME "HASWELL" #else @@ -354,12 +372,42 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX " \ - "-DFMA3 -DHAVE_AVX512VL -march=skylake-avx512" + "-DHAVE_AVX2 -DHAVE_FMA3 -DFMA3 -DHAVE_AVX512VL -march=skylake-avx512" #define LIBNAME "skylakex" #define CORENAME "SKYLAKEX" #endif #endif +#ifdef FORCE_COOPERLAKE +#ifdef NO_AVX512 +#define FORCE +#define FORCE_INTEL +#define ARCHITECTURE "X86" +#define SUBARCHITECTURE "HASWELL" +#define ARCHCONFIG "-DHASWELL " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX " \ + "-DHAVE_AVX2 -DHAVE_FMA3 -DFMA3" +#define LIBNAME "haswell" +#define CORENAME "HASWELL" +#else +#define FORCE +#define FORCE_INTEL +#define ARCHITECTURE "X86" +#define SUBARCHITECTURE "COOPERLAKE" +#define ARCHCONFIG "-DCOOPERLAKE " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX " \ + "-DHAVE_AVX2 -DHAVE_FMA3 -DFMA3 -DHAVE_AVX512VL -DHAVE_AVX512BF16 -march=cooperlake" +#define LIBNAME "cooperlake" +#define CORENAME "COOPERLAKE" +#endif +#endif + #ifdef FORCE_ATOM #define FORCE #define FORCE_INTEL @@ -457,7 +505,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. "-DDTB_DEFAULT_ENTRIES=32 -DDTB_SIZE=4096 " \ "-DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 " \ "-DHAVE_SSE4A -DHAVE_MISALIGNSSE -DHAVE_128BITFPU -DHAVE_FASTMOVU " \ - "-DHAVE_AVX -DHAVE_FMA4" + "-DHAVE_AVX" #define LIBNAME "bulldozer" #define CORENAME "BULLDOZER" #endif @@ -473,7 +521,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ "-DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 " \ "-DHAVE_SSE4A -DHAVE_MISALIGNSSE -DHAVE_128BITFPU -DHAVE_FASTMOVU -DHAVE_CFLUSH " \ - "-DHAVE_AVX -DHAVE_FMA4 -DHAVE_FMA3" + "-DHAVE_AVX -DHAVE_FMA3" #define LIBNAME "piledriver" #define CORENAME "PILEDRIVER" #endif @@ -489,7 +537,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ "-DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 " \ "-DHAVE_SSE4A -DHAVE_MISALIGNSSE -DHAVE_128BITFPU -DHAVE_FASTMOVU -DHAVE_CFLUSH " \ - "-DHAVE_AVX -DHAVE_FMA4 -DHAVE_FMA3" + "-DHAVE_AVX -DHAVE_FMA3" #define LIBNAME "steamroller" #define CORENAME "STEAMROLLER" #endif @@ -505,7 +553,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ "-DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 " \ "-DHAVE_SSE4A -DHAVE_MISALIGNSSE -DHAVE_128BITFPU -DHAVE_FASTMOVU -DHAVE_CFLUSH " \ - "-DHAVE_AVX -DHAVE_FMA4 -DHAVE_FMA3" + "-DHAVE_AVX -DHAVE_FMA3" #define LIBNAME "excavator" #define CORENAME "EXCAVATOR" #endif @@ -514,6 +562,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define FORCE #define FORCE_INTEL #define ARCHITECTURE "X86" +#ifdef NO_AVX2 +#define SUBARCHITECTURE "SANDYBRIDGE" +#define ARCHCONFIG "-DSANDYBRIDGE " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX" +#define LIBNAME "sandybridge" +#define CORENAME "SANDYBRIDGE" +#else #define SUBARCHITECTURE "ZEN" #define ARCHCONFIG "-DZEN " \ "-DL1_CODE_SIZE=32768 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=8 " \ @@ -524,10 +582,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ "-DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 " \ "-DHAVE_SSE4A -DHAVE_MISALIGNSSE -DHAVE_128BITFPU -DHAVE_FASTMOVU -DHAVE_CFLUSH " \ - "-DHAVE_AVX -DHAVE_FMA3 -DFMA3" + "-DHAVE_AVX -DHAVE_AVX2 -DHAVE_FMA3 -DFMA3" #define LIBNAME "zen" #define CORENAME "ZEN" #endif +#endif #ifdef FORCE_SSE_GENERIC @@ -650,6 +709,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CORENAME "POWER9" #endif +#if defined(FORCE_POWER10) +#define FORCE +#define ARCHITECTURE "POWER" +#define SUBARCHITECTURE "POWER10" +#define SUBDIRNAME "power" +#define ARCHCONFIG "-DPOWER10 " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=128 " \ + "-DL2_SIZE=4194304 -DL2_LINESIZE=128 " \ + "-DDTB_DEFAULT_ENTRIES=128 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=8 " +#define LIBNAME "power10" +#define CORENAME "POWER10" +#endif + #ifdef FORCE_PPCG4 #define FORCE #define ARCHITECTURE "POWER" @@ -742,31 +814,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif -#ifdef FORCE_LOONGSON3A +#if defined FORCE_LOONGSON3R3 || defined FORCE_LOONGSON3A || defined FORCE_LOONGSON3B #define FORCE #define ARCHITECTURE "MIPS" -#define SUBARCHITECTURE "LOONGSON3A" +#define SUBARCHITECTURE "LOONGSON3R3" #define SUBDIRNAME "mips64" -#define ARCHCONFIG "-DLOONGSON3A " \ +#define ARCHCONFIG "-DLOONGSON3R3 " \ "-DL1_DATA_SIZE=65536 -DL1_DATA_LINESIZE=32 " \ "-DL2_SIZE=512488 -DL2_LINESIZE=32 " \ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=4 " -#define LIBNAME "loongson3a" -#define CORENAME "LOONGSON3A" +#define LIBNAME "loongson3r3" +#define CORENAME "LOONGSON3R3" #else #endif -#ifdef FORCE_LOONGSON3B +#ifdef FORCE_LOONGSON3R4 #define FORCE #define ARCHITECTURE "MIPS" -#define SUBARCHITECTURE "LOONGSON3B" +#define SUBARCHITECTURE "LOONGSON3R4" #define SUBDIRNAME "mips64" -#define ARCHCONFIG "-DLOONGSON3B " \ +#define ARCHCONFIG "-DLOONGSON3R4 " \ "-DL1_DATA_SIZE=65536 -DL1_DATA_LINESIZE=32 " \ "-DL2_SIZE=512488 -DL2_LINESIZE=32 " \ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=4 " -#define LIBNAME "loongson3b" -#define CORENAME "LOONGSON3B" +#define LIBNAME "loongson3r4" +#define CORENAME "LOONGSON3R4" #else #endif @@ -812,6 +884,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#else #endif +#ifdef FORCE_MIPS1004K +#define FORCE +#define ARCHITECTURE "MIPS" +#define SUBARCHITECTURE "MIPS1004K" +#define SUBDIRNAME "mips" +#define ARCHCONFIG "-DMIPS1004K " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=32 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=32 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=8 " +#define LIBNAME "mips1004K" +#define CORENAME "MIPS1004K" +#else +#endif + +#ifdef FORCE_MIPS24K +#define FORCE +#define ARCHITECTURE "MIPS" +#define SUBARCHITECTURE "MIPS24K" +#define SUBDIRNAME "mips" +#define ARCHCONFIG "-DMIPS24K " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=32 " \ + "-DL2_SIZE=32768 -DL2_LINESIZE=32 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=8 " +#define LIBNAME "mips24K" +#define CORENAME "MIPS24K" +#else +#endif + #ifdef FORCE_I6500 #define FORCE #define ARCHITECTURE "MIPS" @@ -905,6 +1005,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else #endif +#ifdef FORCE_RISCV64_GENERIC +#define FORCE +#define ARCHITECTURE "RISCV64" +#define SUBARCHITECTURE "RISCV64_GENERIC" +#define SUBDIRNAME "riscv64" +#define ARCHCONFIG "-DRISCV64_GENERIC " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=32 " \ + "-DL2_SIZE=1048576 -DL2_LINESIZE=32 " \ + "-DDTB_DEFAULT_ENTRIES=128 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=4 " +#define LIBNAME "riscv64_generic" +#define CORENAME "RISCV64_GENERIC" +#else +#endif + #ifdef FORCE_CORTEXA15 #define FORCE #define ARCHITECTURE "ARM" @@ -1028,6 +1142,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else #endif +#ifdef FORCE_NEOVERSEN1 +#define FORCE +#define ARCHITECTURE "ARM64" +#define SUBARCHITECTURE "NEOVERSEN1" +#define SUBDIRNAME "arm64" +#define ARCHCONFIG "-DNEOVERSEN1 " \ + "-DL1_CODE_SIZE=65536 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=4 " \ + "-DL1_DATA_SIZE=65536 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=4 " \ + "-DL2_SIZE=1048576 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=16 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8 " \ + "-march=armv8.2-a -mtune=cortex-a72" +#define LIBNAME "neoversen1" +#define CORENAME "NEOVERSEN1" +#else +#endif + + #ifdef FORCE_FALKOR #define FORCE #define ARCHITECTURE "ARM64" @@ -1093,6 +1225,54 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#else #endif +#ifdef FORCE_EMAG8180 +#define ARMV8 +#define FORCE +#define ARCHITECTURE "ARM64" +#define SUBARCHITECTURE "EMAG8180" +#define SUBDIRNAME "arm64" +#define ARCHCONFIG "-DEMAG8180 " \ + "-DL1_CODE_SIZE=32768 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=8 " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=8 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=8 " \ + "-DL3_SIZE=33554432 -DL3_LINESIZE=64 -DL3_ASSOCIATIVE=32 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" +#define LIBNAME "emag8180" +#define CORENAME "EMAG8180" +#endif + +#ifdef FORCE_THUNDERX3T110 +#define ARMV8 +#define FORCE +#define ARCHITECTURE "ARM64" +#define SUBARCHITECTURE "THUNDERX3T110" +#define SUBDIRNAME "arm64" +#define ARCHCONFIG "-DTHUNDERX3T110 " \ + "-DL1_CODE_SIZE=65536 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=8 " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=8 " \ + "-DL2_SIZE=524288 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=8 " \ + "-DL3_SIZE=94371840 -DL3_LINESIZE=64 -DL3_ASSOCIATIVE=32 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" +#define LIBNAME "thunderx3t110" +#define CORENAME "THUNDERX3T110" +#else +#endif + +#ifdef FORCE_VORTEX +#define FORCE +#define ARCHITECTURE "ARM64" +#define SUBARCHITECTURE "VORTEX" +#define SUBDIRNAME "arm64" +#define ARCHCONFIG "-DVORTEX " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=32 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" +#define LIBNAME "vortex" +#define CORENAME "VORTEX" +#endif #ifdef FORCE_ZARCH_GENERIC #define FORCE @@ -1124,6 +1304,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CORENAME "Z14" #endif +#ifdef FORCE_C910V +#define FORCE +#define ARCHITECTURE "RISCV64" +#define SUBARCHITECTURE "C910V" +#define SUBDIRNAME "riscv64" +#define ARCHCONFIG "-DC910V " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=32 " \ + "-DL2_SIZE=1048576 -DL2_LINESIZE=32 " \ + "-DDTB_DEFAULT_ENTRIES=128 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=4 " +#define LIBNAME "c910v" +#define CORENAME "C910V" +#else +#endif + + #ifndef FORCE #ifdef USER_TARGET @@ -1178,6 +1373,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define OPENBLAS_SUPPORTED #endif +#ifdef __riscv +#include "cpuid_riscv64.c" +#define OPENBLAS_SUPPORTED +#endif + #ifdef __arm__ #include "cpuid_arm.c" #define OPENBLAS_SUPPORTED @@ -1201,7 +1401,7 @@ static int get_num_cores(void) { #ifdef OS_WINDOWS SYSTEM_INFO sysinfo; -#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__APPLE__) +#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || defined(__APPLE__) int m[2], count; size_t len; #endif @@ -1215,13 +1415,18 @@ static int get_num_cores(void) { GetSystemInfo(&sysinfo); return sysinfo.dwNumberOfProcessors; -#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__APPLE__) +#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || defined(__APPLE__) m[0] = CTL_HW; m[1] = HW_NCPU; len = sizeof(int); sysctl(m, 2, &count, &len, NULL, 0); return count; + +#elif defined(AIX) + //returns the number of processors which are currently online + return sysconf(_SC_NPROCESSORS_ONLN); + #else return 2; #endif @@ -1258,8 +1463,41 @@ int main(int argc, char *argv[]){ printf("NUM_CORES=%d\n", get_num_cores()); -#if defined(__arm__) && !defined(FORCE) +#if defined(__arm__) +#if !defined(FORCE) + fprintf(stderr,"get features!\n"); get_features(); +#else + fprintf(stderr,"split archconfig!\n"); + sprintf(buffer, "%s", ARCHCONFIG); + + p = &buffer[0]; + + while (*p) { + if ((*p == '-') && (*(p + 1) == 'D')) { + p += 2; + if (*p != 'H') { + while( (*p != ' ') && (*p != '-') && (*p != '\0') && (*p != '\n')) {p++; } + if (*p == '-') continue; + } + while ((*p != ' ') && (*p != '\0')) { + + if (*p == '=') { + printf("="); + p ++; + while ((*p != ' ') && (*p != '\0')) { + printf("%c", *p); + p ++; + } + } else { + printf("%c", *p); + p ++; + if ((*p == ' ') || (*p =='\0')) printf("=1\n"); + } + } + } else p ++; + } +#endif #endif @@ -1298,6 +1536,15 @@ int main(int argc, char *argv[]){ #endif #endif +#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +printf("__BYTE_ORDER__=__ORDER_BIG_ENDIAN__\n"); +#elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ > 0 +printf("__BYTE_ORDER__=__ORDER_BIG_ENDIAN__\n"); +#endif +#if defined(_CALL_ELF) && (_CALL_ELF == 2) +printf("ELF_VERSION=2\n"); +#endif + #ifdef MAKE_NB_JOBS #if MAKE_NB_JOBS > 0 printf("MAKE += -j %d\n", MAKE_NB_JOBS); diff --git a/getarch_2nd.c b/getarch_2nd.c index cf9c578cb..c390ef52c 100644 --- a/getarch_2nd.c +++ b/getarch_2nd.c @@ -9,6 +9,8 @@ int main(int argc, char **argv) { if ( (argc <= 1) || ((argc >= 2) && (*argv[1] == '0'))) { + printf("SBGEMM_UNROLL_M=%d\n", SBGEMM_DEFAULT_UNROLL_M); + printf("SBGEMM_UNROLL_N=%d\n", SBGEMM_DEFAULT_UNROLL_N); printf("SGEMM_UNROLL_M=%d\n", SGEMM_DEFAULT_UNROLL_M); printf("SGEMM_UNROLL_N=%d\n", SGEMM_DEFAULT_UNROLL_N); printf("DGEMM_UNROLL_M=%d\n", DGEMM_DEFAULT_UNROLL_M); diff --git a/interface/CMakeLists.txt b/interface/CMakeLists.txt index 5ea39f864..5346ecadd 100644 --- a/interface/CMakeLists.txt +++ b/interface/CMakeLists.txt @@ -83,8 +83,12 @@ foreach (CBLAS_FLAG ${CBLAS_FLAGS}) GenerateNamedObjects("${BLAS3_MANGLED_SOURCES}" "" "" ${CBLAS_FLAG} "" "" false ${MANGLE_COMPLEX}) #sdsdot, dsdot + if (BUILD_SINGLE OR BUILD_DOUBLE) GenerateNamedObjects("sdsdot.c" "" "sdsdot" ${CBLAS_FLAG} "" "" true "SINGLE") +endif () +if (BUILD_DOUBLE) GenerateNamedObjects("dsdot.c" "" "dsdot" ${CBLAS_FLAG} "" "" true "SINGLE") +endif () # trmm is trsm with a compiler flag set 
GenerateNamedObjects("trsm.c" "TRMM" "trmm" ${CBLAS_FLAG}) @@ -115,7 +119,7 @@ foreach (float_type ${FLOAT_TYPES}) GenerateNamedObjects("syr2k.c" "HEMM" "her2k" ${CBLAS_FLAG} "" "" false ${float_type}) if (USE_GEMM3M) - GenerateNamedObjects("gemm.c" "GEMM3M" "gemm3m" false "" "" false ${float_type}) + GenerateNamedObjects("gemm.c" "GEMM3M" "gemm3m" ${CBLAS_FLAG} "" "" false ${float_type}) endif() endif () if (${float_type} STREQUAL "COMPLEX") @@ -167,4 +171,31 @@ if (NOT DEFINED NO_LAPACK) GenerateNamedObjects("${LAPACK_MANGLED_SOURCES}" "" "" 0 "" "" 0 3) endif () +if ( BUILD_COMPLEX AND NOT BUILD_SINGLE) + GenerateNamedObjects("scal.c" "" "scal" 0 "" "" false "SINGLE") + GenerateNamedObjects("copy.c" "" "copy" 0 "" "" false "SINGLE") + GenerateNamedObjects("dot.c" "" "dot" 0 "" "" false "SINGLE") + GenerateNamedObjects("rot.c" "" "rot" 0 "" "" false "SINGLE") + GenerateNamedObjects("nrm2.c" "" "nrm2" 0 "" "" false "SINGLE") + GenerateNamedObjects("gemv.c" "" "gemv" 0 "" "" false "SINGLE") + GenerateNamedObjects("gemm.c" "" "gemm" 0 "" "" false "SINGLE") + GenerateNamedObjects("asum.c" "" "asum" 0 "" "" false "SINGLE") + GenerateNamedObjects("swap.c" "" "swap" 0 "" "" false "SINGLE") + GenerateNamedObjects("axpy.c" "" "axpy" 0 "" "" false "SINGLE") + GenerateNamedObjects("imax.c" "USE_ABS" "i*amax" 0 "" "" false "SINGLE") +endif () +if ( BUILD_COMPLEX16 AND NOT BUILD_DOUBLE) + GenerateNamedObjects("scal.c" "" "scal" 0 "" "" false "DOUBLE") + GenerateNamedObjects("copy.c" "" "copy" 0 "" "" false "DOUBLE") + GenerateNamedObjects("dot.c" "" "dot" 0 "" "" false "DOUBLE") + GenerateNamedObjects("rot.c" "" "rot" 0 "" "" false "DOUBLE") + GenerateNamedObjects("nrm2.c" "" "nrm2" 0 "" "" false "DOUBLE") + GenerateNamedObjects("gemv.c" "" "gemv" 0 "" "" false "DOUBLE") + GenerateNamedObjects("gemm.c" "" "gemm" 0 "" "" false "DOUBLE") + GenerateNamedObjects("asum.c" "" "asum" 0 "" "" false "DOUBLE") + GenerateNamedObjects("swap.c" "" "swap" 0 "" "" false "DOUBLE") + GenerateNamedObjects("axpy.c" "" "axpy" 0 "" "" false "DOUBLE") + GenerateNamedObjects("imax.c" "USE_ABS" "i*amax" 0 "" "" false "DOUBLE") +endif () + add_library(interface OBJECT ${OPENBLAS_SRC}) diff --git a/interface/Makefile b/interface/Makefile index f0577796d..fab403c82 100644 --- a/interface/Makefile +++ b/interface/Makefile @@ -19,7 +19,7 @@ ifeq ($(ARCH), MIPS) SUPPORT_GEMM3M = 1 endif -ifndef NO_FBLAS +ifneq ($(NO_FBLAS), 1) SBLAS1OBJS = \ saxpy.$(SUFFIX) sswap.$(SUFFIX) \ @@ -46,6 +46,12 @@ SBLAS3OBJS = \ somatcopy.$(SUFFIX) simatcopy.$(SUFFIX)\ sgeadd.$(SUFFIX) +ifeq ($(BUILD_BFLOAT16),1) +SBBLAS1OBJS = sbdot.$(SUFFIX) +SBBLAS2OBJS = sbgemv.$(SUFFIX) +SBBLAS3OBJS = sbgemm.$(SUFFIX) +SBEXTOBJS = sbstobf16.$(SUFFIX) sbdtobf16.$(SUFFIX) sbf16tos.$(SUFFIX) dbf16tod.$(SUFFIX) +endif DBLAS1OBJS = \ daxpy.$(SUFFIX) dswap.$(SUFFIX) \ @@ -140,7 +146,7 @@ ZBLAS3OBJS += zgemm3m.$(SUFFIX) endif -ifdef EXPRECISION +ifeq ($(EXPRECISION), 1) QBLAS1OBJS = \ qaxpy.$(SUFFIX) qswap.$(SUFFIX) \ @@ -277,6 +283,13 @@ CSBLAS3OBJS = \ cblas_ssyrk.$(SUFFIX) cblas_ssyr2k.$(SUFFIX) cblas_somatcopy.$(SUFFIX) cblas_simatcopy.$(SUFFIX)\ cblas_sgeadd.$(SUFFIX) +ifeq ($(BUILD_BFLOAT16),1) +CSBBLAS1OBJS = cblas_sbdot.$(SUFFIX) +CSBBLAS2OBJS = cblas_sbgemv.$(SUFFIX) +CSBBLAS3OBJS = cblas_sbgemm.$(SUFFIX) +CSBEXTOBJS = cblas_sbstobf16.$(SUFFIX) cblas_sbdtobf16.$(SUFFIX) cblas_sbf16tos.$(SUFFIX) cblas_dbf16tod.$(SUFFIX) +endif + CDBLAS1OBJS = \ cblas_idamax.$(SUFFIX) cblas_idamin.$(SUFFIX) cblas_dasum.$(SUFFIX) cblas_daxpy.$(SUFFIX) \ cblas_dcopy.$(SUFFIX) 
cblas_ddot.$(SUFFIX) \ @@ -303,7 +316,7 @@ CCBLAS1OBJS = \ cblas_cscal.$(SUFFIX) cblas_csscal.$(SUFFIX) \ cblas_cswap.$(SUFFIX) cblas_scnrm2.$(SUFFIX) \ cblas_caxpby.$(SUFFIX) \ - cblas_icmin.$(SUFFIX) cblas_icmax.$(SUFFIX) cblas_scsum.$(SUFFIX) + cblas_icmin.$(SUFFIX) cblas_icmax.$(SUFFIX) cblas_scsum.$(SUFFIX) cblas_csrot.$(SUFFIX) cblas_crotg.$(SUFFIX) CCBLAS2OBJS = \ cblas_cgemv.$(SUFFIX) cblas_cgerc.$(SUFFIX) cblas_cgeru.$(SUFFIX) \ @@ -318,7 +331,10 @@ CCBLAS3OBJS = \ cblas_csyrk.$(SUFFIX) cblas_csyr2k.$(SUFFIX) \ cblas_chemm.$(SUFFIX) cblas_cherk.$(SUFFIX) cblas_cher2k.$(SUFFIX) \ cblas_comatcopy.$(SUFFIX) cblas_cimatcopy.$(SUFFIX)\ - cblas_cgeadd.$(SUFFIX) cblas_xerbla.$(SUFFIX) + cblas_cgeadd.$(SUFFIX) + +CXERBLAOBJ = \ + cblas_xerbla.$(SUFFIX) @@ -330,7 +346,7 @@ CZBLAS1OBJS = \ cblas_zscal.$(SUFFIX) cblas_zdscal.$(SUFFIX) \ cblas_zswap.$(SUFFIX) cblas_dznrm2.$(SUFFIX) \ cblas_zaxpby.$(SUFFIX) \ - cblas_izmin.$(SUFFIX) cblas_izmax.$(SUFFIX) cblas_dzsum.$(SUFFIX) + cblas_izmin.$(SUFFIX) cblas_izmax.$(SUFFIX) cblas_dzsum.$(SUFFIX) cblas_zdrot.$(SUFFIX) cblas_zrotg.$(SUFFIX) CZBLAS2OBJS = \ @@ -360,13 +376,16 @@ CZBLAS3OBJS += cblas_zgemm3m.$(SUFFIX) endif -ifndef NO_CBLAS +ifneq ($(NO_CBLAS), 1) override CFLAGS += -I. SBLAS1OBJS += $(CSBLAS1OBJS) SBLAS2OBJS += $(CSBLAS2OBJS) SBLAS3OBJS += $(CSBLAS3OBJS) +SBBLAS1OBJS += $(CSBBLAS1OBJS) +SBBLAS2OBJS += $(CSBBLAS2OBJS) +SBBLAS3OBJS += $(CSBBLAS3OBJS) DBLAS1OBJS += $(CDBLAS1OBJS) DBLAS2OBJS += $(CDBLAS2OBJS) DBLAS3OBJS += $(CDBLAS3OBJS) @@ -377,9 +396,13 @@ ZBLAS1OBJS += $(CZBLAS1OBJS) ZBLAS2OBJS += $(CZBLAS2OBJS) ZBLAS3OBJS += $(CZBLAS3OBJS) +SBEXTOBJS += $(CSBEXTOBJS) + +CBAUXOBJS += $(CXERBLAOBJ) endif SBLASOBJS = $(SBLAS1OBJS) $(SBLAS2OBJS) $(SBLAS3OBJS) +SBBLASOBJS = $(SBBLAS1OBJS) $(SBBLAS2OBJS) $(SBBLAS3OBJS) DBLASOBJS = $(DBLAS1OBJS) $(DBLAS2OBJS) $(DBLAS3OBJS) QBLASOBJS = $(QBLAS1OBJS) $(QBLAS2OBJS) $(QBLAS3OBJS) CBLASOBJS = $(CBLAS1OBJS) $(CBLAS2OBJS) $(CBLAS3OBJS) @@ -394,7 +417,7 @@ XBLASOBJS = $(XBLAS1OBJS) $(XBLAS2OBJS) $(XBLAS3OBJS) SLAPACKOBJS = \ sgetrf.$(SUFFIX) sgetrs.$(SUFFIX) spotrf.$(SUFFIX) sgetf2.$(SUFFIX) \ spotf2.$(SUFFIX) slaswp.$(SUFFIX) sgesv.$(SUFFIX) slauu2.$(SUFFIX) \ - slauum.$(SUFFIX) strti2.$(SUFFIX) strtri.$(SUFFIX) + slauum.$(SUFFIX) strti2.$(SUFFIX) strtri.$(SUFFIX) strtrs.$(SUFFIX) #DLAPACKOBJS = \ @@ -405,26 +428,24 @@ SLAPACKOBJS = \ DLAPACKOBJS = \ dgetrf.$(SUFFIX) dgetrs.$(SUFFIX) dpotrf.$(SUFFIX) dgetf2.$(SUFFIX) \ dpotf2.$(SUFFIX) dlaswp.$(SUFFIX) dgesv.$(SUFFIX) dlauu2.$(SUFFIX) \ - dlauum.$(SUFFIX) dtrti2.$(SUFFIX) dtrtri.$(SUFFIX) + dlauum.$(SUFFIX) dtrti2.$(SUFFIX) dtrtri.$(SUFFIX) dtrtrs.$(SUFFIX) QLAPACKOBJS = \ qgetf2.$(SUFFIX) qgetrf.$(SUFFIX) qlauu2.$(SUFFIX) qlauum.$(SUFFIX) \ qpotf2.$(SUFFIX) qpotrf.$(SUFFIX) qtrti2.$(SUFFIX) qtrtri.$(SUFFIX) \ - qlaswp.$(SUFFIX) qgetrs.$(SUFFIX) qgesv.$(SUFFIX) qpotri.$(SUFFIX) \ - + qlaswp.$(SUFFIX) qtrtrs.$(SUFFIX) qgesv.$(SUFFIX) qpotri.$(SUFFIX) \ + qtrtrs.$(SUFFIX) #CLAPACKOBJS = \ # cgetrf.$(SUFFIX) cgetrs.$(SUFFIX) cpotrf.$(SUFFIX) cgetf2.$(SUFFIX) \ # cpotf2.$(SUFFIX) claswp.$(SUFFIX) cgesv.$(SUFFIX) clauu2.$(SUFFIX) \ # clauum.$(SUFFIX) ctrti2.$(SUFFIX) ctrtri.$(SUFFIX) cpotri.$(SUFFIX) - CLAPACKOBJS = \ cgetrf.$(SUFFIX) cgetrs.$(SUFFIX) cpotrf.$(SUFFIX) cgetf2.$(SUFFIX) \ cpotf2.$(SUFFIX) claswp.$(SUFFIX) cgesv.$(SUFFIX) clauu2.$(SUFFIX) \ - clauum.$(SUFFIX) ctrti2.$(SUFFIX) ctrtri.$(SUFFIX) - + clauum.$(SUFFIX) ctrti2.$(SUFFIX) ctrtri.$(SUFFIX) ctrtrs.$(SUFFIX) #ZLAPACKOBJS = \ # zgetrf.$(SUFFIX) zgetrs.$(SUFFIX) zpotrf.$(SUFFIX) 
zgetf2.$(SUFFIX) \ @@ -435,13 +456,14 @@ CLAPACKOBJS = \ ZLAPACKOBJS = \ zgetrf.$(SUFFIX) zgetrs.$(SUFFIX) zpotrf.$(SUFFIX) zgetf2.$(SUFFIX) \ zpotf2.$(SUFFIX) zlaswp.$(SUFFIX) zgesv.$(SUFFIX) zlauu2.$(SUFFIX) \ - zlauum.$(SUFFIX) ztrti2.$(SUFFIX) ztrtri.$(SUFFIX) + zlauum.$(SUFFIX) ztrti2.$(SUFFIX) ztrtri.$(SUFFIX) ztrtrs.$(SUFFIX) XLAPACKOBJS = \ xgetf2.$(SUFFIX) xgetrf.$(SUFFIX) xlauu2.$(SUFFIX) xlauum.$(SUFFIX) \ xpotf2.$(SUFFIX) xpotrf.$(SUFFIX) xtrti2.$(SUFFIX) xtrtri.$(SUFFIX) \ - xlaswp.$(SUFFIX) xgetrs.$(SUFFIX) xgesv.$(SUFFIX) xpotri.$(SUFFIX) \ + xlaswp.$(SUFFIX) xtrtrs.$(SUFFIX) xgesv.$(SUFFIX) xpotri.$(SUFFIX) \ + xtrtrs.$(SUFFIX) ifneq ($(NO_LAPACK), 1) SBLASOBJS += $(SLAPACKOBJS) @@ -453,18 +475,53 @@ ZBLASOBJS += $(ZLAPACKOBJS) endif -FUNCOBJS = $(SBLASOBJS) $(DBLASOBJS) $(CBLASOBJS) $(ZBLASOBJS) +ifneq ($(BUILD_SINGLE),1) + SBLASOBJS= +ifeq ($(BUILD_DOUBLE),1) + SBLASOBJS = dsdot.$(SUFFIX) cblas_dsdot.$(SUFFIX) strsm.$(SUFFIX) \ + sgetrs.$(SUFFIX) sgetrf.$(SUFFIX) spotf2.$(SUFFIX) spotrf.$(SUFFIX) \ + ssyrk.$(SUFFIX) sgemv.$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX),1) + SBLASOBJS = \ + sdot.$(SUFFIX) srot.$(SUFFIX) snrm2.$(SUFFIX) sswap.$(SUFFIX) \ + isamax.$(SUFFIX) saxpy.$(SUFFIX) sscal.$(SUFFIX) scopy.$(SUFFIX) \ + sgemv.$(SUFFIX) sgemm.$(SUFFIX) +endif +endif +ifneq ($(BUILD_DOUBLE),1) + DBLASOBJS= +ifeq ($(BUILD_COMPLEX16),1) + DBLASOBJS = \ + ddot.$(SUFFIX) drot.$(SUFFIX) dnrm2.$(SUFFIX) dswap.$(SUFFIX) \ + idamax.$(SUFFIX) daxpy.$(SUFFIX) dscal.$(SUFFIX) dcopy.$(SUFFIX) \ + dgemv.$(SUFFIX) dgemm.$(SUFFIX) +endif +endif +ifneq ($(BUILD_COMPLEX),1) + CBLASOBJS= +ifeq ($(BUILD_COMPLEX16),1) + CBLASOBJS = cgetrs.$(SUFFIX) cblas_cdotu_sub.$(SUFFIX) cgetrf.$(SUFFIX) \ + cpotrf.$(SUFFIX) ctrsm.$(SUFFIX) cblas_cdotc_sub.$(SUFFIX) +endif +endif +ifneq ($(BUILD_COMPLEX16),1) + ZBLASOBJS= +endif + +FUNCOBJS = $(SBEXTOBJS) $(CXERBLAOBJS) $(SBBLASOBJS) $(SBLASOBJS) $(DBLASOBJS) $(CBLASOBJS) $(ZBLASOBJS) -ifdef EXPRECISION +ifeq ($(EXPRECISION), 1) FUNCOBJS += $(QBLASOBJS) $(XBLASOBJS) endif -ifdef QUAD_PRECISION +ifeq ($(QUAD_PRECISION), 1) FUNCOBJS += $(QBLASOBJS) $(XBLASOBJS) endif FUNCALLFILES = $(FUNCOBJS:.$(SUFFIX)=) + include $(TOPDIR)/Makefile.tail all :: libs @@ -481,17 +538,20 @@ endif clean :: @rm -f functable.h -level1 : $(SBLAS1OBJS) $(DBLAS1OBJS) $(QBLAS1OBJS) $(CBLAS1OBJS) $(ZBLAS1OBJS) $(XBLAS1OBJS) +level1 : $(SBEXTOBJS) $(SBBLAS1OBJS) $(SBLAS1OBJS) $(DBLAS1OBJS) $(QBLAS1OBJS) $(CBLAS1OBJS) $(ZBLAS1OBJS) $(XBLAS1OBJS) + $(AR) $(ARFLAGS) -ru $(TOPDIR)/$(LIBNAME) $^ + +level2 : $(SBBLAS2OBJS) $(SBLAS2OBJS) $(DBLAS2OBJS) $(QBLAS2OBJS) $(CBLAS2OBJS) $(ZBLAS2OBJS) $(XBLAS2OBJS) $(AR) $(ARFLAGS) -ru $(TOPDIR)/$(LIBNAME) $^ -level2 : $(SBLAS2OBJS) $(DBLAS2OBJS) $(QBLAS2OBJS) $(CBLAS2OBJS) $(ZBLAS2OBJS) $(XBLAS2OBJS) +level3 : $(SBBLAS3OBJS) $(SBLAS3OBJS) $(DBLAS3OBJS) $(QBLAS3OBJS) $(CBLAS3OBJS) $(ZBLAS3OBJS) $(XBLAS3OBJS) $(AR) $(ARFLAGS) -ru $(TOPDIR)/$(LIBNAME) $^ -level3 : $(SBLAS3OBJS) $(DBLAS3OBJS) $(QBLAS3OBJS) $(CBLAS3OBJS) $(ZBLAS3OBJS) $(XBLAS3OBJS) +aux : $(CBAUXOBJS) $(AR) $(ARFLAGS) -ru $(TOPDIR)/$(LIBNAME) $^ -$(CSBLASOBJS) $(CSBLASOBJS_P) $(CDBLASOBJS) $(CDBLASOBJS_P) $(CQBLASOBJS) $(CQBLASOBJS_P) \ -$(CCBLASOBJS) $(CCBLASOBJS_P) $(CZBLASOBJS) $(CZBLASOBJS_P) $(CXBLASOBJS) $(CXBLASOBJS_P) : override CFLAGS += -DCBLAS +$(CSBBLASOBJS) $(CSBBLASOBJS_P) $(CSBLASOBJS) $(CSBLASOBJS_P) $(CDBLASOBJS) $(CDBLASOBJS_P) $(CQBLASOBJS) $(CQBLASOBJS_P) \ +$(CCBLASOBJS) $(CCBLASOBJS_P) $(CZBLASOBJS) $(CZBLASOBJS_P) $(CXBLASOBJS) $(CXBLASOBJS_P) $(CBAUXOBJS_P) : override 
CFLAGS += -DCBLAS srot.$(SUFFIX) srot.$(PSUFFIX) : rot.c $(CC) $(CFLAGS) -c $< -o $(@F) @@ -715,6 +775,19 @@ sdsdot.$(SUFFIX) sdsdot.$(PSUFFIX) : sdsdot.c dsdot.$(SUFFIX) dsdot.$(PSUFFIX) : dsdot.c $(CC) $(CFLAGS) -c $< -o $(@F) +ifeq ($(BUILD_BFLOAT16),1) +sbdot.$(SUFFIX) sbdot.$(PSUFFIX) : bf16dot.c + $(CC) $(CFLAGS) -c $< -o $(@F) +sbstobf16.$(SUFFIX) sbstobf16.$(PSUFFIX) : tobf16.c + $(CC) $(CFLAGS) -DSINGLE_PREC -UDOUBLE_PREC -c $< -o $(@F) +sbdtobf16.$(SUFFIX) sbdtobf16.$(PSUFFIX) : tobf16.c + $(CC) $(CFLAGS) -USINGLE_PREC -DDOUBLE_PREC -c $< -o $(@F) +sbf16tos.$(SUFFIX) sbf16tos.$(PSUFFIX) : bf16to.c + $(CC) $(CFLAGS) -DSINGLE_PREC -UDOUBLE_PREC -c $< -o $(@F) +dbf16tod.$(SUFFIX) dbf16tod.$(PSUFFIX) : bf16to.c + $(CC) $(CFLAGS) -USINGLE_PREC -DDOUBLE_PREC -c $< -o $(@F) +endif + sdot.$(SUFFIX) sdot.$(PSUFFIX) : dot.c $(CC) $(CFLAGS) -c $< -o $(@F) @@ -859,6 +932,11 @@ xgeru.$(SUFFIX) xgeru.$(PSUFFIX) : zger.c xgerc.$(SUFFIX) xgerc.$(PSUFFIX) : zger.c $(CC) -c $(CFLAGS) -DCONJ $< -o $(@F) +ifeq ($(BUILD_BFLOAT16),1) +sbgemv.$(SUFFIX) sbgemv.$(PSUFFIX) : sbgemv.c + $(CC) $(CFLAGS) -c $< -o $(@F) +endif + ifndef USE_NETLIB_GEMV sgemv.$(SUFFIX) sgemv.$(PSUFFIX): gemv.c $(CC) -c $(CFLAGS) -o $(@F) $< @@ -1208,6 +1286,11 @@ zhpr2.$(SUFFIX) zhpr2.$(PSUFFIX) : zhpr2.c xhpr2.$(SUFFIX) xhpr2.$(PSUFFIX) : zhpr2.c $(CC) -c $(CFLAGS) $< -o $(@F) +ifeq ($(BUILD_BFLOAT16),1) +sbgemm.$(SUFFIX) sbgemm.$(PSUFFIX) : gemm.c ../param.h + $(CC) -c $(CFLAGS) $< -o $(@F) +endif + sgemm.$(SUFFIX) sgemm.$(PSUFFIX) : gemm.c ../param.h $(CC) -c $(CFLAGS) $< -o $(@F) @@ -1448,6 +1531,19 @@ cblas_sdsdot.$(SUFFIX) cblas_sdsdot.$(PSUFFIX) : sdsdot.c cblas_dsdot.$(SUFFIX) cblas_dsdot.$(PSUFFIX) : dsdot.c $(CC) $(CFLAGS) -DCBLAS -c $< -o $(@F) +ifeq ($(BUILD_BFLOAT16),1) +cblas_sbdot.$(SUFFIX) cblas_sbdot.$(PSUFFIX) : bf16dot.c + $(CC) $(CFLAGS) -DCBLAS -c $< -o $(@F) +cblas_sbstobf16.$(SUFFIX) cblas_sbstobf16.$(PSUFFIX) : tobf16.c + $(CC) $(CFLAGS) -DCBLAS -DSINGLE_PREC -UDOUBLE_PREC -c $< -o $(@F) +cblas_sbdtobf16.$(SUFFIX) cblas_sbdtobf16.$(PSUFFIX) : tobf16.c + $(CC) $(CFLAGS) -DCBLAS -USINGLE_PREC -DDOUBLE_PREC -c $< -o $(@F) +cblas_sbf16tos.$(SUFFIX) cblas_sbf16tos.$(PSUFFIX) : bf16to.c + $(CC) $(CFLAGS) -DCBLAS -DSINGLE_PREC -UDOUBLE_PREC -c $< -o $(@F) +cblas_dbf16tod.$(SUFFIX) cblas_dbf16tod.$(PSUFFIX) : bf16to.c + $(CC) $(CFLAGS) -DCBLAS -USINGLE_PREC -DDOUBLE_PREC -c $< -o $(@F) +endif + cblas_sdot.$(SUFFIX) cblas_sdot.$(PSUFFIX) : dot.c $(CC) $(CFLAGS) -DCBLAS -c $< -o $(@F) @@ -1538,6 +1634,12 @@ cblas_srotg.$(SUFFIX) cblas_srotg.$(PSUFFIX): rotg.c cblas_drotg.$(SUFFIX) cblas_drotg.$(PSUFFIX): rotg.c $(CC) $(CFLAGS) -DCBLAS -c $< -o $(@F) +cblas_crotg.$(SUFFIX) crotg.$(PSUFFIX): zrotg.c + $(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F) + +cblas_zrotg.$(SUFFIX) zrotg.$(PSUFFIX): zrotg.c + $(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F) + cblas_srotm.$(SUFFIX) cblas_srotm.$(PSUFFIX): rotm.c $(CC) $(CFLAGS) -DCBLAS -c $< -o $(@F) @@ -1568,6 +1670,17 @@ cblas_csscal.$(SUFFIX) cblas_csscal.$(PSUFFIX) : zscal.c cblas_zdscal.$(SUFFIX) cblas_zdscal.$(PSUFFIX) : zscal.c $(CC) $(CFLAGS) -DCBLAS -c -DSSCAL $< -o $(@F) +cblas_csrot.$(SUFFIX) cblas_csrot.$(PSUFFIX) : zrot.c + $(CC) $(CFLAGS) -DCBLAS -c $< -o $(@F) + +cblas_zdrot.$(SUFFIX) cblas_zdrot.$(PSUFFIX) : zrot.c + $(CC) $(CFLAGS) -DCBLAS -c $< -o $(@F) + +ifeq ($(BUILD_BFLOAT16),1) +cblas_sbgemv.$(SUFFIX) cblas_sbgemv.$(PSUFFIX) : sbgemv.c + $(CC) -DCBLAS -c $(CFLAGS) $< -o $(@F) +endif + cblas_sgemv.$(SUFFIX) cblas_sgemv.$(PSUFFIX): gemv.c $(CC) -DCBLAS -c $(CFLAGS) -o $(@F) 
@@ -1769,6 +1882,11 @@ cblas_zhemv.$(SUFFIX) cblas_zhemv.$(PSUFFIX) : zhemv.c
 cblas_sgemm.$(SUFFIX) cblas_sgemm.$(PSUFFIX) : gemm.c ../param.h
 	$(CC) -DCBLAS -c $(CFLAGS) $< -o $(@F)
 
+ifeq ($(BUILD_BFLOAT16),1)
+cblas_sbgemm.$(SUFFIX) cblas_sbgemm.$(PSUFFIX) : gemm.c ../param.h
+	$(CC) -DCBLAS -c $(CFLAGS) $< -o $(@F)
+endif
+
 cblas_dgemm.$(SUFFIX) cblas_dgemm.$(PSUFFIX) : gemm.c ../param.h
 	$(CC) -DCBLAS -c $(CFLAGS) $< -o $(@F)
@@ -2031,7 +2149,7 @@ sgetrs.$(SUFFIX) sgetrs.$(PSUFFIX) : lapack/getrs.c
 dgetrs.$(SUFFIX) dgetrs.$(PSUFFIX) : lapack/getrs.c
 	$(CC) -c $(CFLAGS) $< -o $(@F)
 
-qgetrs.$(SUFFIX) qgetrs.$(PSUFFIX) : getrs.c
+qgetrs.$(SUFFIX) qgetrs.$(PSUFFIX) : lapack/getrs.c
 	$(CC) -c $(CFLAGS) $< -o $(@F)
 
 cgetrs.$(SUFFIX) cgetrs.$(PSUFFIX) : lapack/zgetrs.c
@@ -2040,7 +2158,25 @@ cgetrs.$(SUFFIX) cgetrs.$(PSUFFIX) : lapack/zgetrs.c
 zgetrs.$(SUFFIX) zgetrs.$(PSUFFIX) : lapack/zgetrs.c
 	$(CC) -c $(CFLAGS) $< -o $(@F)
 
-xgetrs.$(SUFFIX) xgetrs.$(PSUFFIX) : zgetrs.c
+xgetrs.$(SUFFIX) xgetrs.$(PSUFFIX) : lapack/zgetrs.c
+	$(CC) -c $(CFLAGS) $< -o $(@F)
+
+strtrs.$(SUFFIX) strtrs.$(PSUFFIX) : lapack/trtrs.c
+	$(CC) -c $(CFLAGS) $< -o $(@F)
+
+dtrtrs.$(SUFFIX) dtrtrs.$(PSUFFIX) : lapack/trtrs.c
+	$(CC) -c $(CFLAGS) $< -o $(@F)
+
+qtrtrs.$(SUFFIX) qtrtrs.$(PSUFFIX) : lapack/trtrs.c
+	$(CC) -c $(CFLAGS) $< -o $(@F)
+
+ctrtrs.$(SUFFIX) ctrtrs.$(PSUFFIX) : lapack/ztrtrs.c
+	$(CC) -c $(CFLAGS) $< -o $(@F)
+
+ztrtrs.$(SUFFIX) ztrtrs.$(PSUFFIX) : lapack/ztrtrs.c
+	$(CC) -c $(CFLAGS) $< -o $(@F)
+
+xtrtrs.$(SUFFIX) xtrtrs.$(PSUFFIX) : lapack/ztrtrs.c
 	$(CC) -c $(CFLAGS) $< -o $(@F)
 
 sgesv.$(SUFFIX) sgesv.$(PSUFFIX) : lapack/gesv.c
@@ -2198,3 +2334,4 @@ cblas_zgeadd.$(SUFFIX) cblas_zgeadd.$(PSUFFIX) : zgeadd.c
 cblas_xerbla.$(SUFFIX) cblas_xerbla.$(PSUFFIX) : xerbla.c
 	$(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F)
+
diff --git a/interface/bf16dot.c b/interface/bf16dot.c
new file mode 100644
index 000000000..33717e374
--- /dev/null
+++ b/interface/bf16dot.c
@@ -0,0 +1,52 @@
+#include <stdio.h>
+#include "common.h"
+#ifdef FUNCTION_PROFILE
+#include "functable.h"
+#endif
+
+#ifndef CBLAS
+float NAME(blasint *N, bfloat16 *x, blasint *INCX, bfloat16 *y, blasint *INCY){
+  BLASLONG n = *N;
+  BLASLONG incx = *INCX;
+  BLASLONG incy = *INCY;
+  float ret;
+  PRINT_DEBUG_NAME;
+
+  if (n <= 0) return 0.;
+
+  IDEBUG_START;
+  FUNCTION_PROFILE_START();
+
+  if (incx < 0) x -= (n - 1) * incx;
+  if (incy < 0) y -= (n - 1) * incy;
+  ret = BF16_DOT_K(n, x, incx, y, incy);
+
+  FUNCTION_PROFILE_END(1, 2 * n, 2 * n);
+  IDEBUG_END;
+
+  return ret;
+}
+
+#else
+
+float CNAME(blasint n, bfloat16 *x, blasint incx, bfloat16 *y, blasint incy){
+
+  float ret;
+  PRINT_DEBUG_CNAME;
+
+  if (n <= 0) return 0.;
+
+  IDEBUG_START;
+  FUNCTION_PROFILE_START();
+
+  if (incx < 0) x -= (n - 1) * incx;
+  if (incy < 0) y -= (n - 1) * incy;
+  ret = BF16_DOT_K(n, x, incx, y, incy);
+
+  FUNCTION_PROFILE_END(1, 2 * n, 2 * n);
+  IDEBUG_END;
+
+  return ret;
+}
+
+#endif
diff --git a/interface/bf16to.c b/interface/bf16to.c
new file mode 100644
index 000000000..036c0b142
--- /dev/null
+++ b/interface/bf16to.c
@@ -0,0 +1,62 @@
+#include <stdio.h>
+#include "common.h"
+#ifdef FUNCTION_PROFILE
+#include "functable.h"
+#endif
+
+#if defined(DOUBLE_PREC)
+#define FLOAT_TYPE double
+#elif defined(SINGLE_PREC)
+#define FLOAT_TYPE float
+#else
+#endif
+
+#ifndef CBLAS
+void NAME(blasint *N, bfloat16 *in, blasint *INC_IN, FLOAT_TYPE *out, blasint *INC_OUT){
+  BLASLONG n = *N;
+  BLASLONG inc_in = *INC_IN;
+  BLASLONG inc_out = *INC_OUT;
+
+  PRINT_DEBUG_NAME;
+
+  if (n <= 0) return;
+
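+  /* As in the other interface wrappers, a negative increment means the
+     vector is traversed from its last element backwards, so the pointer
+     is rebased below: with inc_in < 0, in -= (n - 1) * inc_in moves the
+     pointer forward onto the final stored element. */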
IDEBUG_START; + FUNCTION_PROFILE_START(); + + if (inc_in < 0) in -= (n - 1) * inc_in; + if (inc_out < 0) out -= (n - 1) * inc_out; + +#if defined(DOUBLE_PREC) + D_BF16_TO_K(n, in, inc_in, out, inc_out); +#elif defined(SINGLE_PREC) + S_BF16_TO_K(n, in, inc_in, out, inc_out); +#else +#endif + + FUNCTION_PROFILE_END(1, 2 * n, 2 * n); + IDEBUG_END; +} +#else +void CNAME(blasint n, bfloat16 * in, blasint inc_in, FLOAT_TYPE * out, blasint inc_out){ + PRINT_DEBUG_CNAME; + + if (n <= 0) return; + + IDEBUG_START; + FUNCTION_PROFILE_START(); + + if (inc_in < 0) in -= (n - 1) * inc_in; + if (inc_out < 0) out -= (n - 1) * inc_out; + +#if defined(DOUBLE_PREC) + D_BF16_TO_K(n, in, inc_in, out, inc_out); +#elif defined(SINGLE_PREC) + S_BF16_TO_K(n, in, inc_in, out, inc_out); +#else +#endif + + FUNCTION_PROFILE_END(1, 2 * n, 2 * n); + IDEBUG_END; +} +#endif diff --git a/interface/gemm.c b/interface/gemm.c index 97e71bc85..6fde69049 100644 --- a/interface/gemm.c +++ b/interface/gemm.c @@ -77,7 +77,7 @@ #define GEMM_MULTITHREAD_THRESHOLD 4 #endif -static int (*gemm[])(blas_arg_t *, BLASLONG *, BLASLONG *, FLOAT *, FLOAT *, BLASLONG) = { +static int (*gemm[])(blas_arg_t *, BLASLONG *, BLASLONG *, IFLOAT *, IFLOAT *, BLASLONG) = { #ifndef GEMM3M GEMM_NN, GEMM_TN, GEMM_RN, GEMM_CN, GEMM_NT, GEMM_TT, GEMM_RT, GEMM_CT, @@ -108,8 +108,8 @@ static int (*gemm[])(blas_arg_t *, BLASLONG *, BLASLONG *, FLOAT *, FLOAT *, BLA void NAME(char *TRANSA, char *TRANSB, blasint *M, blasint *N, blasint *K, FLOAT *alpha, - FLOAT *a, blasint *ldA, - FLOAT *b, blasint *ldB, + IFLOAT *a, blasint *ldA, + IFLOAT *b, blasint *ldB, FLOAT *beta, FLOAT *c, blasint *ldC){ @@ -119,8 +119,8 @@ void NAME(char *TRANSA, char *TRANSB, blasint info; char transA, transB; - FLOAT *buffer; - FLOAT *sa, *sb; + IFLOAT *buffer; + IFLOAT *sa, *sb; #ifdef SMP double MNK; @@ -246,6 +246,7 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS #ifdef SMP double MNK; +#if defined(USE_SIMPLE_THREADED_LEVEL3) || !defined(NO_AFFINITY) #ifndef COMPLEX #ifdef XDOUBLE int mode = BLAS_XDOUBLE | BLAS_REAL; @@ -264,6 +265,7 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS #endif #endif #endif +#endif #if defined(SMP) && !defined(NO_AFFINITY) && !defined(USE_SIMPLE_THREADED_LEVEL3) int nodes; @@ -272,8 +274,11 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS PRINT_DEBUG_CNAME; #if !defined(COMPLEX) && !defined(DOUBLE) && defined(USE_SGEMM_KERNEL_DIRECT) - if (beta == 0 && alpha == 1.0 && order == CblasRowMajor && TransA == CblasNoTrans && TransB == CblasNoTrans && sgemm_kernel_direct_performant(m,n,k)) { - sgemm_kernel_direct(m, n, k, a, lda, b, ldb, c, ldc); +#ifdef DYNAMIC_ARCH + if (support_avx512() ) +#endif + if (beta == 0 && alpha == 1.0 && order == CblasRowMajor && TransA == CblasNoTrans && TransB == CblasNoTrans && SGEMM_DIRECT_PERFORMANT(m,n,k)) { + SGEMM_DIRECT(m, n, k, a, lda, b, ldb, c, ldc); return; } @@ -414,8 +419,10 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS sb = (XFLOAT *)(((BLASLONG)sa + ((GEMM_P * GEMM_Q * COMPSIZE * SIZE + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); #ifdef SMP +#if defined(USE_SIMPLE_THREADED_LEVEL3) || !defined(NO_AFFINITY) mode |= (transa << BLAS_TRANSA_SHIFT); mode |= (transb << BLAS_TRANSB_SHIFT); +#endif MNK = (double) args.m * (double) args.n * (double) args.k; if ( MNK <= (SMP_THRESHOLD_MIN * (double) GEMM_MULTITHREAD_THRESHOLD) ) diff --git a/interface/gemv.c b/interface/gemv.c 
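Before the interface/gemv.c hunk, a note for context on the FLOAT -> IFLOAT substitutions in interface/gemm.c above: IFLOAT is the input-matrix element type, which differs from FLOAT only when the source is compiled as sbgemm (bfloat16 A/B with float alpha, beta and C), so the one gemm.c source now serves both. A minimal caller's-eye sketch of the resulting entry points, with hypothetical values and assuming a library built with BUILD_BFLOAT16=1 so that cblas.h declares the bfloat16 routines:

#include <cblas.h>

int main(void) {
    float a32[4] = {1, 2, 3, 4}, b32[4] = {1, 0, 0, 1}, c[4] = {0};
    bfloat16 a16[4], b16[4];

    /* conversion helpers added by this patch (float -> bfloat16) */
    cblas_sbstobf16(4, a32, 1, a16, 1);
    cblas_sbstobf16(4, b32, 1, b16, 1);

    /* bfloat16 inputs, float accumulation and output: C = 1.0*A*B + 0.0*C */
    cblas_sbgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                 2, 2, 2, 1.0f, a16, 2, b16, 2, 0.0f, c, 2);
    return 0;
}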
index c9d52cd69..d5d739fb1 100644 --- a/interface/gemv.c +++ b/interface/gemv.c @@ -191,7 +191,6 @@ void CNAME(enum CBLAS_ORDER order, } #endif - //printf("m=%d, n=%d, trans=%d, incx=%d, incy=%d, alpha=%f, beta=%f\n", m, n, trans, incx, incy, alpha, beta); if ((m==0) || (n==0)) return; lenx = n; diff --git a/interface/lapack/gesv.c b/interface/lapack/gesv.c index 721da970d..175350329 100644 --- a/interface/lapack/gesv.c +++ b/interface/lapack/gesv.c @@ -44,19 +44,19 @@ #ifndef COMPLEX #ifdef XDOUBLE -#define ERROR_NAME "QGESV " +#define ERROR_NAME "QGESV" #elif defined(DOUBLE) -#define ERROR_NAME "DGESV " +#define ERROR_NAME "DGESV" #else -#define ERROR_NAME "SGESV " +#define ERROR_NAME "SGESV" #endif #else #ifdef XDOUBLE -#define ERROR_NAME "XGESV " +#define ERROR_NAME "XGESV" #elif defined(DOUBLE) -#define ERROR_NAME "ZGESV " +#define ERROR_NAME "ZGESV" #else -#define ERROR_NAME "CGESV " +#define ERROR_NAME "CGESV" #endif #endif @@ -89,7 +89,7 @@ int NAME(blasint *N, blasint *NRHS, FLOAT *a, blasint *ldA, blasint *ipiv, if (args.m < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/getf2.c b/interface/lapack/getf2.c index 3e66c0403..8506feca9 100644 --- a/interface/lapack/getf2.c +++ b/interface/lapack/getf2.c @@ -74,7 +74,7 @@ int NAME(blasint *M, blasint *N, FLOAT *a, blasint *ldA, blasint *ipiv, blasint if (args.n < 0) info = 2; if (args.m < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/getrf.c b/interface/lapack/getrf.c index 44a92ddc4..02bb124b3 100644 --- a/interface/lapack/getrf.c +++ b/interface/lapack/getrf.c @@ -74,7 +74,7 @@ int NAME(blasint *M, blasint *N, FLOAT *a, blasint *ldA, blasint *ipiv, blasint if (args.n < 0) info = 2; if (args.m < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/getrs.c b/interface/lapack/getrs.c index 1b8c83aca..c2a9eb882 100644 --- a/interface/lapack/getrs.c +++ b/interface/lapack/getrs.c @@ -102,7 +102,7 @@ int NAME(char *TRANS, blasint *N, blasint *NRHS, FLOAT *a, blasint *ldA, if (trans < 0) info = 1; if (info != 0) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); return 0; } diff --git a/interface/lapack/lauu2.c b/interface/lapack/lauu2.c index 3599a4791..e581e3c15 100644 --- a/interface/lapack/lauu2.c +++ b/interface/lapack/lauu2.c @@ -90,7 +90,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (args.n < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/lauum.c b/interface/lapack/lauum.c index 2c49eb0b0..70f6a0ec5 100644 --- a/interface/lapack/lauum.c +++ b/interface/lapack/lauum.c @@ -90,7 +90,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (args.n < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git 
a/interface/lapack/potf2.c b/interface/lapack/potf2.c index 837192265..1537b6ee4 100644 --- a/interface/lapack/potf2.c +++ b/interface/lapack/potf2.c @@ -90,7 +90,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (args.n < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/potrf.c b/interface/lapack/potrf.c index 092272225..dbd55f62f 100644 --- a/interface/lapack/potrf.c +++ b/interface/lapack/potrf.c @@ -90,7 +90,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (args.n < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/potri.c b/interface/lapack/potri.c index d6230621f..2c0c64b6f 100644 --- a/interface/lapack/potri.c +++ b/interface/lapack/potri.c @@ -99,7 +99,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/trti2.c b/interface/lapack/trti2.c index 42c4c4815..47f04f06f 100644 --- a/interface/lapack/trti2.c +++ b/interface/lapack/trti2.c @@ -96,7 +96,7 @@ int NAME(char *UPLO, char *DIAG, blasint *N, FLOAT *a, blasint *ldA, blasint *In if (diag < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/trtri.c b/interface/lapack/trtri.c index 6724a678a..028529389 100644 --- a/interface/lapack/trtri.c +++ b/interface/lapack/trtri.c @@ -99,7 +99,7 @@ int NAME(char *UPLO, char *DIAG, blasint *N, FLOAT *a, blasint *ldA, blasint *In if (diag < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/trtrs.c b/interface/lapack/trtrs.c new file mode 100644 index 000000000..54fbe8394 --- /dev/null +++ b/interface/lapack/trtrs.c @@ -0,0 +1,171 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
+/* POSSIBILITY OF SUCH DAMAGE. */
+/* */
+/* The views and conclusions contained in the software and */
+/* documentation are those of the authors and should not be */
+/* interpreted as representing official policies, either expressed */
+/* or implied, of The University of Texas at Austin. */
+/*********************************************************************/
+
+#include <stdio.h>
+#include "common.h"
+#ifdef FUNCTION_PROFILE
+#include "functable.h"
+#endif
+
+#ifdef XDOUBLE
+#define ERROR_NAME "QTRTRS"
+#elif defined(DOUBLE)
+#define ERROR_NAME "DTRTRS"
+#else
+#define ERROR_NAME "STRTRS"
+#endif
+
+/* Solver tables indexed by (uplo << 2) | (trans << 1) | diag:
+   upper/lower x (no)transpose x unit/non-unit diagonal, eight variants. */
+static blasint (*trtrs_single[])(blas_arg_t *, BLASLONG *, BLASLONG *, FLOAT *, FLOAT *, BLASLONG) = {
+  TRTRS_UNU_SINGLE, TRTRS_UNN_SINGLE, TRTRS_UTU_SINGLE, TRTRS_UTN_SINGLE, TRTRS_LNU_SINGLE, TRTRS_LNN_SINGLE, TRTRS_LTU_SINGLE, TRTRS_LTN_SINGLE,
+};
+
+#ifdef SMP
+static blasint (*trtrs_parallel[])(blas_arg_t *, BLASLONG *, BLASLONG *, FLOAT *, FLOAT *, BLASLONG) = {
+  TRTRS_UNU_PARALLEL, TRTRS_UNN_PARALLEL, TRTRS_UTU_PARALLEL, TRTRS_UTN_PARALLEL, TRTRS_LNU_PARALLEL, TRTRS_LNN_PARALLEL, TRTRS_LTU_PARALLEL, TRTRS_LTN_PARALLEL,
+};
+#endif
+
+int NAME(char *UPLO, char* TRANS, char* DIAG, blasint *N, blasint *NRHS, FLOAT *a, blasint *ldA,
+         FLOAT *b, blasint *ldB, blasint *Info){
+
+  char uplo_arg = *UPLO;
+  char trans_arg = *TRANS;
+  char diag_arg = *DIAG;
+
+  blas_arg_t args;
+
+  blasint info;
+  int uplo, trans, diag;
+  FLOAT *buffer;
+#ifdef PPC440
+  extern
+#endif
+  FLOAT *sa, *sb;
+
+  PRINT_DEBUG_NAME;
+
+  args.m = *N;
+  args.n = *NRHS;
+  args.a = (void *)a;
+  args.lda = *ldA;
+  args.b = (void *)b;
+  args.ldb = *ldB;
+
+  info = 0;
+
+  TOUPPER(trans_arg);
+  trans = -1;
+  if (trans_arg == 'N') trans = 0;
+  if (trans_arg == 'T') trans = 1;
+  if (trans_arg == 'R') trans = 0;
+  if (trans_arg == 'C') trans = 1;
+
+  uplo = -1;
+  if (uplo_arg == 'U') uplo = 0;
+  if (uplo_arg == 'L') uplo = 1;
+
+  diag = -1;
+  if (diag_arg == 'U') diag = 0;
+  if (diag_arg == 'N') diag = 1;
+
+  if (args.ldb < MAX(1, args.m)) info = 9;
+  if (args.lda < MAX(1, args.m)) info = 7;
+  if (args.n < 0) info = 5;
+  if (args.m < 0) info = 4;
+  if (trans < 0) info = 2;
+  if (uplo < 0) info = 1;
+  if (diag < 0) info = 3;
+
+  if (info != 0) {
+    BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1);
+    *Info = - info;
+    return 0;
+  }
+
+  args.alpha = NULL;
+  args.beta = NULL;
+
+  *Info = 0;
+
+  if (args.m == 0) return 0;
+
+  if (diag) {
+    if (AMIN_K(args.m, args.a, args.lda + 1) == ZERO) {
+      *Info = IAMIN_K(args.m, args.a, args.lda + 1);
+      return 0;
+    }
+  }
+
+
+  IDEBUG_START;
+
+  FUNCTION_PROFILE_START();
+
+#ifndef PPC440
+  buffer = (FLOAT *)blas_memory_alloc(1);
+
+  sa = (FLOAT *)((BLASLONG)buffer + GEMM_OFFSET_A);
+  sb = (FLOAT *)(((BLASLONG)sa + ((GEMM_P * GEMM_Q * COMPSIZE * SIZE + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
+#endif
+
+#ifdef SMP
+  args.common = NULL;
+  args.nthreads = num_cpu_avail(4);
+
+  if (args.nthreads == 1) {
+#endif
+
+    (trtrs_single[(uplo << 2)
| (trans << 1) | diag])(&args, NULL, NULL, sa, sb, 0); + +#ifdef SMP + } else { + (trtrs_parallel[(uplo << 2) | (trans << 1) | diag])(&args, NULL, NULL, sa, sb, 0); + } +#endif + +#ifndef PPC440 + blas_memory_free(buffer); +#endif + + FUNCTION_PROFILE_END(COMPSIZE * COMPSIZE, args.m * args.n, 2 * args.m * args.m * args.n); + + IDEBUG_END; + + return 0; + +} diff --git a/interface/lapack/zgetf2.c b/interface/lapack/zgetf2.c index 59ec4874e..68b9a7e4b 100644 --- a/interface/lapack/zgetf2.c +++ b/interface/lapack/zgetf2.c @@ -74,7 +74,7 @@ int NAME(blasint *M, blasint *N, FLOAT *a, blasint *ldA, blasint *ipiv, blasint if (args.n < 0) info = 2; if (args.m < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/zgetrf.c b/interface/lapack/zgetrf.c index 5031f587b..7f8db94f6 100644 --- a/interface/lapack/zgetrf.c +++ b/interface/lapack/zgetrf.c @@ -74,7 +74,7 @@ int NAME(blasint *M, blasint *N, FLOAT *a, blasint *ldA, blasint *ipiv, blasint if (args.n < 0) info = 2; if (args.m < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/zgetrs.c b/interface/lapack/zgetrs.c index 54d4b0905..0add909ca 100644 --- a/interface/lapack/zgetrs.c +++ b/interface/lapack/zgetrs.c @@ -102,7 +102,7 @@ int NAME(char *TRANS, blasint *N, blasint *NRHS, FLOAT *a, blasint *ldA, if (trans < 0) info = 1; if (info != 0) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); return 0; } diff --git a/interface/lapack/zlauu2.c b/interface/lapack/zlauu2.c index b0698ef2e..ae972543c 100644 --- a/interface/lapack/zlauu2.c +++ b/interface/lapack/zlauu2.c @@ -91,7 +91,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (args.n < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/zpotf2.c b/interface/lapack/zpotf2.c index 27ee0891a..c74b66728 100644 --- a/interface/lapack/zpotf2.c +++ b/interface/lapack/zpotf2.c @@ -91,7 +91,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (args.n < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/zpotrf.c b/interface/lapack/zpotrf.c index 8cd3980d5..c4cd99bf6 100644 --- a/interface/lapack/zpotrf.c +++ b/interface/lapack/zpotrf.c @@ -90,7 +90,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (args.n < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/zpotri.c b/interface/lapack/zpotri.c index 7c72a7e62..8da211683 100644 --- a/interface/lapack/zpotri.c +++ b/interface/lapack/zpotri.c @@ -99,7 +99,7 @@ int NAME(char *UPLO, blasint *N, FLOAT *a, blasint *ldA, blasint *Info){ if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, 
sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/ztrti2.c b/interface/lapack/ztrti2.c index a25476677..cb9c0d557 100644 --- a/interface/lapack/ztrti2.c +++ b/interface/lapack/ztrti2.c @@ -96,7 +96,7 @@ int NAME(char *UPLO, char *DIAG, blasint *N, FLOAT *a, blasint *ldA, blasint *In if (diag < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/ztrtri.c b/interface/lapack/ztrtri.c index b3ce85b9f..dda4a9e4b 100644 --- a/interface/lapack/ztrtri.c +++ b/interface/lapack/ztrtri.c @@ -96,7 +96,7 @@ int NAME(char *UPLO, char *DIAG, blasint *N, FLOAT *a, blasint *ldA, blasint *In if (diag < 0) info = 2; if (uplo < 0) info = 1; if (info) { - BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); *Info = - info; return 0; } diff --git a/interface/lapack/ztrtrs.c b/interface/lapack/ztrtrs.c new file mode 100644 index 000000000..7f1bd9af4 --- /dev/null +++ b/interface/lapack/ztrtrs.c @@ -0,0 +1,171 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include +#include "common.h" +#ifdef FUNCTION_PROFILE +#include "functable.h" +#endif + +#ifdef XDOUBLE +#define ERROR_NAME "XTRTRS" +#elif defined(DOUBLE) +#define ERROR_NAME "ZTRTRS" +#else +#define ERROR_NAME "CTRTRS" +#endif + +static blasint (*trtrs_single[])(blas_arg_t *, BLASLONG *, BLASLONG *, FLOAT *, FLOAT *, BLASLONG) = { + TRTRS_UNU_SINGLE, TRTRS_UNN_SINGLE, TRTRS_UTU_SINGLE, TRTRS_UTN_SINGLE, TRTRS_URU_SINGLE, TRTRS_URN_SINGLE, TRTRS_UCU_SINGLE, TRTRS_UCN_SINGLE, TRTRS_LNU_SINGLE, TRTRS_LNN_SINGLE, TRTRS_LTU_SINGLE, TRTRS_LTN_SINGLE, TRTRS_LRU_SINGLE, TRTRS_LRN_SINGLE, TRTRS_LCU_SINGLE, TRTRS_LCN_SINGLE, +}; + +#ifdef SMP +static blasint (*trtrs_parallel[])(blas_arg_t *, BLASLONG *, BLASLONG *, FLOAT *, FLOAT *, BLASLONG) = { + TRTRS_UNU_PARALLEL, TRTRS_UNN_PARALLEL, TRTRS_UTU_PARALLEL, TRTRS_UTN_PARALLEL, TRTRS_URU_PARALLEL, TRTRS_URN_PARALLEL, TRTRS_UCU_PARALLEL, TRTRS_UCN_PARALLEL, TRTRS_LNU_PARALLEL, TRTRS_LNN_PARALLEL, TRTRS_LTU_PARALLEL, TRTRS_LTN_PARALLEL, TRTRS_LRU_PARALLEL, TRTRS_LRN_PARALLEL, TRTRS_LCU_PARALLEL, TRTRS_LCN_PARALLEL, +}; +#endif + +int NAME(char *UPLO, char* TRANS, char* DIAG, blasint *N, blasint *NRHS, FLOAT *a, blasint *ldA, + FLOAT *b, blasint *ldB, blasint *Info){ + + char uplo_arg = *UPLO; + char trans_arg = *TRANS; + char diag_arg = *DIAG; + + blas_arg_t args; + + blasint info; + int uplo, trans, diag; + FLOAT *buffer; +#ifdef PPC440 + extern +#endif + FLOAT *sa, *sb; + + PRINT_DEBUG_NAME; + + args.m = *N; + args.n = *NRHS; + args.a = (void *)a; + args.lda = *ldA; + args.b = (void *)b; + args.ldb = *ldB; + + info = 0; + + TOUPPER(trans_arg); + trans = -1; + if (trans_arg == 'N') trans = 0; + if (trans_arg == 'T') trans = 1; + if (trans_arg == 'R') trans = 2; + if (trans_arg == 'C') trans = 3; + + uplo = -1; + if (uplo_arg == 'U') uplo = 0; + if (uplo_arg == 'L') uplo = 1; + + diag = -1; + if (diag_arg == 'U') diag = 0; + if (diag_arg == 'N') diag = 1; + + if (args.ldb < MAX(1, args.m)) info = 9; + if (args.lda < MAX(1, args.m)) info = 7; + if (args.n < 0) info = 5; + if (args.m < 0) info = 4; + if (trans < 0) info = 2; + if (uplo < 0) info = 1; + if (diag < 0) info = 3; + + if (info != 0) { + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME) - 1); + *Info = - info; + return 0; + } + + args.alpha = NULL; + args.beta = NULL; + + *Info = 0; + + if (args.m == 0) return 0; + + if (diag) { + if (AMIN_K(args.m, args.a, args.lda + 1) == ZERO) { + *Info = IAMIN_K(args.m, args.a, args.lda + 1); + return 0; + } + } + + + IDEBUG_START; + + FUNCTION_PROFILE_START(); + +#ifndef PPC440 + buffer = (FLOAT *)blas_memory_alloc(1); + + sa = (FLOAT *)((BLASLONG)buffer + GEMM_OFFSET_A); + sb = (FLOAT *)(((BLASLONG)sa + ((GEMM_P * GEMM_Q * COMPSIZE * SIZE + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); +#endif + +#ifdef SMP + args.common = NULL; + args.nthreads = num_cpu_avail(4); + + if (args.nthreads == 1) { +#endif + + (trtrs_single[(uplo << 3) | (trans << 1) | diag])(&args, NULL, NULL, sa, sb, 0); + +#ifdef SMP + } else { + (trtrs_parallel[(uplo << 3) | (trans << 1) | diag])(&args, NULL, NULL, sa, sb, 0); + } +#endif + +#ifndef PPC440 + blas_memory_free(buffer); +#endif + + FUNCTION_PROFILE_END(COMPSIZE * COMPSIZE, args.m * args.n, 2 * args.m * args.m * args.n); + + IDEBUG_END; + + return 0; + +} diff --git a/interface/rotmg.c b/interface/rotmg.c index ce3b146c1..3a5ca8f95 100644 --- a/interface/rotmg.c +++ b/interface/rotmg.c @@ -107,7 +107,6 @@ void CNAME(FLOAT *dd1, FLOAT *dd2, 
FLOAT *dx1, FLOAT dy1, FLOAT *dparam){ dq1 = dp1 * *dx1; if(ABS(dq1) > ABS(dq2)) { - dflag = ZERO; dh11 = ONE; dh22 = ONE; dh21 = - dy1 / *dx1; diff --git a/interface/sbgemv.c b/interface/sbgemv.c new file mode 100644 index 000000000..89debe82d --- /dev/null +++ b/interface/sbgemv.c @@ -0,0 +1,210 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include +#include "common.h" +#include "l1param.h" +#ifdef FUNCTION_PROFILE +#include "functable.h" +#endif + +#define ERROR_NAME "SBGEMV " + +#ifdef SMP +static int (*sbgemv_thread[])(BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 * , BLASLONG, float, float *, BLASLONG, int) = { + sbgemv_thread_n, sbgemv_thread_t, +}; +#endif + +#ifndef CBLAS + +void NAME(char *TRANS, blasint *M, blasint *N, float *ALPHA, bfloat16 *a, blasint *LDA, bfloat16 *x, blasint *INCX, float *BETA, float *y, blasint *INCY) +{ + char trans = *TRANS; + blasint m = *M; + blasint n = *N; + blasint lda = *LDA; + blasint incx = *INCX; + blasint incy = *INCY; + float alpha = *ALPHA; + float beta = *BETA; +#ifdef SMP + int nthreads; +#endif + + int (*sbgemv[])(BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 * , BLASLONG, float, float *, BLASLONG) = { + SBGEMV_N, SBGEMV_T, + }; + + blasint info; + blasint lenx, leny; + blasint i; + + PRINT_DEBUG_NAME; + + TOUPPER(trans); + + info = 0; + + i = -1; + + if (trans == 'N') {i = 0;} + if (trans == 'T') {i = 1;} + if (trans == 'R') {i = 0;} + if (trans == 'C') {i = 1;} + + if (incy == 0) {info = 11;} + if (incx == 0) {info = 8;} + if (lda < MAX(1, m)) {info = 6;} + if (n < 0) {info = 3;} + if (m < 0) {info = 2;} + if (i < 0) {info = 1;} + + trans = i; + + if (info != 0) { + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + return; + } + +#else + +void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, blasint m, blasint n, float alpha, bfloat16 *a, blasint lda, bfloat16 *x, blasint incx, float beta, float *y, blasint incy) +{ + blasint lenx, leny; + int trans; + blasint info, t; +#ifdef SMP + int nthreads; +#endif + + int (*sbgemv[])(BLASLONG, BLASLONG, float, bfloat16 *, BLASLONG, bfloat16 * , BLASLONG, float, float *, BLASLONG) = { + SBGEMV_N, SBGEMV_T, + }; + + PRINT_DEBUG_CNAME; + + trans = -1; + info = 0; + + if (order == CblasColMajor) { // Column Major + if (TransA == CblasNoTrans || TransA == CblasConjNoTrans) { + trans = 0; + } else if (TransA == CblasTrans || TransA == CblasConjTrans) { + trans = 1; + } + } else { // Row Major + if (TransA == CblasNoTrans || TransA == CblasConjNoTrans) { + trans = 1; + } else if (TransA == CblasTrans || TransA == CblasConjTrans) { + trans = 0; + } + + t = n; + n = m; + m = t; + } + + info = -1; + + if (incy == 0) {info = 11;} + if (incx == 0) {info = 8;} + if (lda < MAX(1, m)) {info = 6;} + if (n < 0) {info = 3;} + if (m < 0) {info = 2;} + if (trans < 0) {info = 1;} + + if (info >= 0) { + BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME)); + return; + } + +#endif + + if ((m==0) || (n==0)) return; + + if (trans) { + lenx = m; + leny = n; + } else { + lenx = n; + leny = m; + } + + if (alpha == ZERO) { + if (beta != ONE) SCAL_K(leny, 0, 0, beta, y, blasabs(incy), NULL, 0, NULL, 0); + return; + } + + IDEBUG_START; + FUNCTION_PROFILE_START(); + + if (incx < 0) {x -= (lenx - 1) * incx;} + if (incy < 0) {y -= (leny - 1) * incy;} + +#ifdef SMP + int thread_thres_row = 20480; + if (trans) { + if (n <= thread_thres_row) { + nthreads = 1; + } else { + nthreads = num_cpu_avail(1); + } + } else { + if (m <= thread_thres_row) { + nthreads = 1; + } else { + nthreads = num_cpu_avail(1); + } + } + + + if (nthreads == 1) { +#endif + (sbgemv[(int)trans])(m, n, alpha, a, lda, x, incx, beta, y, incy); +#ifdef SMP + } else { + (sbgemv_thread[(int)trans])(m, n, alpha, a, lda, x, incx, beta, y, incy, nthreads); + } +#endif + + 
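+  /* The thread_thres_row test above keeps problems whose traversed
+     dimension is at most 20480 rows on a single thread; only larger
+     matrices take on the thread start-up overhead. */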
FUNCTION_PROFILE_END(1, m * n + m + n, 2 * m * n); + IDEBUG_END; + + return; +} diff --git a/interface/swap.c b/interface/swap.c index 17a9868a9..ea40b1fc2 100644 --- a/interface/swap.c +++ b/interface/swap.c @@ -42,7 +42,7 @@ #include "functable.h" #endif -#if defined(THUNDERX2T99) || defined(VULCAN) || defined(ARMV8) +#if defined(THUNDERX2T99) || defined(VULCAN) || defined(ARMV8) || defined(THUNDERX3T110) // Multithreaded swap gives performance benefits in ThunderX2T99 #else // Disable multi-threading as it does not show any performance diff --git a/interface/tobf16.c b/interface/tobf16.c new file mode 100644 index 000000000..787d9d689 --- /dev/null +++ b/interface/tobf16.c @@ -0,0 +1,61 @@ +#include +#include "common.h" +#ifdef FUNCTION_PROFILE +#include "functable.h" +#endif + +#if defined(DOUBLE_PREC) +#define FLOAT_TYPE double +#elif defined(SINGLE_PREC) +#define FLOAT_TYPE float +#else +#endif + +#ifndef CBLAS +void NAME(blasint *N, FLOAT_TYPE *in, blasint *INC_IN, bfloat16 *out, blasint *INC_OUT){ + BLASLONG n = *N; + BLASLONG inc_in = *INC_IN; + BLASLONG inc_out = *INC_OUT; + + PRINT_DEBUG_NAME; + + if (n <= 0) return; + + IDEBUG_START; + FUNCTION_PROFILE_START(); + + if (inc_in < 0) in -= (n - 1) * inc_in; + if (inc_out < 0) out -= (n - 1) * inc_out; + +#if defined(DOUBLE_PREC) + D_TO_BF16_K(n, in, inc_in, out, inc_out); +#elif defined(SINGLE_PREC) + S_TO_BF16_K(n, in, inc_in, out, inc_out); +#else +#endif + + FUNCTION_PROFILE_END(1, 2 * n, 2 * n); + IDEBUG_END; +} +#else +void CNAME(blasint n, FLOAT_TYPE *in, blasint inc_in, bfloat16 *out, blasint inc_out){ + PRINT_DEBUG_CNAME; + + if (n <= 0) return; + + IDEBUG_START; + FUNCTION_PROFILE_START(); + + if (inc_in < 0) in -= (n - 1) * inc_in; + if (inc_out < 0) out -= (n - 1) * inc_out; + +#if defined(DOUBLE_PREC) + D_TO_BF16_K(n, in, inc_in, out, inc_out); +#elif defined(SINGLE_PREC) + S_TO_BF16_K(n, in, inc_in, out, inc_out); +#endif + + FUNCTION_PROFILE_END(1, 2 * n, 2 * n); + IDEBUG_END; +} +#endif diff --git a/interface/zswap.c b/interface/zswap.c index 372b15447..43971b73e 100644 --- a/interface/zswap.c +++ b/interface/zswap.c @@ -42,7 +42,7 @@ #include "functable.h" #endif -#if defined(THUNDERX2T99) || defined(VULCAN) || defined(ARMV8) +#if defined(THUNDERX2T99) || defined(VULCAN) || defined(ARMV8) || defined(THUNDERX3T110) // Multithreaded swap gives performance benefits in ThunderX2T99 #else // Disable multi-threading as it does not show any performance diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index ad15b8f25..f0793bdef 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -41,13 +41,16 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) foreach (float_type ${FLOAT_TYPES}) # a bit of metaprogramming here to pull out the appropriate KERNEL var string(SUBSTRING ${float_type} 0 1 float_char) + if (${float_type} STREQUAL "BFLOAT16") + set (float_char "SB") + endif () GenerateNamedObjects("${KERNELDIR}/${${float_char}AMAXKERNEL}" "USE_ABS" "amax_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}AMINKERNEL}" "USE_ABS;USE_MIN" "amin_k" false "" "" false ${float_type}) if (DEFINED ${float_char}MAXKERNEL) GenerateNamedObjects("${KERNELDIR}/${${float_char}MAXKERNEL}" "" "max_k" false "" "" false ${float_type}) endif () if (DEFINED ${float_char}MINKERNEL) - GenerateNamedObjects("${KERNELDIR}/${${float_char}MINKERNEL}" "" "min_k" false "" "" false ${float_type}) + GenerateNamedObjects("${KERNELDIR}/${${float_char}MINKERNEL}" "USE_MIN" "min_k" false "" 
"" false ${float_type}) endif () GenerateNamedObjects("${KERNELDIR}/${I${float_char}AMAXKERNEL}" "USE_ABS" "i*amax_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${I${float_char}AMINKERNEL}" "USE_ABS;USE_MIN" "i*amin_k" false "" "" false ${float_type}) @@ -55,7 +58,7 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) GenerateNamedObjects("${KERNELDIR}/${I${float_char}MAXKERNEL}" "" "i*max_k" false "" "" false ${float_type}) endif () if (DEFINED I${float_char}MINKERNEL) - GenerateNamedObjects("${KERNELDIR}/${I${float_char}MINKERNEL}" "" "i*min_k" false "" "" false ${float_type}) + GenerateNamedObjects("${KERNELDIR}/${I${float_char}MINKERNEL}" "USE_MIN" "i*min_k" false "" "" false ${float_type}) endif () GenerateNamedObjects("${KERNELDIR}/${${float_char}ASUMKERNEL}" "" "asum_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}AXPYKERNEL}" "" "axpy_k" false "" "" false ${float_type}) @@ -88,11 +91,67 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) GenerateNamedObjects("${KERNELDIR}/${DSDOTKERNEL}" "DSDOT" "d*dot_k" false "" "" false "SINGLE") GenerateNamedObjects("${KERNELDIR}/${DSDOTKERNEL}" "DSDOT" "dsdot_k" false "" "" false "SINGLE") + if ((BUILD_COMPLEX OR BUILD_DOUBLE) AND NOT BUILD_SINGLE) + GenerateNamedObjects("${KERNELDIR}/${SAMAXKERNEL}" "USE_ABS" "amax_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SAMINKERNEL}" "USE_ABS;USE_MIN" "amin_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SASUMKERNEL}" "" "asum_k" false "" "" false "SINGLE") + if (DEFINED SMAXKERNEL) + GenerateNamedObjects("${KERNELDIR}/${SMAXKERNEL}" "" "max_k" false "" "" false "SINGLE") + endif () + if (DEFINED SMINKERNEL) + GenerateNamedObjects("${KERNELDIR}/${SMINKERNEL}" "USE_MIN" "min_k" false "" "" false "SINGLE") + endif () + if (DEFINED ISMINKERNEL) + GenerateNamedObjects("${KERNELDIR}/${ISMINKERNEL}" "USE_MIN" "i*min_k" false "" "" false "SINGLE") + endif () + if (DEFINED ISMAXKERNEL) + GenerateNamedObjects("${KERNELDIR}/${ISMAXKERNEL}" "" "i*max_k" false "" "" false "SINGLE") + endif () + GenerateNamedObjects("${KERNELDIR}/${ISAMAXKERNEL}" "USE_ABS" "i*amax_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${ISAMINKERNEL}" "USE_ABS;USE_MIN" "i*amin_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SSCALKERNEL}" "" "scal_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SCOPYKERNEL}" "C_INTERFACE" "copy_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SSWAPKERNEL}" "" "swap_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SAXPYKERNEL}" "" "axpy_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SNRM2KERNEL}" "" "nrm2_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SDOTKERNEL}" "" "dot_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SROTKERNEL}" "" "rot_k" false "" "" false "SINGLE") + endif () + if (BUILD_COMPLEX16 AND NOT BUILD_DOUBLE) + GenerateNamedObjects("${KERNELDIR}/${DAMAXKERNEL}" "USE_ABS" "amax_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DAMINKERNEL}" "USE_ABS;USE_MIN" "amin_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DASUMKERNEL}" "" "asum_k" false "" "" false "DOUBLE") + if (DEFINED DMAXKERNEL) + GenerateNamedObjects("${KERNELDIR}/${DMAXKERNEL}" "" "max_k" false "" "" false "DOUBLE") + endif () + if (DEFINED DMINKERNEL) + 
GenerateNamedObjects("${KERNELDIR}/${DMINKERNEL}" "USE_MIN" "min_k" false "" "" false "DOUBLE") + endif () + if (DEFINED IDMINKERNEL) + GenerateNamedObjects("${KERNELDIR}/${IDMINKERNEL}" "USE_MIN" "i*min_k" false "" "" false "DOUBLE") + endif () + if (DEFINED IDMAXKERNEL) + GenerateNamedObjects("${KERNELDIR}/${IDMAXKERNEL}" "" "i*max_k" false "" "" false "DOUBLE") + endif () + GenerateNamedObjects("${KERNELDIR}/${IDAMAXKERNEL}" "USE_ABS" "i*amax_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${IDAMINKERNEL}" "USE_ABS;USE_MIN" "i*amin_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DSCALKERNEL}" "" "scal_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DCOPYKERNEL}" "C_INTERFACE" "copy_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DNRM2KERNEL}" "" "nrm2_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DROTKERNEL}" "" "rot_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DDOTKERNEL}" "" "dot_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DSWAPKERNEL}" "" "swap_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DAXPYKERNEL}" "" "axpy_k" false "" "" false "DOUBLE") + endif () + # Makefile.L2 GenerateCombinationObjects("generic/symv_k.c" "LOWER" "U" "" 1 "" "" 3) GenerateNamedObjects("generic/ger.c" "" "ger_k" false "" "" "" 3) foreach (float_type ${FLOAT_TYPES}) string(SUBSTRING ${float_type} 0 1 float_char) + if (${float_type} STREQUAL "BFLOAT16") + set (float_char "SB") + endif () if (${float_type} STREQUAL "COMPLEX" OR ${float_type} STREQUAL "ZCOMPLEX") GenerateNamedObjects("${KERNELDIR}/${${float_char}GERUKERNEL}" "" "geru_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}GERCKERNEL}" "CONJ" "gerc_k" false "" "" false ${float_type}) @@ -118,21 +177,87 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) GenerateNamedObjects("${KERNELDIR}/${${float_char}GEMVTKERNEL}" "TRANS" "gemv_t" false "" "" false ${float_type}) endif () endforeach () - + if (BUILD_COMPLEX16 AND NOT BUILD_DOUBLE) + GenerateNamedObjects("${KERNELDIR}/${DGEMVNKERNEL}" "" "gemv_n" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DGEMVTKERNEL}" "TRANS" "gemv_t" false "" "" false "DOUBLE") + endif () + if (BUILD_COMPLEX AND NOT BUILD_SINGLE) + GenerateNamedObjects("${KERNELDIR}/${SGEMVNKERNEL}" "" "gemv_n" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SGEMVTKERNEL}" "TRANS" "gemv_t" false "" "" false "SINGLE") + endif () # Makefile.L3 set(USE_TRMM false) - - if (ARM OR ARM64 OR "${TARGET_CORE}" STREQUAL "LONGSOON3B" OR "${TARGET_CORE}" STREQUAL "GENERIC" OR "${CORE}" STREQUAL "generic" OR "${TARGET_CORE}" STREQUAL "HASWELL" OR "${CORE}" STREQUAL "haswell" OR "${CORE}" STREQUAL "zen" OR "${TARGET_CORE}" STREQUAL "SKYLAKEX" OR "${CORE}" STREQUAL "skylakex") + string(TOUPPER ${TARGET_CORE} UC_TARGET_CORE) + if (ARM OR ARM64 OR (UC_TARGET_CORE MATCHES LONGSOON3B) OR (UC_TARGET_CORE MATCHES GENERIC) OR (UC_TARGET_CORE MATCHES HASWELL) OR (UC_TARGET_CORE MATCHES ZEN) OR (UC_TARGET_CORE MATCHES SKYLAKEX) OR (UC_TARGET_CORE MATCHES COOPERLAKE)) set(USE_TRMM true) endif () + if (ZARCH OR (UC_TARGET_CORE MATCHES POWER8) OR (UC_TARGET_CORE MATCHES POWER9) OR (UC_TARGET_CORE MATCHES POWER10)) + set(USE_TRMM true) + endif () + + set(USE_DIRECT_SGEMM false) + if (X86_64) + set(USE_DIRECT_SGEMM true) + endif() - foreach (float_type SINGLE DOUBLE) + if (USE_DIRECT_SGEMM) + # if (NOT 
DEFINED SGEMMDIRECTKERNEL) + set (SGEMMDIRECTKERNEL sgemm_direct_skylakex.c) + set (SGEMMDIRECTPERFORMANT sgemm_direct_performant.c) + # endif() + GenerateNamedObjects("${KERNELDIR}/${SGEMMDIRECTKERNEL}" "" "gemm_direct" false "" "" false SINGLE) + GenerateNamedObjects("${KERNELDIR}/${SGEMMDIRECTPERFORMANT}" "" "gemm_direct_performant" false "" "" false SINGLE) + endif() + + foreach (float_type SINGLE DOUBLE BFLOAT16) string(SUBSTRING ${float_type} 0 1 float_char) + if (${float_type} STREQUAL "BFLOAT16") + if (NOT ${BUILD_BFLOAT16}) + continue () + else () + set (float_char "SB") + endif () + endif () GenerateNamedObjects("${KERNELDIR}/${${float_char}GEMMKERNEL}" "" "gemm_kernel" false "" "" false ${float_type}) endforeach() + if (BUILD_COMPLEX16 AND NOT BUILD_DOUBLE) + GenerateNamedObjects("${KERNELDIR}/${DGEMMKERNEL}" "" "gemm_kernel" false "" "" false "DOUBLE") + if (DGEMMINCOPY) + GenerateNamedObjects("${KERNELDIR}/${DGEMMINCOPY}" "DOUBLE" "${DGEMMINCOPYOBJ}" false "" "" true "DOUBLE") + endif () + if (DGEMMITCOPY) + GenerateNamedObjects("${KERNELDIR}/${DGEMMITCOPY}" "DOUBLE" "${DGEMMITCOPYOBJ}" false "" "" true "DOUBLE") + endif () + if (DGEMMONCOPY) + GenerateNamedObjects("${KERNELDIR}/${DGEMMONCOPY}" "DOUBLE" "${DGEMMONCOPYOBJ}" false "" "" true "DOUBLE") + endif () + if (DGEMMOTCOPY) + GenerateNamedObjects("${KERNELDIR}/${DGEMMOTCOPY}" "DOUBLE" "${DGEMMOTCOPYOBJ}" false "" "" true "DOUBLE") + endif () + GenerateNamedObjects("${KERNELDIR}/${DGEMM_BETA}" "" "gemm_beta" false "" "" false "DOUBLE") + endif () + if ((BUILD_DOUBLE OR BUILD_COMPLEX) AND NOT BUILD_SINGLE) + GenerateNamedObjects("${KERNELDIR}/${SGEMMKERNEL}" "" "gemm_kernel" false "" "" false "SINGLE") + if (SGEMMINCOPY) + GenerateNamedObjects("${KERNELDIR}/${SGEMMINCOPY}" "SINGLE" "${SGEMMINCOPYOBJ}" false "" "" true "SINGLE") + endif () + if (SGEMMITCOPY) + GenerateNamedObjects("${KERNELDIR}/${SGEMMITCOPY}" "SINGLE" "${SGEMMITCOPYOBJ}" false "" "" true "SINGLE") + endif () + if (SGEMMONCOPY) + GenerateNamedObjects("${KERNELDIR}/${SGEMMONCOPY}" "SINGLE" "${SGEMMONCOPYOBJ}" false "" "" true "SINGLE") + endif () + if (SGEMMOTCOPY) + GenerateNamedObjects("${KERNELDIR}/${SGEMMOTCOPY}" "SINGLE" "${SGEMMOTCOPYOBJ}" false "" "" true "SINGLE") + endif () + GenerateNamedObjects("${KERNELDIR}/${SGEMM_BETA}" "" "gemm_beta" false "" "" false "SINGLE") + endif () foreach (float_type ${FLOAT_TYPES}) string(SUBSTRING ${float_type} 0 1 float_char) + if (${float_type} STREQUAL "BFLOAT16") + set (float_char "SB") + endif () if (${float_char}GEMMINCOPY) GenerateNamedObjects("${KERNELDIR}/${${float_char}GEMMINCOPY}" "${float_type}" "${${float_char}GEMMINCOPYOBJ}" false "" "" true ${float_type}) endif () @@ -467,10 +592,38 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) #geadd GenerateNamedObjects("${KERNELDIR}/${${float_char}GEADD_KERNEL}" "" "geadd_k" false "" "" false ${float_type}) endforeach () + if (BUILD_DOUBLE AND NOT BUILD_SINGLE) + GenerateNamedObjects("${KERNELDIR}/${STRSMKERNEL_LN}" "UPPER;LN;TRSMKERNEL" "trsm_kernel_LN" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${STRSMKERNEL_LT}" "LT;TRSMKERNEL" "trsm_kernel_LT" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${STRSMKERNEL_RN}" "UPPER;RN;TRSMKERNEL" "trsm_kernel_RN" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${STRSMKERNEL_RT}" "RT;TRSMKERNEL" "trsm_kernel_RT" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_uncopy_${SGEMM_UNROLL_M}.c" "UNIT" "trsm_iunucopy" false "" "" false 
"SINGLE") + GenerateNamedObjects("generic/trsm_uncopy_${SGEMM_UNROLL_M}.c" "" "trsm_iunncopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_uncopy_${SGEMM_UNROLL_N}.c" "OUTER;UNIT" "trsm_ounucopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_uncopy_${SGEMM_UNROLL_N}.c" "OUTER" "trsm_ounncopy" false "" "" false "SINGLE") + + GenerateNamedObjects("generic/trsm_lncopy_${SGEMM_UNROLL_M}.c" "LOWER;UNIT" "trsm_ilnucopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_lncopy_${SGEMM_UNROLL_M}.c" "LOWER" "trsm_ilnncopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_lncopy_${SGEMM_UNROLL_N}.c" "OUTER;LOWER;UNIT" "trsm_olnucopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_lncopy_${SGEMM_UNROLL_N}.c" "OUTER;LOWER" "trsm_olnncopy" false "" "" false "SINGLE") + + GenerateNamedObjects("generic/trsm_utcopy_${SGEMM_UNROLL_M}.c" "UNIT" "trsm_iutucopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_utcopy_${SGEMM_UNROLL_M}.c" "" "trsm_iutncopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_utcopy_${SGEMM_UNROLL_N}.c" "OUTER;UNIT" "trsm_outucopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_utcopy_${SGEMM_UNROLL_N}.c" "OUTER" "trsm_outncopy" false "" "" false "SINGLE") + + GenerateNamedObjects("generic/trsm_ltcopy_${SGEMM_UNROLL_M}.c" "LOWER;UNIT" "trsm_iltucopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_ltcopy_${SGEMM_UNROLL_M}.c" "LOWER" "trsm_iltncopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_ltcopy_${SGEMM_UNROLL_N}.c" "OUTER;LOWER;UNIT" "trsm_oltucopy" false "" "" false "SINGLE") + GenerateNamedObjects("generic/trsm_ltcopy_${SGEMM_UNROLL_N}.c" "OUTER;LOWER" "trsm_oltncopy" false "" "" false "SINGLE") + endif () # Makefile.LA if(NOT NO_LAPACK) foreach (float_type ${FLOAT_TYPES}) + if (${float_type} STREQUAL "BFLOAT16") + set (float_char "SB") + endif () if (NOT DEFINED ${float_char}NEG_TCOPY) if (${float_char} STREQUAL "Z" OR ${float_char} STREQUAL "C" OR ${float_char} STREQUAL "X") set(${float_char}NEG_TCOPY ../generic/zneg_tcopy.c) @@ -490,6 +643,28 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) GenerateNamedObjects("${KERNELDIR}/${${float_char}NEG_TCOPY}_${${float_char}GEMM_UNROLL_M}" "" "neg_tcopy" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}LASWP_NCOPY}_${${float_char}GEMM_UNROLL_N}" "" "laswp_ncopy" false "" "" false ${float_type}) endforeach() + if (BUILD_COMPLEX AND NOT BUILD_SINGLE) + if (NOT DEFINED SNEG_TCOPY) + set(SNEG_TCOPY ../generic/neg_tcopy.c) + endif () + + if (NOT DEFINED SLASWP_NCOPY) + set(SLASWP_NCOPY ../generic/laswp_ncopy.c) + endif () + GenerateNamedObjects("${KERNELDIR}/${SNEG_TCOPY}_${SGEMM_UNROLL_M}" "" "neg_tcopy" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SLASWP_NCOPY}_${SGEMM_UNROLL_N}" "" "laswp_ncopy" false "" "" false "SINGLE") + endif() + if (BUILD_COMPLEX16 AND NOT BUILD_DOUBLE) + if (NOT DEFINED DNEG_TCOPY) + set(DNEG_TCOPY ../generic/neg_tcopy.c) + endif () + + if (NOT DEFINED DLASWP_NCOPY) + set(DLASWP_NCOPY ../generic/laswp_ncopy.c) + endif () + GenerateNamedObjects("${KERNELDIR}/${DNEG_TCOPY}_${DGEMM_UNROLL_M}" "" "neg_tcopy" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DLASWP_NCOPY}_${DGEMM_UNROLL_N}" "" "laswp_ncopy" false "" "" false "DOUBLE") + endif() endif() if (${DYNAMIC_ARCH}) @@ -514,12 +689,154 @@ function (build_core TARGET_CORE KDIR TSUFFIX 
KERNEL_DEFINITIONS) foreach (float_type ${FLOAT_TYPES}) # a bit of metaprogramming here to pull out the appropriate KERNEL var string(SUBSTRING ${float_type} 0 1 float_char) + if (${float_type} STREQUAL "BFLOAT16") + set (float_char "SB") + endif () GenerateNamedObjects("generic/neg_tcopy_${${float_char}GEMM_UNROLL_M}.c" "" "neg_tcopy" false "" ${TSUFFIX} false ${float_type}) GenerateNamedObjects("generic/laswp_ncopy_${${float_char}GEMM_UNROLL_N}.c" "" "laswp_ncopy" false "" ${TSUFFIX} false ${float_type}) endforeach () + if (BUILD_COMPLEX AND NOT BUILD_SINGLE) + GenerateNamedObjects("${KERNELDIR}/${SGEMVNKERNEL}" "" "gemv_n" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SGEMVTKERNEL}" "TRANS" "gemv_t" false "" "" false "SINGLE") + GenerateNamedObjects("generic/neg_tcopy_${SGEMM_UNROLL_M}.c" "" "neg_tcopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/laswp_ncopy_${SGEMM_UNROLL_N}.c" "" "laswp_ncopy" false "" ${TSUFFIX} false "SINGLE") + endif () + if (BUILD_DOUBLE AND NOT BUILD_SINGLE) + GenerateNamedObjects("generic/neg_tcopy_${SGEMM_UNROLL_M}.c" "" "neg_tcopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/laswp_ncopy_${SGEMM_UNROLL_N}.c" "" "laswp_ncopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${STRSMKERNEL_LN}" "UPPER;LN;TRSMKERNEL" "trsm_kernel_LN" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${STRSMKERNEL_LT}" "LT;TRSMKERNEL" "trsm_kernel_LT" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${STRSMKERNEL_RN}" "UPPER;RN;TRSMKERNEL" "trsm_kernel_RN" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${STRSMKERNEL_RT}" "RT;TRSMKERNEL" "trsm_kernel_RT" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_uncopy_${SGEMM_UNROLL_M}.c" "UNIT" "trsm_iunucopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_uncopy_${SGEMM_UNROLL_M}.c" "" "trsm_iunncopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_uncopy_${SGEMM_UNROLL_N}.c" "OUTER;UNIT" "trsm_ounucopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_uncopy_${SGEMM_UNROLL_N}.c" "OUTER" "trsm_ounncopy" false "" ${TSUFFIX} false "SINGLE") + + GenerateNamedObjects("generic/trsm_lncopy_${SGEMM_UNROLL_M}.c" "LOWER;UNIT" "trsm_ilnucopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_lncopy_${SGEMM_UNROLL_M}.c" "LOWER" "trsm_ilnncopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_lncopy_${SGEMM_UNROLL_N}.c" "OUTER;LOWER;UNIT" "trsm_olnucopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_lncopy_${SGEMM_UNROLL_N}.c" "OUTER;LOWER" "trsm_olnncopy" false "" ${TSUFFIX} false "SINGLE") + + GenerateNamedObjects("generic/trsm_utcopy_${SGEMM_UNROLL_M}.c" "UNIT" "trsm_iutucopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_utcopy_${SGEMM_UNROLL_M}.c" "" "trsm_iutncopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_utcopy_${SGEMM_UNROLL_N}.c" "OUTER;UNIT" "trsm_outucopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_utcopy_${SGEMM_UNROLL_N}.c" "OUTER" "trsm_outncopy" false "" ${TSUFFIX} false "SINGLE") + + GenerateNamedObjects("generic/trsm_ltcopy_${SGEMM_UNROLL_M}.c" "LOWER;UNIT" "trsm_iltucopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_ltcopy_${SGEMM_UNROLL_M}.c" "LOWER" "trsm_iltncopy" false "" ${TSUFFIX} 
false "SINGLE") + GenerateNamedObjects("generic/trsm_ltcopy_${SGEMM_UNROLL_N}.c" "OUTER;LOWER;UNIT" "trsm_oltucopy" false "" ${TSUFFIX} false "SINGLE") + GenerateNamedObjects("generic/trsm_ltcopy_${SGEMM_UNROLL_N}.c" "OUTER;LOWER" "trsm_oltncopy" false "" ${TSUFFIX} false "SINGLE") + + if (SGEMMINCOPY) + GenerateNamedObjects("${KERNELDIR}/${SGEMMINCOPY}" "SINGLE" "${SGEMMINCOPYOBJ}" false "" "" true "SINGLE") + endif () + if (SGEMMITCOPY) + GenerateNamedObjects("${KERNELDIR}/${SGEMMITCOPY}" "SINGLE" "${SGEMMITCOPYOBJ}" false "" "" true "SINGLE") + endif () + if (SGEMMONCOPY) + GenerateNamedObjects("${KERNELDIR}/${SGEMMONCOPY}" "SINGLE" "${SGEMMONCOPYOBJ}" false "" "" true "SINGLE") + endif () + if (SGEMMOTCOPY) + GenerateNamedObjects("${KERNELDIR}/${SGEMMOTCOPY}" "SINGLE" "${SGEMMOTCOPYOBJ}" false "" "" true "SINGLE") + endif () + GenerateNamedObjects("${KERNELDIR}/${SGEMVNKERNEL}" "" "gemv_n" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SGEMVTKERNEL}" "TRANS" "gemv_t" false "" "" false "SINGLE") + endif () + + if (BUILD_COMPLEX16 AND NOT BUILD_DOUBLE) + GenerateNamedObjects("generic/neg_tcopy_${DGEMM_UNROLL_M}.c" "" "neg_tcopy" false "" ${TSUFFIX} false "DOUBLE") + GenerateNamedObjects("generic/laswp_ncopy_${DGEMM_UNROLL_N}.c" "" "laswp_ncopy" false "" ${TSUFFIX} false "DOUBLE") + endif () + if (BUILD_COMPLEX16 AND NOT BUILD_COMPLEX) + GenerateNamedObjects("${KERNELDIR}/${CAMAXKERNEL}" "USE_ABS" "amax_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CAMINKERNEL}" "USE_ABS;USE_MIN" "amin_k" false "" "" false "COMPLEX") + if (DEFINED CMAXKERNEL) + GenerateNamedObjects("${KERNELDIR}/${CMAXKERNEL}" "" "max_k" false "" "" false "COMPLEX") + endif () + if (DEFINED CMINKERNEL) + GenerateNamedObjects("${KERNELDIR}/${CMINKERNEL}" "USE_MIN" "min_k" false "" "" false "COMPLEX") + endif () + GenerateNamedObjects("${KERNELDIR}/${ICAMAXKERNEL}" "USE_ABS" "i*amax_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${ICAMINKERNEL}" "USE_ABS;USE_MIN" "i*amin_k" false "" "" false "COMPLEX") + if (DEFINED ICMAXKERNEL) + GenerateNamedObjects("${KERNELDIR}/${ICMAXKERNEL}" "" "i*max_k" false "" "" false "COMPLEX") + endif () + if (DEFINED ICMINKERNEL) + GenerateNamedObjects("${KERNELDIR}/${ICMINKERNEL}" "USE_MIN" "i*min_k" false "" "" false "COMPLEX") + endif () + GenerateNamedObjects("${KERNELDIR}/${CASUMKERNEL}" "" "asum_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CAXPYKERNEL}" "" "axpy_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CCOPYKERNEL}" "C_INTERFACE" "copy_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CNRM2KERNEL}" "" "nrm2_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CROTKERNEL}" "" "rot_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CSCALKERNEL}" "" "scal_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CSWAPKERNEL}" "" "swap_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CAXPBYKERNEL}" "" "axpby_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CSUMKERNEL}" "" "sum_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CAXPYKERNEL}" "CONJ" "axpyc_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CDOTKERNEL}" "" "dotu_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CDOTKERNEL}" "CONJ" "dotc_k" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMVNKERNEL}" "" "gemv_n" 
false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMVTKERNEL}" "TRANSA" "gemv_t" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMVNKERNEL}" "CONJ" "gemv_r" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMVTKERNEL}" "CONJ;TRANSA" "gemv_c" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMVNKERNEL}" "XCONJ" "gemv_o" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMVTKERNEL}" "XCONJ;TRANSA" "gemv_u" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMVNKERNEL}" "XCONJ;CONJ" "gemv_s" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMVTKERNEL}" "XCONJ;CONJ;TRANSA" "gemv_d" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CTRSMKERNEL_LN}" "UPPER;LN;TRSMKERNEL;CONJ" "trsm_kernel_LR" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CTRSMKERNEL_LT}" "LT;TRSMKERNEL;CONJ" "trsm_kernel_LC" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CTRSMKERNEL_RN}" "UPPER;RN;TRSMKERNEL;CONJ" "trsm_kernel_RR" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CTRSMKERNEL_RT}" "RT;TRSMKERNEL;CONJ" "trsm_kernel_RC" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CTRSMKERNEL_LN}" "UPPER;LN;TRSMKERNEL" "trsm_kernel_LN" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CTRSMKERNEL_LT}" "LT;TRSMKERNEL" "trsm_kernel_LT" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CTRSMKERNEL_RN}" "UPPER;RN;TRSMKERNEL" "trsm_kernel_RN" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CTRSMKERNEL_RT}" "RT;TRSMKERNEL" "trsm_kernel_RT" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMMKERNEL}" "NN" "gemm_kernel_n" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMMKERNEL}" "CN" "gemm_kernel_l" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMMKERNEL}" "NC" "gemm_kernel_r" false "" "" false "COMPLEX") + GenerateNamedObjects("${KERNELDIR}/${CGEMMKERNEL}" "CC" "gemm_kernel_b" false "" "" false "COMPLEX") + if (CGEMMINCOPY) + GenerateNamedObjects("${KERNELDIR}/${CGEMMINCOPY}" "COMPLEX" "${CGEMMINCOPYOBJ}" false "" "" true "COMPLEX") + endif () + + if (CGEMMITCOPY) + GenerateNamedObjects("${KERNELDIR}/${CGEMMITCOPY}" "COMPLEX" "${CGEMMITCOPYOBJ}" false "" "" true "COMPLEX") + endif () + if (CGEMMONCOPY) + GenerateNamedObjects("${KERNELDIR}/${CGEMMONCOPY}" "COMPLEX" "${CGEMMONCOPYOBJ}" false "" "" true "COMPLEX") + endif () + + if (CGEMMOTCOPY) + GenerateNamedObjects("${KERNELDIR}/${CGEMMOTCOPY}" "COMPLEX" "${CGEMMOTCOPYOBJ}" false "" "" true "COMPLEX") + endif () + GenerateNamedObjects("${KERNELDIR}/${CGEMM_BETA}" "" "gemm_beta" false "" "" false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_uncopy_${CGEMM_UNROLL_M}.c" "UNIT" "trsm_iunucopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_uncopy_${CGEMM_UNROLL_M}.c" "" "trsm_iunncopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_uncopy_${CGEMM_UNROLL_N}.c" "OUTER;UNIT" "trsm_ounucopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_uncopy_${CGEMM_UNROLL_N}.c" "OUTER" "trsm_ounncopy" false "" ${TSUFFIX} false "COMPLEX") + + GenerateNamedObjects("generic/ztrsm_lncopy_${CGEMM_UNROLL_M}.c" "LOWER;UNIT" "trsm_ilnucopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_lncopy_${CGEMM_UNROLL_M}.c" "LOWER" "trsm_ilnncopy" 
false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_lncopy_${CGEMM_UNROLL_N}.c" "OUTER;LOWER;UNIT" "trsm_olnucopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_lncopy_${CGEMM_UNROLL_N}.c" "OUTER;LOWER" "trsm_olnncopy" false "" ${TSUFFIX} false "COMPLEX") + + GenerateNamedObjects("generic/ztrsm_utcopy_${CGEMM_UNROLL_M}.c" "UNIT" "trsm_iutucopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_utcopy_${CGEMM_UNROLL_M}.c" "" "trsm_iutncopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_utcopy_${CGEMM_UNROLL_N}.c" "OUTER;UNIT" "trsm_outucopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_utcopy_${CGEMM_UNROLL_N}.c" "OUTER" "trsm_outncopy" false "" ${TSUFFIX} false "COMPLEX") + + GenerateNamedObjects("generic/ztrsm_ltcopy_${CGEMM_UNROLL_M}.c" "LOWER;UNIT" "trsm_iltucopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_ltcopy_${CGEMM_UNROLL_M}.c" "LOWER" "trsm_iltncopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_ltcopy_${CGEMM_UNROLL_N}.c" "OUTER;LOWER;UNIT" "trsm_oltucopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/ztrsm_ltcopy_${CGEMM_UNROLL_N}.c" "OUTER;LOWER" "trsm_oltncopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/neg_tcopy_${DGEMM_UNROLL_M}.c" "" "neg_tcopy" false "" ${TSUFFIX} false "COMPLEX") + GenerateNamedObjects("generic/laswp_ncopy_${DGEMM_UNROLL_N}.c" "" "laswp_ncopy" false "" ${TSUFFIX} false "COMPLEX") endif () + endif () add_library(kernel${TSUFFIX} OBJECT ${OPENBLAS_SRC}) set_target_properties(kernel${TSUFFIX} PROPERTIES COMPILE_FLAGS "${KERNEL_DEFINITIONS}") @@ -534,7 +851,7 @@ if (${DYNAMIC_ARCH}) set(BUILD_KERNEL 1) set(KDIR "") set(TSUFFIX "_${TARGET_CORE}") - set(KERNEL_DEFINITIONS "-DBUILD_KERNEL -DTABLE_NAME=gotoblas_${TARGET_CORE} -DTS=${TSUFFIX}") + set(KERNEL_DEFINITIONS "-DBUILD_KERNEL -DTABLE_NAME=gotoblas_${TARGET_CORE} -DTS=${TSUFFIX}") build_core("${TARGET_CORE}" "${KDIR}" "${TSUFFIX}" "${KERNEL_DEFINITIONS}") set(ADD_COMMONOBJS 0) endforeach() diff --git a/kernel/Makefile b/kernel/Makefile index e81225075..1a6c9413f 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -5,20 +5,24 @@ endif TOPDIR = .. 
include $(TOPDIR)/Makefile.system

+ifeq ($(ARCH), power)
+ifeq ($(C_COMPILER), CLANG)
+	override CFLAGS += -fno-integrated-as
+endif
+endif
+
 AVX2OPT =
 ifeq ($(C_COMPILER), GCC)
 # AVX2 support was added in 4.7.0
-	GCCVERSIONGTEQ4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 4)
-	GCCMINORVERSIONGTEQ7 := $(shell expr `$(CC) -dumpversion | cut -f2 -d.` \>= 7)
-	ifeq ($(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ7), 11)
+GCCVERSIONCHECK := $(GCCVERSIONGT4)$(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ7)
+ifeq ($(GCCVERSIONCHECK), $(filter $(GCCVERSIONCHECK), 011 110 111))
 		AVX2OPT = -mavx2
 	endif
 endif
 ifeq ($(C_COMPILER), CLANG)
 # Any clang posing as gcc 4.2 should be new enough (3.4 or later)
-	GCCVERSIONGTEQ4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 4)
-	GCCMINORVERSIONGTEQ2 := $(shell expr `$(CC) -dumpversion | cut -f2 -d.` \>= 2)
-	ifeq ($(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ2), 11)
+	GCCVERSIONCHECK := $(GCCVERSIONGT4)$(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ2)
+	ifeq ($(GCCVERSIONCHECK), $(filter $(GCCVERSIONCHECK), 011 110 111))
 		AVX2OPT = -mavx2
 	endif
 endif
@@ -27,8 +31,23 @@ ifdef NO_AVX2
 endif

 ifdef TARGET_CORE
-ifeq ($(TARGET_CORE), SKYLAKEX)
-	override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) -march=skylake-avx512
+ifeq ($(TARGET_CORE), COOPERLAKE)
+	override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE)
+	ifeq ($(GCCVERSIONGTEQ10), 1)
+		override CFLAGS += -march=cooperlake
+	else
+		override CFLAGS += -march=skylake-avx512 -mavx512f
+	endif
+	ifeq ($(OSNAME), CYGWIN_NT)
+		override CFLAGS += -fno-asynchronous-unwind-tables
+	endif
+	ifeq ($(OSNAME), WINNT)
+		ifeq ($(C_COMPILER), GCC)
+			override CFLAGS += -fno-asynchronous-unwind-tables
+		endif
+	endif
+else ifeq ($(TARGET_CORE), SKYLAKEX)
+	override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) -march=skylake-avx512 -mavx512f
 	ifeq ($(OSNAME), CYGWIN_NT)
 		override CFLAGS += -fno-asynchronous-unwind-tables
 	endif
@@ -39,6 +58,8 @@ ifeq ($(TARGET_CORE), SKYLAKEX)
 endif
 else ifeq ($(TARGET_CORE), HASWELL)
 	override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) $(AVX2OPT)
+else ifeq ($(TARGET_CORE), LOONGSON3R4)
+	override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) $(MSA_FLAGS)
 else
 	override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE)
 endif
@@ -49,6 +70,9 @@ else
 TARGET_CORE = $(CORE)
 KDIR =
 TSUFFIX =
+ifeq ($(TARGET_CORE), LOONGSON3R4)
+	override CFLAGS += $(MSA_FLAGS)
+endif
 endif

 -include $(KERNELDIR)/KERNEL.$(TARGET_CORE)
diff --git a/kernel/Makefile.L1 b/kernel/Makefile.L1
index 970703230..09337363d 100644
--- a/kernel/Makefile.L1
+++ b/kernel/Makefile.L1
@@ -1,3 +1,11 @@
+FMAFLAG=
+ifndef OLDGCC
+ifdef HAVE_FMA3
+FMAFLAG = -mfma
+endif
+endif
+
+
 ### AMAX ###

 ifndef SAMAXKERNEL
@@ -262,6 +270,20 @@ ifndef XDOTKERNEL
 XDOTKERNEL = zdot.S
 endif

+ifeq ($(BUILD_BFLOAT16),1)
+ifndef SBDOTKERNEL
+SBDOTKERNEL = ../x86_64/sbdot.c
+endif
+
+ifndef TOBF16KERNEL
+TOBF16KERNEL = ../x86_64/tobf16.c
+endif
+
+ifndef BF16TOKERNEL
+BF16TOKERNEL = ../x86_64/bf16to.c
+endif
+endif
+
 ### NRM2 ###

 ifndef SNRM2KERNEL
@@ -516,6 +538,15 @@ XBLASOBJS += \
 	xdotc_k$(TSUFFIX).$(SUFFIX) xdotu_k$(TSUFFIX).$(SUFFIX) xnrm2_k$(TSUFFIX).$(SUFFIX) xqrot_k$(TSUFFIX).$(SUFFIX) \
 	xscal_k$(TSUFFIX).$(SUFFIX) xswap_k$(TSUFFIX).$(SUFFIX) xsum_k$(TSUFFIX).$(SUFFIX)

+ifeq ($(BUILD_BFLOAT16),1)
+SBBLASOBJS += \
+	sbdot_k$(TSUFFIX).$(SUFFIX)
+SBEXTOBJS += \
+	sbstobf16_k$(TSUFFIX).$(SUFFIX) sbdtobf16_k$(TSUFFIX).$(SUFFIX)
+SBEXTOBJS += \
+	
sbf16tos_k$(TSUFFIX).$(SUFFIX) dbf16tod_k$(TSUFFIX).$(SUFFIX) +endif + ### AMAX ### @@ -734,6 +765,19 @@ $(KDIR)ddot_k$(TSUFFIX).$(SUFFIX) $(KDIR)ddot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNEL $(KDIR)qdot_k$(TSUFFIX).$(SUFFIX) $(KDIR)qdot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QDOTKERNEL) $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE $< -o $@ +ifeq ($(BUILD_BFLOAT16),1) +$(KDIR)sbdot_k$(TSUFFIX).$(SUFFIX) $(KDIR)sbdot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SBDOTKERNEL) + $(CC) -c $(CFLAGS) -UCOMPLEX $< -o $@ +$(KDIR)sbstobf16_k$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(TOBF16KERNEL) + $(CC) -c $(CFLAGS) -UDOUBLE -DSINGLE $< -o $@ +$(KDIR)sbdtobf16_k$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(TOBF16KERNEL) + $(CC) -c $(CFLAGS) -DDOUBLE -USINGLE $< -o $@ +$(KDIR)sbf16tos_k$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(BF16TOKERNEL) + $(CC) -c $(CFLAGS) -UDOUBLE -DSINGLE $< -o $@ +$(KDIR)dbf16tod_k$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(BF16TOKERNEL) + $(CC) -c $(CFLAGS) -DDOUBLE -USINGLE $< -o $@ +endif + $(KDIR)sdot_k$(TSUFFIX).$(SUFFIX) $(KDIR)sdot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SDOTKERNEL) $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE $< -o $@ @@ -792,10 +836,10 @@ $(KDIR)xnrm2_k$(TSUFFIX).$(SUFFIX) $(KDIR)xnrm2_k$(TPSUFFIX).$(PSUFFIX) : $(KE $(CC) $(CFLAGS) -DCOMPLEX -c -DXDOUBLE $< -o $@ $(KDIR)srot_k$(TSUFFIX).$(SUFFIX) $(KDIR)srot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SROTKERNEL) - $(CC) -c $(CFLAGS) -UCOMPLEX -UCOMPLEX -UDOUBLE $< -o $@ + $(CC) -c $(CFLAGS) $(FMAFLAG) -UCOMPLEX -UCOMPLEX -UDOUBLE $< -o $@ $(KDIR)drot_k$(TSUFFIX).$(SUFFIX) $(KDIR)drot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DROTKERNEL) - $(CC) -c $(CFLAGS) -UCOMPLEX -UCOMPLEX -DDOUBLE $< -o $@ + $(CC) -c $(CFLAGS) $(FMAFLAG) -UCOMPLEX -UCOMPLEX -DDOUBLE $< -o $@ $(KDIR)qrot_k$(TSUFFIX).$(SUFFIX) $(KDIR)qrot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QROTKERNEL) $(CC) -c $(CFLAGS) -UCOMPLEX -UCOMPLEX -DXDOUBLE $< -o $@ diff --git a/kernel/Makefile.L2 b/kernel/Makefile.L2 index 2aeb8f041..888a9b959 100644 --- a/kernel/Makefile.L2 +++ b/kernel/Makefile.L2 @@ -48,6 +48,16 @@ ifndef XGEMVTKERNEL XGEMVTKERNEL = zgemv_t.S endif +ifeq ($(BUILD_BFLOAT16),1) +ifndef SBGEMVNKERNEL +SBGEMVNKERNEL = ../x86_64/sbgemv_n.c +endif + +ifndef SBGEMVTKERNEL +SBGEMVTKERNEL = ../x86_64/sbgemv_t.c +endif +endif + ### GER ### ifndef SGERKERNEL @@ -186,31 +196,46 @@ ifndef XHEMV_M_KERNEL XHEMV_M_KERNEL = ../generic/zhemv_k.c endif +ifneq "$(or $(BUILD_SINGLE), $(BUILD_DOUBLE), $(BUILD_COMPLEX))" "" +SBLASOBJS += \ + sgemv_n$(TSUFFIX).$(SUFFIX) sgemv_t$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_SINGLE),1) SBLASOBJS += \ - sgemv_n$(TSUFFIX).$(SUFFIX) sgemv_t$(TSUFFIX).$(SUFFIX) ssymv_U$(TSUFFIX).$(SUFFIX) ssymv_L$(TSUFFIX).$(SUFFIX) \ + ssymv_U$(TSUFFIX).$(SUFFIX) ssymv_L$(TSUFFIX).$(SUFFIX) \ sger_k$(TSUFFIX).$(SUFFIX) - +endif +ifeq ($(BUILD_DOUBLE),1) DBLASOBJS += \ dgemv_n$(TSUFFIX).$(SUFFIX) dgemv_t$(TSUFFIX).$(SUFFIX) dsymv_U$(TSUFFIX).$(SUFFIX) dsymv_L$(TSUFFIX).$(SUFFIX) \ dger_k$(TSUFFIX).$(SUFFIX) - +endif QBLASOBJS += \ qgemv_n$(TSUFFIX).$(SUFFIX) qgemv_t$(TSUFFIX).$(SUFFIX) qsymv_U$(TSUFFIX).$(SUFFIX) qsymv_L$(TSUFFIX).$(SUFFIX) \ qger_k$(TSUFFIX).$(SUFFIX) - +ifeq ($(BUILD_COMPLEX),1) +SBLASOBJS += \ + sgemv_n$(TSUFFIX).$(SUFFIX) sgemv_t$(TSUFFIX).$(SUFFIX) CBLASOBJS += \ cgemv_n$(TSUFFIX).$(SUFFIX) cgemv_t$(TSUFFIX).$(SUFFIX) cgemv_r$(TSUFFIX).$(SUFFIX) cgemv_c$(TSUFFIX).$(SUFFIX) \ cgemv_o$(TSUFFIX).$(SUFFIX) cgemv_u$(TSUFFIX).$(SUFFIX) cgemv_s$(TSUFFIX).$(SUFFIX) cgemv_d$(TSUFFIX).$(SUFFIX) \ csymv_U$(TSUFFIX).$(SUFFIX) csymv_L$(TSUFFIX).$(SUFFIX) \ 
chemv_U$(TSUFFIX).$(SUFFIX) chemv_L$(TSUFFIX).$(SUFFIX) chemv_V$(TSUFFIX).$(SUFFIX) chemv_M$(TSUFFIX).$(SUFFIX) \ cgeru_k$(TSUFFIX).$(SUFFIX) cgerc_k$(TSUFFIX).$(SUFFIX) cgerv_k$(TSUFFIX).$(SUFFIX) cgerd_k$(TSUFFIX).$(SUFFIX) - +endif +ifeq ($(BUILD_COMPLEX16),1) +CBLASOBJS += \ + cgemv_n$(TSUFFIX).$(SUFFIX) cgemv_t$(TSUFFIX).$(SUFFIX) cgemv_r$(TSUFFIX).$(SUFFIX) cgemv_c$(TSUFFIX).$(SUFFIX) \ + cgemv_o$(TSUFFIX).$(SUFFIX) cgemv_u$(TSUFFIX).$(SUFFIX) cgemv_s$(TSUFFIX).$(SUFFIX) cgemv_d$(TSUFFIX).$(SUFFIX) +DBLASOBJS += \ + dgemv_n$(TSUFFIX).$(SUFFIX) dgemv_t$(TSUFFIX).$(SUFFIX) ZBLASOBJS += \ zgemv_n$(TSUFFIX).$(SUFFIX) zgemv_t$(TSUFFIX).$(SUFFIX) zgemv_r$(TSUFFIX).$(SUFFIX) zgemv_c$(TSUFFIX).$(SUFFIX) \ zgemv_o$(TSUFFIX).$(SUFFIX) zgemv_u$(TSUFFIX).$(SUFFIX) zgemv_s$(TSUFFIX).$(SUFFIX) zgemv_d$(TSUFFIX).$(SUFFIX) \ zsymv_U$(TSUFFIX).$(SUFFIX) zsymv_L$(TSUFFIX).$(SUFFIX) \ zhemv_U$(TSUFFIX).$(SUFFIX) zhemv_L$(TSUFFIX).$(SUFFIX) zhemv_V$(TSUFFIX).$(SUFFIX) zhemv_M$(TSUFFIX).$(SUFFIX) \ zgeru_k$(TSUFFIX).$(SUFFIX) zgerc_k$(TSUFFIX).$(SUFFIX) zgerv_k$(TSUFFIX).$(SUFFIX) zgerd_k$(TSUFFIX).$(SUFFIX) +endif XBLASOBJS += \ xgemv_n$(TSUFFIX).$(SUFFIX) xgemv_t$(TSUFFIX).$(SUFFIX) xgemv_r$(TSUFFIX).$(SUFFIX) xgemv_c$(TSUFFIX).$(SUFFIX) \ @@ -219,17 +244,27 @@ XBLASOBJS += \ xhemv_U$(TSUFFIX).$(SUFFIX) xhemv_L$(TSUFFIX).$(SUFFIX) xhemv_V$(TSUFFIX).$(SUFFIX) xhemv_M$(TSUFFIX).$(SUFFIX) \ xgeru_k$(TSUFFIX).$(SUFFIX) xgerc_k$(TSUFFIX).$(SUFFIX) xgerv_k$(TSUFFIX).$(SUFFIX) xgerd_k$(TSUFFIX).$(SUFFIX) +ifeq ($(BUILD_BFLOAT16),1) +SBBLASOBJS += \ + sbgemv_n$(TSUFFIX).$(SUFFIX) \ + sbgemv_t$(TSUFFIX).$(SUFFIX) +endif + +ifneq "$(or $(BUILD_SINGLE), $(BUILD_DOUBLE), $(BUILD_COMPLEX))" "" $(KDIR)sgemv_n$(TSUFFIX).$(SUFFIX) $(KDIR)sgemv_n$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SGEMVNKERNEL) $(TOPDIR)/common.h $(GEMVDEP) $(CC) -c $(CFLAGS) -UDOUBLE -UCOMPLEX -UTRANS $< -o $@ $(KDIR)sgemv_t$(TSUFFIX).$(SUFFIX) $(KDIR)sgemv_t$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SGEMVTKERNEL) $(TOPDIR)/common.h $(GEMVDEP) $(CC) -c $(CFLAGS) -UDOUBLE -UCOMPLEX -DTRANS $< -o $@ +endif +ifneq "$(or $(BUILD_DOUBLE),$(BUILD_COMPLEX16))" "" $(KDIR)dgemv_n$(TSUFFIX).$(SUFFIX) $(KDIR)dgemv_n$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DGEMVNKERNEL) $(TOPDIR)/common.h $(GEMVDEP) $(CC) -c $(CFLAGS) -DDOUBLE -UCOMPLEX -UTRANS $< -o $@ $(KDIR)dgemv_t$(TSUFFIX).$(SUFFIX) $(KDIR)dgemv_t$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DGEMVTKERNEL) $(TOPDIR)/common.h $(GEMVDEP) $(CC) -c $(CFLAGS) -DDOUBLE -UCOMPLEX -DTRANS $< -o $@ +endif $(KDIR)qgemv_n$(TSUFFIX).$(SUFFIX) $(KDIR)qgemv_n$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QGEMVNKERNEL) $(CC) -c $(CFLAGS) -DXDOUBLE -UCOMPLEX -UTRANS $< -o $@ @@ -237,6 +272,8 @@ $(KDIR)qgemv_n$(TSUFFIX).$(SUFFIX) $(KDIR)qgemv_n$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)qgemv_t$(TSUFFIX).$(SUFFIX) $(KDIR)qgemv_t$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QGEMVTKERNEL) $(CC) -c $(CFLAGS) -DXDOUBLE -UCOMPLEX -DTRANS $< -o $@ + +ifneq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" $(KDIR)cgemv_n$(TSUFFIX).$(SUFFIX) $(KDIR)cgemv_n$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CGEMVNKERNEL) $(TOPDIR)/common.h $(GEMVDEP) $(CC) -c $(CFLAGS) -UDOUBLE -DCOMPLEX -UTRANS -UCONJ -UXCONJ $< -o $@ @@ -260,6 +297,10 @@ $(KDIR)cgemv_s$(TSUFFIX).$(SUFFIX) $(KDIR)cgemv_s$(TSUFFIX).$(PSUFFIX) : $(KERNE $(KDIR)cgemv_d$(TSUFFIX).$(SUFFIX) $(KDIR)cgemv_d$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CGEMVTKERNEL) $(TOPDIR)/common.h $(GEMVDEP) $(CC) -c $(CFLAGS) -UDOUBLE -DCOMPLEX -DTRANS -DCONJ -DXCONJ $< -o $@ +endif + + +ifeq ($(BUILD_COMPLEX16),1) 
$(KDIR)zgemv_n$(TSUFFIX).$(SUFFIX) $(KDIR)zgemv_n$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZGEMVNKERNEL) $(TOPDIR)/common.h $(GEMVDEP) $(CC) -c $(CFLAGS) -DDOUBLE -DCOMPLEX -UTRANS -UCONJ -UXCONJ $< -o $@ @@ -284,6 +325,7 @@ $(KDIR)zgemv_s$(TSUFFIX).$(SUFFIX) $(KDIR)zgemv_s$(TSUFFIX).$(PSUFFIX) : $(KERNE $(KDIR)zgemv_d$(TSUFFIX).$(SUFFIX) $(KDIR)zgemv_d$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZGEMVTKERNEL) $(TOPDIR)/common.h $(GEMVDEP) $(CC) -c $(CFLAGS) -DDOUBLE -DCOMPLEX -DTRANS -DCONJ -DXCONJ $< -o $@ +endif $(KDIR)xgemv_n$(TSUFFIX).$(SUFFIX) $(KDIR)xgemv_n$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XGEMVNKERNEL) $(CC) -c $(CFLAGS) -DXDOUBLE -DCOMPLEX -UTRANS -UCONJ -UXCONJ $< -o $@ @@ -309,17 +351,25 @@ $(KDIR)xgemv_s$(TSUFFIX).$(SUFFIX) $(KDIR)xgemv_s$(TSUFFIX).$(PSUFFIX) : $(KERNE $(KDIR)xgemv_d$(TSUFFIX).$(SUFFIX) $(KDIR)xgemv_d$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XGEMVTKERNEL) $(CC) -c $(CFLAGS) -DXDOUBLE -DCOMPLEX -DTRANS -DCONJ -DXCONJ $< -o $@ + +ifeq ($(BUILD_SINGLE),1) + $(KDIR)ssymv_U$(TSUFFIX).$(SUFFIX) $(KDIR)ssymv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SSYMV_U_KERNEL) $(SSYMV_U_PARAM) $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -ULOWER $< -o $@ $(KDIR)ssymv_L$(TSUFFIX).$(SUFFIX) $(KDIR)ssymv_L$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SSYMV_L_KERNEL) $(SSYMV_L_PARAM) $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DLOWER $< -o $@ +endif + + +ifeq ($(BUILD_DOUBLE),1) $(KDIR)dsymv_U$(TSUFFIX).$(SUFFIX) $(KDIR)dsymv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DSYMV_U_KERNEL) $(DSYMV_U_PARAM) $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -ULOWER $< -o $@ $(KDIR)dsymv_L$(TSUFFIX).$(SUFFIX) $(KDIR)dsymv_L$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DSYMV_L_KERNEL) $(DSYMV_L_PARAM) $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DLOWER $< -o $@ +endif $(KDIR)qsymv_U$(TSUFFIX).$(SUFFIX) $(KDIR)qsymv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QSYMV_U_KERNEL) $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -ULOWER $< -o $@ @@ -327,17 +377,23 @@ $(KDIR)qsymv_U$(TSUFFIX).$(SUFFIX) $(KDIR)qsymv_U$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)qsymv_L$(TSUFFIX).$(SUFFIX) $(KDIR)qsymv_L$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QSYMV_L_KERNEL) $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DLOWER $< -o $@ +ifeq ($(BUILD_COMPLEX),1) + $(KDIR)csymv_U$(TSUFFIX).$(SUFFIX) $(KDIR)csymv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CSYMV_U_KERNEL) $(CSYMV_U_PARAM) $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -ULOWER $< -o $@ $(KDIR)csymv_L$(TSUFFIX).$(SUFFIX) $(KDIR)csymv_L$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CSYMV_L_KERNEL) $(CSYMV_L_PARAM) $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DLOWER $< -o $@ +endif + +ifeq ($(BUILD_COMPLEX16),1) $(KDIR)zsymv_U$(TSUFFIX).$(SUFFIX) $(KDIR)zsymv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZSYMV_U_KERNEL) $(ZSYMV_U_PARAM) $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -ULOWER $< -o $@ $(KDIR)zsymv_L$(TSUFFIX).$(SUFFIX) $(KDIR)zsymv_L$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZSYMV_L_KERNEL) $(ZSYMV_L_PARAM) $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DLOWER $< -o $@ +endif $(KDIR)xsymv_U$(TSUFFIX).$(SUFFIX) $(KDIR)xsymv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XSYMV_U_KERNEL) $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -ULOWER $< -o $@ @@ -345,15 +401,23 @@ $(KDIR)xsymv_U$(TSUFFIX).$(SUFFIX) $(KDIR)xsymv_U$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)xsymv_L$(TSUFFIX).$(SUFFIX) $(KDIR)xsymv_L$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XSYMV_L_KERNEL) $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DLOWER $< -o $@ +ifeq ($(BUILD_SINGLE),1) + $(KDIR)sger_k$(TSUFFIX).$(SUFFIX) $(KDIR)sger_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SGERKERNEL) $(SGERPARAM) $(CC) -c $(CFLAGS) -UDOUBLE $< -o $@ +endif + +ifeq 
($(BUILD_DOUBLE),1) $(KDIR)dger_k$(TSUFFIX).$(SUFFIX) $(KDIR)dger_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DGERKERNEL) $(DGERPARAM) $(CC) -c $(CFLAGS) -DDOUBLE $< -o $@ +endif $(KDIR)qger_k$(TSUFFIX).$(SUFFIX) $(KDIR)qger_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QGERKERNEL) $(QGERPARAM) $(CC) -c $(CFLAGS) -DXDOUBLE $< -o $@ +ifeq ($(BUILD_COMPLEX),1) + $(KDIR)cgeru_k$(TSUFFIX).$(SUFFIX) $(KDIR)cgeru_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CGERUKERNEL) $(CGERPARAM) $(CC) -c $(CFLAGS) -UDOUBLE -UCONJ $< -o $@ @@ -365,6 +429,9 @@ $(KDIR)cgerv_k$(TSUFFIX).$(SUFFIX) $(KDIR)cgerv_k$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)cgerd_k$(TSUFFIX).$(SUFFIX) $(KDIR)cgerd_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CGERCKERNEL) $(CGERPARAM) $(CC) -c $(CFLAGS) -UDOUBLE -DCONJ -DXCONJ $< -o $@ +endif + +ifeq ($(BUILD_COMPLEX16),1) $(KDIR)zgeru_k$(TSUFFIX).$(SUFFIX) $(KDIR)zgeru_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZGERUKERNEL) $(ZGERPARAM) $(CC) -c $(CFLAGS) -DDOUBLE -UCONJ $< -o $@ @@ -377,6 +444,7 @@ $(KDIR)zgerv_k$(TSUFFIX).$(SUFFIX) $(KDIR)zgerv_k$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)zgerd_k$(TSUFFIX).$(SUFFIX) $(KDIR)zgerd_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZGERCKERNEL) $(ZGERPARAM) $(CC) -c $(CFLAGS) -DDOUBLE -DCONJ -DXCONJ $< -o $@ +endif $(KDIR)xgeru_k$(TSUFFIX).$(SUFFIX) $(KDIR)xgeru_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XGERUKERNEL) $(XGERPARAM) $(CC) -c $(CFLAGS) -DXDOUBLE -UCONJ $< -o $@ @@ -390,6 +458,8 @@ $(KDIR)xgerv_k$(TSUFFIX).$(SUFFIX) $(KDIR)xgerv_k$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)xgerd_k$(TSUFFIX).$(SUFFIX) $(KDIR)xgerd_k$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XGERCKERNEL) $(XGERPARAM) $(CC) -c $(CFLAGS) -DXDOUBLE -DCONJ -DXCONJ $< -o $@ +ifeq ($(BUILD_COMPLEX),1) + $(KDIR)chemv_U$(TSUFFIX).$(SUFFIX) $(KDIR)chemv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CHEMV_U_KERNEL) $(CHEMV_U_PARAM) $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -ULOWER -DHEMV $< -o $@ @@ -401,6 +471,9 @@ $(KDIR)chemv_V$(TSUFFIX).$(SUFFIX) $(KDIR)chemv_V$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)chemv_M$(TSUFFIX).$(SUFFIX) $(KDIR)chemv_M$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CHEMV_M_KERNEL) $(CHEMV_L_PARAM) ../symcopy.h $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DLOWER -DHEMV -DHEMVREV $< -o $@ +endif + +ifeq ($(BUILD_COMPLEX16),1) $(KDIR)zhemv_U$(TSUFFIX).$(SUFFIX) $(KDIR)zhemv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZHEMV_U_KERNEL) $(ZHEMV_U_PARAM) $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -ULOWER -DHEMV $< -o $@ @@ -413,7 +486,7 @@ $(KDIR)zhemv_V$(TSUFFIX).$(SUFFIX) $(KDIR)zhemv_V$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)zhemv_M$(TSUFFIX).$(SUFFIX) $(KDIR)zhemv_M$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZHEMV_M_KERNEL) $(ZHEMV_L_PARAM) ../symcopy.h $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DLOWER -DHEMV -DHEMVREV $< -o $@ - +endif $(KDIR)xhemv_U$(TSUFFIX).$(SUFFIX) $(KDIR)xhemv_U$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XHEMV_U_KERNEL) $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -ULOWER -DHEMV $< -o $@ @@ -426,3 +499,10 @@ $(KDIR)xhemv_V$(TSUFFIX).$(SUFFIX) $(KDIR)xhemv_V$(TSUFFIX).$(PSUFFIX) : $(KER $(KDIR)xhemv_M$(TSUFFIX).$(SUFFIX) $(KDIR)xhemv_M$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XHEMV_M_KERNEL) ../symcopy.h $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DLOWER -DHEMV -DHEMVREV $< -o $@ +ifeq ($(BUILD_BFLOAT16),1) +$(KDIR)sbgemv_n$(TSUFFIX).$(SUFFIX) $(KDIR)sbgemv_n$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SBGEMVNKERNEL) + $(CC) -c $(CFLAGS) -UCOMPLEX $< -o $@ +$(KDIR)sbgemv_t$(TSUFFIX).$(SUFFIX) $(KDIR)sbgemv_t$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SBGEMVTKERNEL) + $(CC) -c $(CFLAGS) -UCOMPLEX $< -o $@ +endif + diff --git a/kernel/Makefile.L3 
b/kernel/Makefile.L3 index f83def47b..d8d739965 100644 --- a/kernel/Makefile.L3 +++ b/kernel/Makefile.L3 @@ -1,4 +1,5 @@ USE_GEMM3M = 0 +OS := $(shell uname) ifeq ($(ARCH), x86) USE_GEMM3M = 1 @@ -8,6 +9,10 @@ ifeq ($(ARCH), x86_64) USE_GEMM3M = 1 endif +ifeq ($(ARCH), x86_64) +USE_DIRECT_SGEMM = 1 +endif + ifeq ($(ARCH), ia64) USE_GEMM3M = 1 endif @@ -20,13 +25,15 @@ ifeq ($(ARCH), arm64) USE_TRMM = 1 endif -ifeq ($(TARGET), LOONGSON3B) +ifeq ($(ARCH), riscv64) USE_TRMM = 1 endif -ifeq ($(CORE), GENERIC) +ifneq ($(DYNAMIC_ARCH), 1) +ifeq ($(TARGET), GENERIC) USE_TRMM = 1 endif +endif ifeq ($(CORE), HASWELL) USE_TRMM = 1 @@ -36,18 +43,28 @@ ifeq ($(CORE), SKYLAKEX) USE_TRMM = 1 endif +ifeq ($(CORE), COOPERLAKE) +USE_TRMM = 1 +endif + ifeq ($(CORE), ZEN) USE_TRMM = 1 endif ifeq ($(CORE), POWER8) +ifeq ($(BINARY64),1) USE_TRMM = 1 endif +endif ifeq ($(CORE), POWER9) USE_TRMM = 1 endif +ifeq ($(CORE), POWER10) +USE_TRMM = 1 +endif + ifeq ($(ARCH), zarch) USE_TRMM = 1 endif @@ -56,35 +73,75 @@ ifeq ($(CORE), Z14) USE_TRMM = 1 endif +ifdef USE_DIRECT_SGEMM +ifndef SGEMMDIRECTKERNEL +SGEMMDIRECTKERNEL = sgemm_direct_skylakex.c +SGEMMDIRECTPERFORMANT = sgemm_direct_performant.c +endif +endif +ifeq ($(BUILD_BFLOAT16), 1) +ifndef SBGEMMKERNEL +SBGEMM_BETA = ../generic/gemm_beta.c +SBGEMMKERNEL = ../generic/gemmkernel_2x2.c +SBGEMMINCOPY = ../generic/gemm_ncopy_2.c +SBGEMMITCOPY = ../generic/gemm_tcopy_2.c +SBGEMMONCOPY = ../generic/gemm_ncopy_2.c +SBGEMMOTCOPY = ../generic/gemm_tcopy_2.c +SBGEMMINCOPYOBJ = sbgemm_incopy$(TSUFFIX).$(SUFFIX) +SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX) +SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX) +SBGEMMOTCOPYOBJ = sbgemm_otcopy$(TSUFFIX).$(SUFFIX) +endif +SBKERNELOBJS += \ + sbgemm_kernel$(TSUFFIX).$(SUFFIX) \ + $(SBGEMMINCOPYOBJ) $(SBGEMMITCOPYOBJ) \ + $(SBGEMMONCOPYOBJ) $(SBGEMMOTCOPYOBJ) +endif +ifneq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE),$(BUILD_COMPLEX))" "" SKERNELOBJS += \ sgemm_kernel$(TSUFFIX).$(SUFFIX) \ + sgemm_beta$(TSUFFIX).$(SUFFIX) \ $(SGEMMINCOPYOBJ) $(SGEMMITCOPYOBJ) \ $(SGEMMONCOPYOBJ) $(SGEMMOTCOPYOBJ) +ifdef USE_DIRECT_SGEMM +SKERNELOBJS += \ + sgemm_direct$(TSUFFIX).$(SUFFIX) \ + sgemm_direct_performant$(TSUFFIX).$(SUFFIX) +endif +endif + +ifneq "$(or $(BUILD_DOUBLE),$(BUILD_COMPLEX16))" "" DKERNELOBJS += \ + dgemm_beta$(TSUFFIX).$(SUFFIX) \ dgemm_kernel$(TSUFFIX).$(SUFFIX) \ $(DGEMMINCOPYOBJ) $(DGEMMITCOPYOBJ) \ $(DGEMMONCOPYOBJ) $(DGEMMOTCOPYOBJ) +endif QKERNELOBJS += \ qgemm_kernel$(TSUFFIX).$(SUFFIX) \ $(QGEMMINCOPYOBJ) $(QGEMMITCOPYOBJ) \ $(QGEMMONCOPYOBJ) $(QGEMMOTCOPYOBJ) +ifneq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" CKERNELOBJS += \ cgemm_kernel_n$(TSUFFIX).$(SUFFIX) cgemm_kernel_r$(TSUFFIX).$(SUFFIX) \ cgemm_kernel_l$(TSUFFIX).$(SUFFIX) cgemm_kernel_b$(TSUFFIX).$(SUFFIX) \ $(CGEMMINCOPYOBJ) $(CGEMMITCOPYOBJ) \ $(CGEMMONCOPYOBJ) $(CGEMMOTCOPYOBJ) +endif +ifeq ($(BUILD_COMPLEX16),1) ZKERNELOBJS += \ zgemm_kernel_n$(TSUFFIX).$(SUFFIX) zgemm_kernel_r$(TSUFFIX).$(SUFFIX) \ zgemm_kernel_l$(TSUFFIX).$(SUFFIX) zgemm_kernel_b$(TSUFFIX).$(SUFFIX) \ $(ZGEMMINCOPYOBJ) $(ZGEMMITCOPYOBJ) \ $(ZGEMMONCOPYOBJ) $(ZGEMMOTCOPYOBJ) +endif XKERNELOBJS += \ xgemm_kernel_n$(TSUFFIX).$(SUFFIX) xgemm_kernel_r$(TSUFFIX).$(SUFFIX) \ @@ -92,6 +149,9 @@ XKERNELOBJS += \ $(XGEMMINCOPYOBJ) $(XGEMMITCOPYOBJ) \ $(XGEMMONCOPYOBJ) $(XGEMMOTCOPYOBJ) +ifeq ($(BUILD_BFLOAT16),1) +SBBLASOBJS += $(SBKERNELOBJS) +endif SBLASOBJS += $(SKERNELOBJS) DBLASOBJS += $(DKERNELOBJS) QBLASOBJS += $(QKERNELOBJS) @@ -99,38 +159,52 @@ CBLASOBJS += $(CKERNELOBJS) 
ZBLASOBJS += $(ZKERNELOBJS) XBLASOBJS += $(XKERNELOBJS) +ifeq ($(BUILD_BFLOAT16),1) +SBBLASOBJS += sbgemm_beta$(TSUFFIX).$(SUFFIX) +endif + +ifneq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" SBLASOBJS += \ sgemm_beta$(TSUFFIX).$(SUFFIX) \ strmm_kernel_LN$(TSUFFIX).$(SUFFIX) strmm_kernel_LT$(TSUFFIX).$(SUFFIX) \ strmm_kernel_RN$(TSUFFIX).$(SUFFIX) strmm_kernel_RT$(TSUFFIX).$(SUFFIX) \ strsm_kernel_LN$(TSUFFIX).$(SUFFIX) strsm_kernel_LT$(TSUFFIX).$(SUFFIX) \ - strsm_kernel_RN$(TSUFFIX).$(SUFFIX) strsm_kernel_RT$(TSUFFIX).$(SUFFIX) \ + strsm_kernel_RN$(TSUFFIX).$(SUFFIX) strsm_kernel_RT$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_DOUBLE),1) DBLASOBJS += \ dgemm_beta$(TSUFFIX).$(SUFFIX) \ dtrmm_kernel_LN$(TSUFFIX).$(SUFFIX) dtrmm_kernel_LT$(TSUFFIX).$(SUFFIX) \ dtrmm_kernel_RN$(TSUFFIX).$(SUFFIX) dtrmm_kernel_RT$(TSUFFIX).$(SUFFIX) \ dtrsm_kernel_LN$(TSUFFIX).$(SUFFIX) dtrsm_kernel_LT$(TSUFFIX).$(SUFFIX) \ - dtrsm_kernel_RN$(TSUFFIX).$(SUFFIX) dtrsm_kernel_RT$(TSUFFIX).$(SUFFIX) \ + dtrsm_kernel_RN$(TSUFFIX).$(SUFFIX) dtrsm_kernel_RT$(TSUFFIX).$(SUFFIX) +endif QBLASOBJS += \ qgemm_beta$(TSUFFIX).$(SUFFIX) \ qtrmm_kernel_LN$(TSUFFIX).$(SUFFIX) qtrmm_kernel_LT$(TSUFFIX).$(SUFFIX) \ qtrmm_kernel_RN$(TSUFFIX).$(SUFFIX) qtrmm_kernel_RT$(TSUFFIX).$(SUFFIX) \ qtrsm_kernel_LN$(TSUFFIX).$(SUFFIX) qtrsm_kernel_LT$(TSUFFIX).$(SUFFIX) \ - qtrsm_kernel_RN$(TSUFFIX).$(SUFFIX) qtrsm_kernel_RT$(TSUFFIX).$(SUFFIX) \ + qtrsm_kernel_RN$(TSUFFIX).$(SUFFIX) qtrsm_kernel_RT$(TSUFFIX).$(SUFFIX) +ifeq ($(BUILD_COMPLEX),1) CBLASOBJS += \ - cgemm_beta$(TSUFFIX).$(SUFFIX) \ ctrmm_kernel_LN$(TSUFFIX).$(SUFFIX) ctrmm_kernel_LT$(TSUFFIX).$(SUFFIX) \ ctrmm_kernel_LR$(TSUFFIX).$(SUFFIX) ctrmm_kernel_LC$(TSUFFIX).$(SUFFIX) \ ctrmm_kernel_RN$(TSUFFIX).$(SUFFIX) ctrmm_kernel_RT$(TSUFFIX).$(SUFFIX) \ - ctrmm_kernel_RR$(TSUFFIX).$(SUFFIX) ctrmm_kernel_RC$(TSUFFIX).$(SUFFIX) \ + ctrmm_kernel_RR$(TSUFFIX).$(SUFFIX) ctrmm_kernel_RC$(TSUFFIX).$(SUFFIX) +endif +ifneq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" +CBLASOBJS += \ + cgemm_beta$(TSUFFIX).$(SUFFIX) \ ctrsm_kernel_LN$(TSUFFIX).$(SUFFIX) ctrsm_kernel_LT$(TSUFFIX).$(SUFFIX) \ ctrsm_kernel_LR$(TSUFFIX).$(SUFFIX) ctrsm_kernel_LC$(TSUFFIX).$(SUFFIX) \ ctrsm_kernel_RN$(TSUFFIX).$(SUFFIX) ctrsm_kernel_RT$(TSUFFIX).$(SUFFIX) \ - ctrsm_kernel_RR$(TSUFFIX).$(SUFFIX) ctrsm_kernel_RC$(TSUFFIX).$(SUFFIX) \ + ctrsm_kernel_RR$(TSUFFIX).$(SUFFIX) ctrsm_kernel_RC$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX16),1) ZBLASOBJS += \ zgemm_beta$(TSUFFIX).$(SUFFIX) \ ztrmm_kernel_LN$(TSUFFIX).$(SUFFIX) ztrmm_kernel_LT$(TSUFFIX).$(SUFFIX) \ @@ -140,7 +214,8 @@ ZBLASOBJS += \ ztrsm_kernel_LN$(TSUFFIX).$(SUFFIX) ztrsm_kernel_LT$(TSUFFIX).$(SUFFIX) \ ztrsm_kernel_LR$(TSUFFIX).$(SUFFIX) ztrsm_kernel_LC$(TSUFFIX).$(SUFFIX) \ ztrsm_kernel_RN$(TSUFFIX).$(SUFFIX) ztrsm_kernel_RT$(TSUFFIX).$(SUFFIX) \ - ztrsm_kernel_RR$(TSUFFIX).$(SUFFIX) ztrsm_kernel_RC$(TSUFFIX).$(SUFFIX) \ + ztrsm_kernel_RR$(TSUFFIX).$(SUFFIX) ztrsm_kernel_RC$(TSUFFIX).$(SUFFIX) +endif XBLASOBJS += \ xgemm_beta$(TSUFFIX).$(SUFFIX) \ @@ -151,7 +226,7 @@ XBLASOBJS += \ xtrsm_kernel_LN$(TSUFFIX).$(SUFFIX) xtrsm_kernel_LT$(TSUFFIX).$(SUFFIX) \ xtrsm_kernel_LR$(TSUFFIX).$(SUFFIX) xtrsm_kernel_LC$(TSUFFIX).$(SUFFIX) \ xtrsm_kernel_RN$(TSUFFIX).$(SUFFIX) xtrsm_kernel_RT$(TSUFFIX).$(SUFFIX) \ - xtrsm_kernel_RR$(TSUFFIX).$(SUFFIX) xtrsm_kernel_RC$(TSUFFIX).$(SUFFIX) \ + xtrsm_kernel_RR$(TSUFFIX).$(SUFFIX) xtrsm_kernel_RC$(TSUFFIX).$(SUFFIX) ifeq ($(USE_GEMM3M), 1) @@ -161,6 +236,7 @@ XBLASOBJS += xgemm3m_kernel$(TSUFFIX).$(SUFFIX) endif +ifeq 
($(BUILD_SINGLE),1) SBLASOBJS += \ strmm_iunucopy$(TSUFFIX).$(SUFFIX) strmm_iunncopy$(TSUFFIX).$(SUFFIX) \ strmm_ilnucopy$(TSUFFIX).$(SUFFIX) strmm_ilnncopy$(TSUFFIX).$(SUFFIX) \ @@ -169,7 +245,10 @@ SBLASOBJS += \ strmm_ounucopy$(TSUFFIX).$(SUFFIX) strmm_ounncopy$(TSUFFIX).$(SUFFIX) \ strmm_olnucopy$(TSUFFIX).$(SUFFIX) strmm_olnncopy$(TSUFFIX).$(SUFFIX) \ strmm_outucopy$(TSUFFIX).$(SUFFIX) strmm_outncopy$(TSUFFIX).$(SUFFIX) \ - strmm_oltucopy$(TSUFFIX).$(SUFFIX) strmm_oltncopy$(TSUFFIX).$(SUFFIX) \ + strmm_oltucopy$(TSUFFIX).$(SUFFIX) strmm_oltncopy$(TSUFFIX).$(SUFFIX) +endif +ifneq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" +SBLASOBJS += \ strsm_iunucopy$(TSUFFIX).$(SUFFIX) strsm_iunncopy$(TSUFFIX).$(SUFFIX) \ strsm_ilnucopy$(TSUFFIX).$(SUFFIX) strsm_ilnncopy$(TSUFFIX).$(SUFFIX) \ strsm_iutucopy$(TSUFFIX).$(SUFFIX) strsm_iutncopy$(TSUFFIX).$(SUFFIX) \ @@ -177,10 +256,15 @@ SBLASOBJS += \ strsm_ounucopy$(TSUFFIX).$(SUFFIX) strsm_ounncopy$(TSUFFIX).$(SUFFIX) \ strsm_olnucopy$(TSUFFIX).$(SUFFIX) strsm_olnncopy$(TSUFFIX).$(SUFFIX) \ strsm_outucopy$(TSUFFIX).$(SUFFIX) strsm_outncopy$(TSUFFIX).$(SUFFIX) \ - strsm_oltucopy$(TSUFFIX).$(SUFFIX) strsm_oltncopy$(TSUFFIX).$(SUFFIX) \ + strsm_oltucopy$(TSUFFIX).$(SUFFIX) strsm_oltncopy$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_SINGLE),1) +SBLASOBJS += \ ssymm_iutcopy$(TSUFFIX).$(SUFFIX) ssymm_iltcopy$(TSUFFIX).$(SUFFIX) \ ssymm_outcopy$(TSUFFIX).$(SUFFIX) ssymm_oltcopy$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_DOUBLE),1) DBLASOBJS += \ dtrmm_iunucopy$(TSUFFIX).$(SUFFIX) dtrmm_iunncopy$(TSUFFIX).$(SUFFIX) \ dtrmm_ilnucopy$(TSUFFIX).$(SUFFIX) dtrmm_ilnncopy$(TSUFFIX).$(SUFFIX) \ @@ -200,6 +284,7 @@ DBLASOBJS += \ dtrsm_oltucopy$(TSUFFIX).$(SUFFIX) dtrsm_oltncopy$(TSUFFIX).$(SUFFIX) \ dsymm_iutcopy$(TSUFFIX).$(SUFFIX) dsymm_iltcopy$(TSUFFIX).$(SUFFIX) \ dsymm_outcopy$(TSUFFIX).$(SUFFIX) dsymm_oltcopy$(TSUFFIX).$(SUFFIX) +endif QBLASOBJS += \ qtrmm_iunucopy$(TSUFFIX).$(SUFFIX) qtrmm_iunncopy$(TSUFFIX).$(SUFFIX) \ @@ -219,8 +304,9 @@ QBLASOBJS += \ qtrsm_outucopy$(TSUFFIX).$(SUFFIX) qtrsm_outncopy$(TSUFFIX).$(SUFFIX) \ qtrsm_oltucopy$(TSUFFIX).$(SUFFIX) qtrsm_oltncopy$(TSUFFIX).$(SUFFIX) \ qsymm_iutcopy$(TSUFFIX).$(SUFFIX) qsymm_iltcopy$(TSUFFIX).$(SUFFIX) \ - qsymm_outcopy$(TSUFFIX).$(SUFFIX) qsymm_oltcopy$(TSUFFIX).$(SUFFIX) \ + qsymm_outcopy$(TSUFFIX).$(SUFFIX) qsymm_oltcopy$(TSUFFIX).$(SUFFIX) +ifeq ($(BUILD_COMPLEX),1) CBLASOBJS += \ ctrmm_iunucopy$(TSUFFIX).$(SUFFIX) ctrmm_iunncopy$(TSUFFIX).$(SUFFIX) \ ctrmm_ilnucopy$(TSUFFIX).$(SUFFIX) ctrmm_ilnncopy$(TSUFFIX).$(SUFFIX) \ @@ -230,6 +316,13 @@ CBLASOBJS += \ ctrmm_olnucopy$(TSUFFIX).$(SUFFIX) ctrmm_olnncopy$(TSUFFIX).$(SUFFIX) \ ctrmm_outucopy$(TSUFFIX).$(SUFFIX) ctrmm_outncopy$(TSUFFIX).$(SUFFIX) \ ctrmm_oltucopy$(TSUFFIX).$(SUFFIX) ctrmm_oltncopy$(TSUFFIX).$(SUFFIX) \ + csymm_iutcopy$(TSUFFIX).$(SUFFIX) csymm_iltcopy$(TSUFFIX).$(SUFFIX) \ + csymm_outcopy$(TSUFFIX).$(SUFFIX) csymm_oltcopy$(TSUFFIX).$(SUFFIX) \ + chemm_iutcopy$(TSUFFIX).$(SUFFIX) chemm_iltcopy$(TSUFFIX).$(SUFFIX) \ + chemm_outcopy$(TSUFFIX).$(SUFFIX) chemm_oltcopy$(TSUFFIX).$(SUFFIX) +endif +ifneq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" +CBLASOBJS += \ ctrsm_iunucopy$(TSUFFIX).$(SUFFIX) ctrsm_iunncopy$(TSUFFIX).$(SUFFIX) \ ctrsm_ilnucopy$(TSUFFIX).$(SUFFIX) ctrsm_ilnncopy$(TSUFFIX).$(SUFFIX) \ ctrsm_iutucopy$(TSUFFIX).$(SUFFIX) ctrsm_iutncopy$(TSUFFIX).$(SUFFIX) \ @@ -237,12 +330,10 @@ CBLASOBJS += \ ctrsm_ounucopy$(TSUFFIX).$(SUFFIX) ctrsm_ounncopy$(TSUFFIX).$(SUFFIX) \ ctrsm_olnucopy$(TSUFFIX).$(SUFFIX) 
ctrsm_olnncopy$(TSUFFIX).$(SUFFIX) \ ctrsm_outucopy$(TSUFFIX).$(SUFFIX) ctrsm_outncopy$(TSUFFIX).$(SUFFIX) \ - ctrsm_oltucopy$(TSUFFIX).$(SUFFIX) ctrsm_oltncopy$(TSUFFIX).$(SUFFIX) \ - csymm_iutcopy$(TSUFFIX).$(SUFFIX) csymm_iltcopy$(TSUFFIX).$(SUFFIX) \ - csymm_outcopy$(TSUFFIX).$(SUFFIX) csymm_oltcopy$(TSUFFIX).$(SUFFIX) \ - chemm_iutcopy$(TSUFFIX).$(SUFFIX) chemm_iltcopy$(TSUFFIX).$(SUFFIX) \ - chemm_outcopy$(TSUFFIX).$(SUFFIX) chemm_oltcopy$(TSUFFIX).$(SUFFIX) + ctrsm_oltucopy$(TSUFFIX).$(SUFFIX) ctrsm_oltncopy$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX16),1) ZBLASOBJS += \ ztrmm_iunucopy$(TSUFFIX).$(SUFFIX) ztrmm_iunncopy$(TSUFFIX).$(SUFFIX) \ ztrmm_ilnucopy$(TSUFFIX).$(SUFFIX) ztrmm_ilnncopy$(TSUFFIX).$(SUFFIX) \ @@ -264,6 +355,7 @@ ZBLASOBJS += \ zsymm_outcopy$(TSUFFIX).$(SUFFIX) zsymm_oltcopy$(TSUFFIX).$(SUFFIX) \ zhemm_iutcopy$(TSUFFIX).$(SUFFIX) zhemm_iltcopy$(TSUFFIX).$(SUFFIX) \ zhemm_outcopy$(TSUFFIX).$(SUFFIX) zhemm_oltcopy$(TSUFFIX).$(SUFFIX) +endif XBLASOBJS += \ xtrmm_iunucopy$(TSUFFIX).$(SUFFIX) xtrmm_iunncopy$(TSUFFIX).$(SUFFIX) \ @@ -289,6 +381,7 @@ XBLASOBJS += \ ifeq ($(USE_GEMM3M), 1) +ifeq ($(BUILD_COMPLEX),1) CBLASOBJS += \ cgemm3m_incopyb$(TSUFFIX).$(SUFFIX) cgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) \ cgemm3m_incopyr$(TSUFFIX).$(SUFFIX) cgemm3m_itcopyr$(TSUFFIX).$(SUFFIX) \ @@ -308,7 +401,9 @@ CBLASOBJS += \ chemm3m_ilcopyb$(TSUFFIX).$(SUFFIX) chemm3m_olcopyb$(TSUFFIX).$(SUFFIX) \ chemm3m_ilcopyr$(TSUFFIX).$(SUFFIX) chemm3m_olcopyr$(TSUFFIX).$(SUFFIX) \ chemm3m_ilcopyi$(TSUFFIX).$(SUFFIX) chemm3m_olcopyi$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX16),1) ZBLASOBJS += \ zgemm3m_incopyb$(TSUFFIX).$(SUFFIX) zgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) \ zgemm3m_incopyr$(TSUFFIX).$(SUFFIX) zgemm3m_itcopyr$(TSUFFIX).$(SUFFIX) \ @@ -328,6 +423,7 @@ ZBLASOBJS += \ zhemm3m_ilcopyb$(TSUFFIX).$(SUFFIX) zhemm3m_olcopyb$(TSUFFIX).$(SUFFIX) \ zhemm3m_ilcopyr$(TSUFFIX).$(SUFFIX) zhemm3m_olcopyr$(TSUFFIX).$(SUFFIX) \ zhemm3m_ilcopyi$(TSUFFIX).$(SUFFIX) zhemm3m_olcopyi$(TSUFFIX).$(SUFFIX) +endif XBLASOBJS += \ xgemm3m_incopyb$(TSUFFIX).$(SUFFIX) xgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) \ @@ -352,20 +448,25 @@ XBLASOBJS += \ endif ###### BLAS extensions ##### + +ifeq ($(BUILD_SINGLE),1) SBLASOBJS += \ somatcopy_k_cn$(TSUFFIX).$(SUFFIX) somatcopy_k_rn$(TSUFFIX).$(SUFFIX) \ somatcopy_k_ct$(TSUFFIX).$(SUFFIX) somatcopy_k_rt$(TSUFFIX).$(SUFFIX) \ simatcopy_k_cn$(TSUFFIX).$(SUFFIX) simatcopy_k_rn$(TSUFFIX).$(SUFFIX) \ simatcopy_k_ct$(TSUFFIX).$(SUFFIX) simatcopy_k_rt$(TSUFFIX).$(SUFFIX) \ sgeadd_k$(TSUFFIX).$(SUFFIX) - +endif +ifeq ($(BUILD_DOUBLE),1) DBLASOBJS += \ domatcopy_k_cn$(TSUFFIX).$(SUFFIX) domatcopy_k_rn$(TSUFFIX).$(SUFFIX) \ domatcopy_k_ct$(TSUFFIX).$(SUFFIX) domatcopy_k_rt$(TSUFFIX).$(SUFFIX) \ dimatcopy_k_cn$(TSUFFIX).$(SUFFIX) dimatcopy_k_rn$(TSUFFIX).$(SUFFIX) \ dimatcopy_k_ct$(TSUFFIX).$(SUFFIX) dimatcopy_k_rt$(TSUFFIX).$(SUFFIX) \ dgeadd_k$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX),1) CBLASOBJS += \ comatcopy_k_cn$(TSUFFIX).$(SUFFIX) comatcopy_k_rn$(TSUFFIX).$(SUFFIX) \ comatcopy_k_ct$(TSUFFIX).$(SUFFIX) comatcopy_k_rt$(TSUFFIX).$(SUFFIX) \ @@ -376,7 +477,9 @@ CBLASOBJS += \ cimatcopy_k_cnc$(TSUFFIX).$(SUFFIX) cimatcopy_k_rnc$(TSUFFIX).$(SUFFIX) \ cimatcopy_k_ctc$(TSUFFIX).$(SUFFIX) cimatcopy_k_rtc$(TSUFFIX).$(SUFFIX) \ cgeadd_k$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX16),1) ZBLASOBJS += \ zomatcopy_k_cn$(TSUFFIX).$(SUFFIX) zomatcopy_k_rn$(TSUFFIX).$(SUFFIX) \ zomatcopy_k_ct$(TSUFFIX).$(SUFFIX) zomatcopy_k_rt$(TSUFFIX).$(SUFFIX) \ @@ -387,7 +490,14 @@ 
ZBLASOBJS += \ zimatcopy_k_cnc$(TSUFFIX).$(SUFFIX) zimatcopy_k_rnc$(TSUFFIX).$(SUFFIX) \ zimatcopy_k_ctc$(TSUFFIX).$(SUFFIX) zimatcopy_k_rtc$(TSUFFIX).$(SUFFIX) \ zgeadd_k$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(BUILD_BFLOAT16), 1) +SBGEMMINCOPYOBJ_P = $(SBGEMMINCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) +SBGEMMITCOPYOBJ_P = $(SBGEMMITCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) +SBGEMMONCOPYOBJ_P = $(SBGEMMONCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) +SBGEMMOTCOPYOBJ_P = $(SBGEMMOTCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) +endif SGEMMINCOPYOBJ_P = $(SGEMMINCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) SGEMMITCOPYOBJ_P = $(SGEMMITCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) @@ -414,6 +524,11 @@ XGEMMITCOPYOBJ_P = $(XGEMMITCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) XGEMMONCOPYOBJ_P = $(XGEMMONCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) XGEMMOTCOPYOBJ_P = $(XGEMMOTCOPYOBJ:.$(SUFFIX)=.$(PSUFFIX)) +ifeq ($(BUILD_BFLOAT16),1) +$(KDIR)sbgemm_beta$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SBGEMM_BETA) + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ +endif + $(KDIR)sgemm_beta$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SGEMM_BETA) $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ @@ -432,11 +547,54 @@ $(KDIR)zgemm_beta$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZGEMM_BETA) $(KDIR)xgemm_beta$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(XGEMM_BETA) $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX $< -o $@ + +ifeq ($(BUILD_BFLOAT16), 1) + +$(KDIR)$(SBGEMMONCOPYOBJ) : $(KERNELDIR)/$(SBGEMMONCOPY) + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ + +$(KDIR)$(SBGEMMOTCOPYOBJ) : $(KERNELDIR)/$(SBGEMMOTCOPY) + +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o - > sbgemmotcopy.s + m4 sbgemmotcopy.s > sbgemmotcopy_nomacros.s + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX sbgemmotcopy_nomacros.s -o $@ + rm sbgemmotcopy.s sbgemmotcopy_nomacros.s +else + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ +endif + +ifneq ($(SBGEMM_UNROLL_M), $(SBGEMM_UNROLL_N)) + +$(KDIR)$(SBGEMMINCOPYOBJ) : $(KERNELDIR)/$(SBGEMMINCOPY) + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ + +$(KDIR)$(SBGEMMITCOPYOBJ) : $(KERNELDIR)/$(SBGEMMITCOPY) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o - > sbgemmitcopy.s + m4 sbgemmitcopy.s > sbgemmitcopy_nomacros.s + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX sbgemmitcopy_nomacros.s -o $@ + rm sbgemmitcopy.s sbgemmitcopy_nomacros.s +else + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ +endif + +endif +endif + $(KDIR)$(SGEMMONCOPYOBJ) : $(KERNELDIR)/$(SGEMMONCOPY) $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ $(KDIR)$(SGEMMOTCOPYOBJ) : $(KERNELDIR)/$(SGEMMOTCOPY) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -UDOUBLE -UCOMPLEX $< -o - > sgemmotcopy.s + m4 sgemmotcopy.s > sgemmotcopy_nomacros.s + $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX sgemmotcopy_nomacros.s -o $@ + rm sgemmotcopy.s sgemmotcopy_nomacros.s +else $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ +endif + ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) @@ -444,12 +602,26 @@ $(KDIR)$(SGEMMINCOPYOBJ) : $(KERNELDIR)/$(SGEMMINCOPY) $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ $(KDIR)$(SGEMMITCOPYOBJ) : $(KERNELDIR)/$(SGEMMITCOPY) - $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ - +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -UDOUBLE -UCOMPLEX $< -o - > sgemmitcopy.s + m4 sgemmitcopy.s > sgemmitcopy_nomacros.s + $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX sgemmitcopy_nomacros.s -o $@ + rm sgemmitcopy.s sgemmitcopy_nomacros.s +else + $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ +endif + endif $(KDIR)$(DGEMMONCOPYOBJ) : $(KERNELDIR)/$(DGEMMONCOPY) +ifeq ($(OS), AIX) + $(CC) 
$(CFLAGS) -S -DDOUBLE -UCOMPLEX $< -o - > dgemm_ncopy.s + m4 dgemm_ncopy.s > dgemm_ncopy_nomacros.s + $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX dgemm_ncopy_nomacros.s -o $@ + rm dgemm_ncopy.s dgemm_ncopy_nomacros.s +else $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX $< -o $@ +endif $(KDIR)$(DGEMMOTCOPYOBJ) : $(KERNELDIR)/$(DGEMMOTCOPY) $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX $< -o $@ @@ -460,7 +632,14 @@ $(KDIR)$(DGEMMINCOPYOBJ) : $(KERNELDIR)/$(DGEMMINCOPY) $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX $< -o $@ $(KDIR)$(DGEMMITCOPYOBJ) : $(KERNELDIR)/$(DGEMMITCOPY) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DDOUBLE -UCOMPLEX $< -o - > dgemm_itcopy.s + m4 dgemm_itcopy.s > dgemm_itcopy_nomacros.s + $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX dgemm_itcopy_nomacros.s -o $@ + rm dgemm_itcopy.s dgemm_itcopy_nomacros.s +else $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX $< -o $@ +endif endif @@ -496,7 +675,14 @@ $(KDIR)$(CGEMMINCOPYOBJ) : $(KERNELDIR)/$(CGEMMINCOPY) $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ $(KDIR)$(CGEMMITCOPYOBJ) : $(KERNELDIR)/$(CGEMMITCOPY) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -UDOUBLE -UCOMPLEX -S $< -o - > cgemm_itcopy.s + m4 cgemm_itcopy.s > cgemm_itcopy_nomacros.s + $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX cgemm_itcopy_nomacros.s -o $@ + rm cgemm_itcopy.s cgemm_itcopy_nomacros.s +else $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ +endif endif @@ -512,7 +698,14 @@ $(KDIR)$(ZGEMMINCOPYOBJ) : $(KERNELDIR)/$(ZGEMMINCOPY) $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX $< -o $@ $(KDIR)$(ZGEMMITCOPYOBJ) : $(KERNELDIR)/$(ZGEMMITCOPY) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DDOUBLE -UCOMPLEX $< -o - > zgemm_itcopy.s + m4 zgemm_itcopy.s > zgemm_itcopy_nomacros.s + $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX zgemm_itcopy_nomacros.s -o $@ + rm zgemm_itcopy.s zgemm_itcopy_nomacros.s +else $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX $< -o $@ +endif endif @@ -537,37 +730,127 @@ endif endif $(KDIR)sgemm_kernel$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SGEMMKERNEL) $(SGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -UDOUBLE -UCOMPLEX $< -o - > sgemm_kernel$(TSUFFIX).s + m4 sgemm_kernel$(TSUFFIX).s > sgemm_kernel$(TSUFFIX)_nomacros.s + $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX sgemm_kernel$(TSUFFIX)_nomacros.s -o $@ + rm sgemm_kernel$(TSUFFIX).s sgemm_kernel$(TSUFFIX)_nomacros.s +else $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ +endif + +ifdef USE_DIRECT_SGEMM +$(KDIR)sgemm_direct_performant$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SGEMMDIRECTPERFORMANT) + $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ +$(KDIR)sgemm_direct$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SGEMMDIRECTKERNEL) + $(CC) $(CFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ +endif + +ifeq ($(BUILD_BFLOAT16), 1) + +$(KDIR)sbgemm_kernel$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SBGEMMKERNEL) $(SBGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o - > sbgemm_kernel$(TSUFFIX).s + m4 sbgemm_kernel$(TSUFFIX).s > sbgemm_kernel$(TSUFFIX)_nomacros.s + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX sbgemm_kernel$(TSUFFIX)_nomacros.s -o $@ + rm sbgemm_kernel$(TSUFFIX).s sbgemm_kernel$(TSUFFIX)_nomacros.s +else + $(CC) $(CFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ +endif +endif $(KDIR)dgemm_kernel$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DGEMMKERNEL) $(DGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DDOUBLE -UCOMPLEX $< -o - > dgemm_kernel$(TSUFFIX).s + m4 dgemm_kernel$(TSUFFIX).s > dgemm_kernel$(TSUFFIX)_nomacros.s + $(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX dgemm_kernel$(TSUFFIX)_nomacros.s -o $@ + rm dgemm_kernel$(TSUFFIX).s dgemm_kernel$(TSUFFIX)_nomacros.s +else 
$(CC) $(CFLAGS) -c -DDOUBLE -UCOMPLEX $< -o $@ +endif $(KDIR)qgemm_kernel$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(QGEMMKERNEL) $(QGEMMDEPEND) $(CC) $(CFLAGS) -c -DXDOUBLE -UCOMPLEX $< -o $@ $(KDIR)cgemm_kernel_n$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CGEMMKERNEL) $(CGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -UDOUBLE -DCOMPLEX -DNN $< -o - > cgemm_kernel_n.s + m4 cgemm_kernel_n.s > cgemm_kernel_n_nomacros.s + $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DNN cgemm_kernel_n_nomacros.s -o $@ + rm cgemm_kernel_n.s cgemm_kernel_n_nomacros.s +else $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DNN $< -o $@ +endif $(KDIR)cgemm_kernel_l$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CGEMMKERNEL) $(CGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -UDOUBLE -DCOMPLEX -DCN $< -o - > cgemm_kernel_l.s + m4 cgemm_kernel_l.s > cgemm_kernel_l_nomacros.s + $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DCN cgemm_kernel_l_nomacros.s -o $@ + rm cgemm_kernel_l.s cgemm_kernel_l_nomacros.s +else $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DCN $< -o $@ +endif $(KDIR)cgemm_kernel_r$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CGEMMKERNEL) $(CGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -UDOUBLE -DCOMPLEX -DNC $< -o - > cgemm_kernel_r.s + m4 cgemm_kernel_r.s > cgemm_kernel_r_nomacros.s + $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DNC cgemm_kernel_r_nomacros.s -o $@ + rm cgemm_kernel_r.s cgemm_kernel_r_nomacros.s +else $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DNC $< -o $@ +endif $(KDIR)cgemm_kernel_b$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CGEMMKERNEL) $(CGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -UDOUBLE -DCOMPLEX -DCC $< -o - > cgemm_kernel_b.s + m4 cgemm_kernel_b.s > cgemm_kernel_b_nomacros.s + $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DCC cgemm_kernel_b_nomacros.s -o $@ + rm cgemm_kernel_b.s cgemm_kernel_b_nomacros.s +else $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DCC $< -o $@ +endif $(KDIR)zgemm_kernel_n$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZGEMMKERNEL) $(ZGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DDOUBLE -DCOMPLEX -DNN $< -o - > zgemm_kernel_n.s + m4 zgemm_kernel_n.s > zgemm_kernel_n_nomacros.s + $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DNN zgemm_kernel_n_nomacros.s -o $@ + rm zgemm_kernel_n.s zgemm_kernel_n_nomacros.s +else $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DNN $< -o $@ +endif $(KDIR)zgemm_kernel_l$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZGEMMKERNEL) $(ZGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DDOUBLE -DCOMPLEX -DCN $< -o - > zgemm_kernel_l.s + m4 zgemm_kernel_l.s > zgemm_kernel_l_nomacros.s + $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DCN zgemm_kernel_l_nomacros.s -o $@ + rm zgemm_kernel_l.s zgemm_kernel_l_nomacros.s +else $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DCN $< -o $@ +endif $(KDIR)zgemm_kernel_r$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZGEMMKERNEL) $(ZGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DDOUBLE -DCOMPLEX -DNC $< -o - > zgemm_kernel_r.s + m4 zgemm_kernel_r.s > zgemm_kernel_r_nomacros.s + $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DNC zgemm_kernel_r_nomacros.s -o $@ + rm zgemm_kernel_r.s zgemm_kernel_r_nomacros.s +else $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DNC $< -o $@ +endif $(KDIR)zgemm_kernel_b$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZGEMMKERNEL) $(ZGEMMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DDOUBLE -DCOMPLEX -DCC $< -o - > zgemm_kernel_b.s + m4 zgemm_kernel_b.s > zgemm_kernel_b_nomacros.s + $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DCC zgemm_kernel_b_nomacros.s -o $@ + rm zgemm_kernel_b.s zgemm_kernel_b_nomacros.s +else $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DCC $< -o $@ +endif 
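# Editor's sketch (hedged; not part of the patch). Every AIX branch added in
# this stretch repeats one workaround: emit assembly with -S, expand the
# gas-style macros through m4 -- apparently because the AIX assembler cannot
# digest them -- assemble the expanded file, then delete the intermediates.
# Generic shape of the pattern; foo, FOOKERNEL and FOO_DEFS are placeholders,
# not names from the patch:

$(KDIR)foo_kernel$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(FOOKERNEL)
ifeq ($(OS), AIX)
	$(CC) $(CFLAGS) -S $(FOO_DEFS) $< -o - > foo_kernel$(TSUFFIX).s
	m4 foo_kernel$(TSUFFIX).s > foo_kernel$(TSUFFIX)_nomacros.s
	$(CC) $(CFLAGS) -c $(FOO_DEFS) foo_kernel$(TSUFFIX)_nomacros.s -o $@
	rm foo_kernel$(TSUFFIX).s foo_kernel$(TSUFFIX)_nomacros.s
else
	$(CC) $(CFLAGS) -c $(FOO_DEFS) $< -o $@
endif

# Note that several rules above write fixed intermediate names
# (cgemm_kernel_n.s, zgemm_kernel_b.s, ...) while the gemm kernels use
# $(TSUFFIX)-suffixed ones; the fixed names could collide when several
# DYNAMIC_ARCH cores build in parallel.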
$(KDIR)xgemm_kernel_n$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(XGEMMKERNEL) $(XGEMMDEPEND) $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DNN $< -o $@ @@ -584,28 +867,84 @@ $(KDIR)xgemm_kernel_b$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(XGEMMKERNEL) $(XGEMMD ifdef USE_TRMM $(KDIR)strmm_kernel_LN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(STRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -UCOMPLEX -DLEFT -UTRANSA $< -o - > strmmkernel_ln.s + m4 strmmkernel_ln.s > strmmkernel_ln_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -DLEFT -UTRANSA strmmkernel_ln_nomacros.s -o $@ + rm strmmkernel_ln.s strmmkernel_ln_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -DLEFT -UTRANSA $< -o $@ +endif $(KDIR)strmm_kernel_LT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(STRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -UCOMPLEX -DLEFT -DTRANSA $< -o - > strmmkernel_lt.s + m4 strmmkernel_lt.s > strmmkernel_lt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -DLEFT -DTRANSA strmmkernel_lt_nomacros.s -o $@ + rm strmmkernel_lt.s strmmkernel_lt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -DLEFT -DTRANSA $< -o $@ +endif $(KDIR)strmm_kernel_RN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(STRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -UTRANSA $< -o - > strmmkernel_rn.s + m4 strmmkernel_rn.s > strmmkernel_rn_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -UTRANSA strmmkernel_rn_nomacros.s -o $@ + rm strmmkernel_rn.s strmmkernel_rn_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -UTRANSA $< -o $@ +endif $(KDIR)strmm_kernel_RT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(STRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o - > strmm_kernel_rt.s + m4 strmm_kernel_rt.s > strmm_kernel_rt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA strmm_kernel_rt_nomacros.s -o $@ + rm strmm_kernel_rt.s strmm_kernel_rt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o $@ +endif $(KDIR)dtrmm_kernel_LN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -UCOMPLEX -DLEFT -UTRANSA $< -o - > dtrmm_kernel_ln.s + m4 dtrmm_kernel_ln.s > dtrmm_kernel_ln_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -DLEFT -UTRANSA dtrmm_kernel_ln_nomacros.s -o $@ + rm dtrmm_kernel_ln.s dtrmm_kernel_ln_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -DLEFT -UTRANSA $< -o $@ +endif $(KDIR)dtrmm_kernel_LT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -UCOMPLEX -DLEFT -DTRANSA $< -o - > dtrmm_kernel_lt.s + m4 dtrmm_kernel_lt.s > dtrmm_kernel_lt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -DLEFT -DTRANSA dtrmm_kernel_lt_nomacros.s -o $@ + rm dtrmm_kernel_lt.s dtrmm_kernel_lt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -DLEFT -DTRANSA $< -o $@ +endif $(KDIR)dtrmm_kernel_RN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -UCOMPLEX -ULEFT -UTRANSA $< -o - > dtrmm_kernel_rn.s + m4 dtrmm_kernel_rn.s > dtrmm_kernel_rn_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -ULEFT -UTRANSA dtrmm_kernel_rn_nomacros.s -o $@ + rm dtrmm_kernel_rn.s dtrmm_kernel_rn_nomacros.s +else $(CC) $(CFLAGS) -c 
-DTRMMKERNEL -DDOUBLE -UCOMPLEX -ULEFT -UTRANSA $< -o $@ +endif $(KDIR)dtrmm_kernel_RT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o - > dtrmm_kernel_rt.s + m4 dtrmm_kernel_rt.s > dtrmm_kernel_rt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -ULEFT -DTRANSA dtrmm_kernel_rt_nomacros.s -o $@ + rm dtrmm_kernel_rt.s dtrmm_kernel_rt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o $@ +endif $(KDIR)qtrmm_kernel_LN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(QGEMMKERNEL) $(CC) $(CFLAGS) -c -DTRMMKERNEL -DXDOUBLE -UCOMPLEX -DLEFT -UTRANSA $< -o $@ @@ -620,52 +959,165 @@ $(KDIR)qtrmm_kernel_RT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(QGEMMKERNEL) $(CC) $(CFLAGS) -c -DTRMMKERNEL -DXDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o $@ $(KDIR)ctrmm_kernel_LN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -UTRANSA -UCONJ -DNN $< -o - > ctrmm_kernel_ln.s + m4 ctrmm_kernel_ln.s > ctrmm_kernel_ln_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -UTRANSA -UCONJ -DNN ctrmm_kernel_ln_nomacros.s -o $@ + rm ctrmm_kernel_ln.s ctrmm_kernel_ln_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -UTRANSA -UCONJ -DNN $< -o $@ +endif $(KDIR)ctrmm_kernel_LT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -DTRANSA -UCONJ -DNN $< -o - > ctrmm_kernel_lt.s + m4 ctrmm_kernel_lt.s > ctrmm_kernel_lt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -DTRANSA -UCONJ -DNN ctrmm_kernel_lt_nomacros.s -o $@ + rm ctrmm_kernel_lt.s ctrmm_kernel_lt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -DTRANSA -UCONJ -DNN $< -o $@ +endif $(KDIR)ctrmm_kernel_LR$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -UTRANSA -DCONJ -DCN $< -o - > ctrmm_kernel_lr.s + m4 ctrmm_kernel_lr.s > ctrmm_kernel_lr_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -UTRANSA -DCONJ -DCN ctrmm_kernel_lr_nomacros.s -o $@ + rm ctrmm_kernel_lr.s ctrmm_kernel_lr_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -UTRANSA -DCONJ -DCN $< -o $@ +endif $(KDIR)ctrmm_kernel_LC$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -DTRANSA -DCONJ -DCN $< -o - > ctrmm_kernel_lc.s + m4 ctrmm_kernel_lc.s > ctrmm_kernel_lc_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -DTRANSA -DCONJ -DCN ctrmm_kernel_lc_nomacros.s -o $@ + rm ctrmm_kernel_lc_nomacros.s ctrmm_kernel_lc.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -DLEFT -DTRANSA -DCONJ -DCN $< -o $@ +endif $(KDIR)ctrmm_kernel_RN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -UTRANSA -UCONJ -DNN $< -o - > ctrmm_kernel_rn.s + m4 ctrmm_kernel_rn.s > ctrmm_kernel_rn_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -UTRANSA -UCONJ -DNN ctrmm_kernel_rn_nomacros.s -o $@ + rm ctrmm_kernel_rn.s ctrmm_kernel_rn_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -UTRANSA -UCONJ -DNN $< -o $@ +endif $(KDIR)ctrmm_kernel_RT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CTRMMKERNEL) +ifeq ($(OS), AIX) + 
$(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -DTRANSA -UCONJ -DNN $< -o - > ctrmm_kernel_rt.s + m4 ctrmm_kernel_rt.s > ctrmm_kernel_rt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -DTRANSA -UCONJ -DNN ctrmm_kernel_rt_nomacros.s -o $@ + rm ctrmm_kernel_rt.s ctrmm_kernel_rt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -DTRANSA -UCONJ -DNN $< -o $@ +endif $(KDIR)ctrmm_kernel_RR$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -UTRANSA -DCONJ -DNC $< -o - > ctrmm_kernel_rr.s + m4 ctrmm_kernel_rr.s > ctrmm_kernel_rr_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -UTRANSA -DCONJ -DNC ctrmm_kernel_rr_nomacros.s -o $@ + rm ctrmm_kernel_rr.s ctrmm_kernel_rr_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -UTRANSA -DCONJ -DNC $< -o $@ +endif $(KDIR)ctrmm_kernel_RC$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(CTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -DTRANSA -DCONJ -DNC $< -o - > ctrmm_kernel_RC.s + m4 ctrmm_kernel_RC.s > ctrmm_kernel_RC_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -DTRANSA -DCONJ -DNC ctrmm_kernel_RC_nomacros.s -o $@ + rm ctrmm_kernel_RC.s ctrmm_kernel_RC_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -DCOMPLEX -ULEFT -DTRANSA -DCONJ -DNC $< -o $@ +endif $(KDIR)ztrmm_kernel_LN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -UTRANSA -UCONJ -DNN $< -o - > ztrmm_kernel_ln.s + m4 ztrmm_kernel_ln.s > ztrmm_kernel_ln_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -UTRANSA -UCONJ -DNN ztrmm_kernel_ln_nomacros.s -o $@ + rm ztrmm_kernel_ln.s ztrmm_kernel_ln_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -UTRANSA -UCONJ -DNN $< -o $@ +endif $(KDIR)ztrmm_kernel_LT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -DTRANSA -UCONJ -DNN $< -o - > ztrmm_kernel_lt.s + m4 ztrmm_kernel_lt.s > ztrmm_kernel_lt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -DTRANSA -UCONJ -DNN ztrmm_kernel_lt_nomacros.s -o $@ + rm ztrmm_kernel_lt.s ztrmm_kernel_lt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -DTRANSA -UCONJ -DNN $< -o $@ +endif $(KDIR)ztrmm_kernel_LR$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -UTRANSA -DCONJ -DCN $< -o - > ztrmm_kernel_lr.s + m4 ztrmm_kernel_lr.s > ztrmm_kernel_lr_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -UTRANSA -DCONJ -DCN ztrmm_kernel_lr_nomacros.s -o $@ + rm ztrmm_kernel_lr.s ztrmm_kernel_lr_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -UTRANSA -DCONJ -DCN $< -o $@ +endif $(KDIR)ztrmm_kernel_LC$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -DTRANSA -DCONJ -DCN $< -o - > ztrmm_kernel_lc.s + m4 ztrmm_kernel_lc.s >ztrmm_kernel_lc_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -DTRANSA -DCONJ -DCN ztrmm_kernel_lc_nomacros.s -o $@ + rm ztrmm_kernel_lc.s ztrmm_kernel_lc_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -DLEFT -DTRANSA -DCONJ -DCN $< -o $@ +endif 
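# Editor's summary, read off the flags in the hunks above (identical for the
# c and z variants; c kernels are built -UDOUBLE, z kernels -DDOUBLE). The
# eight complex trmm kernels differ only in this define matrix:
#
#   trmm_kernel_LN : -DLEFT -UTRANSA -UCONJ -DNN
#   trmm_kernel_LT : -DLEFT -DTRANSA -UCONJ -DNN
#   trmm_kernel_LR : -DLEFT -UTRANSA -DCONJ -DCN
#   trmm_kernel_LC : -DLEFT -DTRANSA -DCONJ -DCN
#   trmm_kernel_RN : -ULEFT -UTRANSA -UCONJ -DNN
#   trmm_kernel_RT : -ULEFT -DTRANSA -UCONJ -DNN
#   trmm_kernel_RR : -ULEFT -UTRANSA -DCONJ -DNC
#   trmm_kernel_RC : -ULEFT -DTRANSA -DCONJ -DNC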
$(KDIR)ztrmm_kernel_RN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -UTRANSA -UCONJ -DNN $< -o - > ztrmm_kernel_rn.s + m4 ztrmm_kernel_rn.s > ztrmm_kernel_rn_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -UTRANSA -UCONJ -DNN ztrmm_kernel_rn_nomacros.s -o $@ + rm ztrmm_kernel_rn.s ztrmm_kernel_rn_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -UTRANSA -UCONJ -DNN $< -o $@ +endif $(KDIR)ztrmm_kernel_RT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -DTRANSA -UCONJ -DNN $< -o - > ztrmm_kernel_rt.s + m4 ztrmm_kernel_rt.s > ztrmm_kernel_rt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -DTRANSA -UCONJ -DNN ztrmm_kernel_rt_nomacros.s -o $@ + rm ztrmm_kernel_rt.s ztrmm_kernel_rt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -DTRANSA -UCONJ -DNN $< -o $@ +endif $(KDIR)ztrmm_kernel_RR$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -UTRANSA -DCONJ -DNC $< -o - > ztrmm_kernel_rr.s + m4 ztrmm_kernel_rr.s > ztrmm_kernel_rr_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -UTRANSA -DCONJ -DNC ztrmm_kernel_rr_nomacros.s -o $@ + rm ztrmm_kernel_rr.s ztrmm_kernel_rr_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -UTRANSA -DCONJ -DNC $< -o $@ +endif $(KDIR)ztrmm_kernel_RC$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(ZTRMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -DTRANSA -DCONJ -DNC $< -o - > ztrmm_kernel_rc.s + m4 ztrmm_kernel_rc.s > ztrmm_kernel_rc_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -DTRANSA -DCONJ -DNC ztrmm_kernel_rc_nomacros.s -o $@ + rm ztrmm_kernel_rc.s ztrmm_kernel_rc_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -DCOMPLEX -ULEFT -DTRANSA -DCONJ -DNC $< -o $@ +endif + else $(KDIR)strmm_kernel_LN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SGEMMKERNEL) $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -DLEFT -UTRANSA $< -o $@ @@ -677,7 +1129,14 @@ $(KDIR)strmm_kernel_RN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SGEMMKERNEL) $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -UTRANSA $< -o $@ $(KDIR)strmm_kernel_RT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(SGEMMKERNEL) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o - > strmm_kernel_rt.s + m4 strmm_kernel_rt.s > strmm_kernel_rt_nomacros.s + $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA strmm_kernel_rt_nomacros.s -o $@ + rm strmm_kernel_rt.s strmm_kernel_rt_nomacros.s +else $(CC) $(CFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o $@ +endif $(KDIR)dtrmm_kernel_LN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DGEMMKERNEL) $(CC) $(CFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -DLEFT -UTRANSA $< -o $@ @@ -804,7 +1263,14 @@ $(KDIR)dtrsm_kernel_LN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DTRSMKERNEL_LN) $(DT $(CC) -c $(CFLAGS) -DTRSMKERNEL -UCOMPLEX -DDOUBLE -DUPPER -DLN -UCONJ $< -o $@ $(KDIR)dtrsm_kernel_LT$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DTRSMKERNEL_LT) $(DTRSMDEPEND) +ifeq ($(OS), AIX) + $(CC) $(CFLAGS) -S -DTRSMKERNEL -UCOMPLEX -DDOUBLE -UUPPER -DLT -UCONJ $< -o - > dtrsm_kernel_lt.s + m4 dtrsm_kernel_lt.s > dtrsm_kernel_lt_nomacros.s + $(CC) -c $(CFLAGS) -DTRSMKERNEL -UCOMPLEX -DDOUBLE -UUPPER -DLT -UCONJ 
dtrsm_kernel_lt_nomacros.s -o $@ + rm dtrsm_kernel_lt.s dtrsm_kernel_lt_nomacros.s +else $(CC) -c $(CFLAGS) -DTRSMKERNEL -UCOMPLEX -DDOUBLE -UUPPER -DLT -UCONJ $< -o $@ +endif $(KDIR)dtrsm_kernel_RN$(TSUFFIX).$(SUFFIX) : $(KERNELDIR)/$(DTRSMKERNEL_RN) $(DTRSMDEPEND) $(CC) -c $(CFLAGS) -DTRSMKERNEL -UCOMPLEX -DDOUBLE -DUPPER -DRN -UCONJ $< -o $@ @@ -1909,6 +2375,11 @@ $(KDIR)xtrsm_oltncopy$(TSUFFIX).$(SUFFIX) : generic/ztrsm_ltcopy_$(XGEMM_UNROLL_ $(KDIR)sgemm_beta$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SGEMM_BETA) $(CC) $(PFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ +ifeq ($(BUILD_BFLOAT16), 1) +$(KDIR)sbgemm_beta$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SBGEMM_BETA) + $(CC) $(PFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ +endif + $(KDIR)dgemm_beta$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DGEMM_BETA) $(CC) $(PFLAGS) -c -DDOUBLE -UCOMPLEX $< -o $@ @@ -1924,6 +2395,24 @@ $(KDIR)zgemm_beta$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(ZGEMM_BETA) $(KDIR)xgemm_beta$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(XGEMM_BETA) $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX $< -o $@ + +ifeq ($(BUILD_BFLOAT16), 1) +$(SBGEMMONCOPYOBJ_P) : $(KERNELDIR)/$(SBGEMMONCOPY) + $(CC) $(PFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ + +$(SBGEMMOTCOPYOBJ_P) : $(KERNELDIR)/$(SBGEMMOTCOPY) + $(CC) $(PFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ + +ifneq ($(SBGEMM_UNROLL_M), $(SBGEMM_UNROLL_N)) +$(SBGEMMINCOPYOBJ_P) : $(KERNELDIR)/$(SBGEMMINCOPY) + $(CC) $(PFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ + +$(SBGEMMITCOPYOBJ_P) : $(KERNELDIR)/$(SBGEMMITCOPY) + $(CC) $(PFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ + +endif +endif + $(SGEMMONCOPYOBJ_P) : $(KERNELDIR)/$(SGEMMONCOPY) $(CC) $(PFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ @@ -2028,6 +2517,12 @@ endif endif + +ifeq ($(BUILD_BFLOAT16), 1) +$(KDIR)sbgemm_kernel$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SBGEMMKERNEL) $(SBGEMMDEPEND) + $(CC) $(PFLAGS) -c -DBFLOAT16 -UDOUBLE -UCOMPLEX $< -o $@ +endif + $(KDIR)sgemm_kernel$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SGEMMKERNEL) $(SGEMMDEPEND) $(CC) $(PFLAGS) -c -UDOUBLE -UCOMPLEX $< -o $@ @@ -2044,7 +2539,14 @@ $(KDIR)cgemm_kernel_l$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CGEMMKERNEL) $(CGEMM $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DCN $< -o $@ $(KDIR)cgemm_kernel_r$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CGEMMKERNEL) $(CGEMMDEPEND) - $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DNC $< -o $@ +ifeq ($(OS), AIX) + $(CC) $(PFLAGS) -S -UDOUBLE -DCOMPLEX -DNC $< -o - > cgemm_kernel_r.s + m4 cgemm_kernel_r.s > cgemm_kernel_r_nomacros.s + $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DNC cgemm_kernel_r_nomacros.s -o $@ + rm cgemm_kernel_r.s cgemm_kernel_r_nomacros.s +else + $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DNC $< -o $@ +endif $(KDIR)cgemm_kernel_b$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CGEMMKERNEL) $(CGEMMDEPEND) $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DCC $< -o $@ @@ -2083,7 +2585,14 @@ $(KDIR)strmm_kernel_RN$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SGEMMKERNEL) $(CC) $(PFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -UTRANSA $< -o $@ $(KDIR)strmm_kernel_RT$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SGEMMKERNEL) - $(CC) $(PFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o $@ +ifeq ($(OS), AIX) + $(CC) $(PFLAGS) -S -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA $< -o - > strmm_kernel_rt.s + m4 strmm_kernel_rt.s > strmm_kernel_rt_nomacros.s + $(CC) $(PFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT -DTRANSA strmm_kernel_rt_nomacros.s -o $@ + rm strmm_kernel_rt.s strmm_kernel_rt_nomacros.s +else + $(CC) $(PFLAGS) -c -DTRMMKERNEL -UDOUBLE -UCOMPLEX -ULEFT
-DTRANSA $< -o $@ +endif $(KDIR)dtrmm_kernel_LN$(TSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DGEMMKERNEL) $(CC) $(PFLAGS) -c -DTRMMKERNEL -DDOUBLE -UCOMPLEX -DLEFT -UTRANSA $< -o $@ diff --git a/kernel/arm/KERNEL.ARMV6 b/kernel/arm/KERNEL.ARMV6 index b773a5ba0..344a71885 100644 --- a/kernel/arm/KERNEL.ARMV6 +++ b/kernel/arm/KERNEL.ARMV6 @@ -1,30 +1,30 @@ include $(KERNELDIR)/KERNEL.ARMV5 -SAMAXKERNEL = iamax_vfp.S -DAMAXKERNEL = iamax_vfp.S -CAMAXKERNEL = iamax_vfp.S -ZAMAXKERNEL = iamax_vfp.S +SAMAXKERNEL = amax_vfp.S +DAMAXKERNEL = amax_vfp.S +#CAMAXKERNEL = amax_vfp.S +#ZAMAXKERNEL = amax_vfp.S -SAMINKERNEL = iamax_vfp.S -DAMINKERNEL = iamax_vfp.S -CAMINKERNEL = iamax_vfp.S -ZAMINKERNEL = iamax_vfp.S +SAMINKERNEL = amax_vfp.S +DAMINKERNEL = amax_vfp.S +#CAMINKERNEL = amax_vfp.S +#ZAMINKERNEL = amax_vfp.S -SMAXKERNEL = iamax_vfp.S -DMAXKERNEL = iamax_vfp.S +SMAXKERNEL = amax_vfp.S +DMAXKERNEL = amax_vfp.S -SMINKERNEL = iamax_vfp.S -DMINKERNEL = iamax_vfp.S +SMINKERNEL = amax_vfp.S +DMINKERNEL = amax_vfp.S ISAMAXKERNEL = iamax_vfp.S IDAMAXKERNEL = iamax_vfp.S -ICAMAXKERNEL = iamax_vfp.S -IZAMAXKERNEL = iamax_vfp.S +#ICAMAXKERNEL = iamax_vfp.S +#IZAMAXKERNEL = iamax_vfp.S ISAMINKERNEL = iamax_vfp.S IDAMINKERNEL = iamax_vfp.S -ICAMINKERNEL = iamax_vfp.S -IZAMINKERNEL = iamax_vfp.S +#ICAMINKERNEL = iamax_vfp.S +#IZAMINKERNEL = iamax_vfp.S ISMAXKERNEL = iamax_vfp.S IDMAXKERNEL = iamax_vfp.S diff --git a/kernel/arm/amax_vfp.S b/kernel/arm/amax_vfp.S new file mode 100644 index 000000000..d3770ea1e --- /dev/null +++ b/kernel/arm/amax_vfp.S @@ -0,0 +1,445 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +/************************************************************************************** +* 2013/11/14 Saar +* BLASTEST : OK +* CTEST : OK +* TEST : OK +* +**************************************************************************************/ + +#define ASSEMBLER +#include "common.h" + +#define STACKSIZE 256 + +#define N r0 +#define X r1 +#define INC_X r2 + +#define I r12 + +#define X_PRE 512 + +/************************************************************************************** +* Macro definitions +**************************************************************************************/ + +#if defined(USE_ABS) + +#if defined(DOUBLE) + +#define VABS(x0,x1) vabs.f64 x0, x1 + +#else + +#define VABS(x0,x1) vabs.f32 x0, x1 + +#endif + +#else + +#define VABS(x0,x1) nop + +#endif + +/*****************************************************************************************/ + +#if defined(USE_MIN) + +#define MOVCOND movlt + +#if defined(DOUBLE) + +#define VMOVCOND vmovlt.f64 + +#else + +#define VMOVCOND vmovlt.f32 + +#endif + +#else + +#define MOVCOND movgt + +#if defined(DOUBLE) + +#define VMOVCOND vmovgt.f64 + +#else + +#define VMOVCOND vmovgt.f32 + +#endif + + +#endif + + +/*****************************************************************************************/ + + + +#if !defined(COMPLEX) + +#if defined(DOUBLE) + +.macro INIT_F + + vldmia.f64 X!, { d0 } + VABS( d0, d0 ) + +.endm + +.macro KERNEL_F1 + + vldmia.f64 X!, { d4 } + VABS( d4, d4 ) + vcmpe.f64 d4, d0 + vmrs APSR_nzcv, fpscr + VMOVCOND d0, d4 + +.endm + +.macro INIT_S + + vldmia.f64 X, { d0 } + VABS( d0, d0 ) + add X, X, INC_X + +.endm + + +.macro KERNEL_S1 + + vldmia.f64 X, { d4 } + VABS( d4, d4 ) + vcmpe.f64 d4, d0 + vmrs APSR_nzcv, fpscr + VMOVCOND d0, d4 + add X, X, INC_X + +.endm + +#else + +.macro INIT_F + + vldmia.f32 X!, { s0 } + VABS( s0, s0 ) + +.endm + +.macro KERNEL_F1 + + vldmia.f32 X!, { s4 } + VABS( s4, s4 ) + vcmpe.f32 s4, s0 + vmrs APSR_nzcv, fpscr + VMOVCOND s0, s4 + +.endm + +.macro INIT_S + + vldmia.f32 X, { s0 } + VABS( s0, s0 ) + add X, X, INC_X + +.endm + + +.macro KERNEL_S1 + + vldmia.f32 X, { s4 } + VABS( s4, s4 ) + vcmpe.f32 s4, s0 + vmrs APSR_nzcv, fpscr + VMOVCOND s0, s4 + add X, X, INC_X + +.endm + + + + +#endif + +#else + +#if defined(DOUBLE) + +.macro INIT_F + + vldmia.f64 X!, { d0 -d1 } + vabs.f64 d0, d0 + vabs.f64 d1, d1 + vadd.f64 d0 , d0, d1 +.endm + + +.macro KERNEL_F1 + + vldmia.f64 X!, { d4 - d5 } + vabs.f64 d4, d4 + vabs.f64 d5, d5 + vadd.f64 d4 , d4, d5 + vcmpe.f64 d4, d0 + vmrs APSR_nzcv, fpscr + VMOVCOND d0, d4 + +.endm + +.macro INIT_S + + vldmia.f64 X, { d0 -d1 } + vabs.f64 d0, d0 + vabs.f64 d1, d1 + vadd.f64 d0 , d0, d1 + add X, X, INC_X + +.endm + + + +.macro KERNEL_S1 + + vldmia.f64 X, { d4 - d5 } + vabs.f64 d4, d4 + vabs.f64 d5, d5 + vadd.f64 d4 , d4, d5 + vcmpe.f64 d4, d0 + vmrs APSR_nzcv, fpscr + VMOVCOND d0, d4 + add X, X, INC_X + +.endm + +#else + +.macro INIT_F + + vldmia.f32 X!, { s0 -s1 } + vabs.f32 s0, s0 + vabs.f32 s1, s1 + vadd.f32 s0 , s0, s1 + +.endm + + +.macro KERNEL_F1 + + vldmia.f32 X!, { s4 - s5 } + vabs.f32 s4, s4 + vabs.f32 s5, s5 + vadd.f32 s4 , s4, s5 + vcmpe.f32 s4, s0 + vmrs APSR_nzcv, fpscr + VMOVCOND s0, s4 + +.endm + +.macro INIT_S + + vldmia.f32 X, { s0 -s1 } + vabs.f32 s0, s0 + vabs.f32 s1, s1 + vadd.f32 s0 , s0, s1 + add X, X, INC_X + +.endm + + + +.macro KERNEL_S1 + + vldmia.f32 X, { s4 - s5 } + vabs.f32 s4, s4 + vabs.f32 s5, s5 + vadd.f32 s4 , s4, s5 + vcmpe.f32 s4, s0 + 
vmrs APSR_nzcv, fpscr + VMOVCOND s0, s4 + add X, X, INC_X + +.endm + + + + +#endif + +#endif + +/************************************************************************************** +* End of macro definitions +**************************************************************************************/ + + PROLOGUE + + .align 5 + + movs r12, #0 // clear floating point register + vmov s0, r12 +#if defined(DOUBLE) + vcvt.f64.f32 d0, s0 +#endif + + + cmp N, #0 + ble amax_kernel_L999 + + cmp INC_X, #0 + beq amax_kernel_L999 + + + cmp INC_X, #1 + bne amax_kernel_S_BEGIN + + +amax_kernel_F_BEGIN: + + INIT_F + + subs N, N , #1 + ble amax_kernel_L999 + + asrs I, N, #2 // I = N / 4 + ble amax_kernel_F1 + + .align 5 + +amax_kernel_F4: + + pld [ X, #X_PRE ] + KERNEL_F1 + KERNEL_F1 +#if defined(COMPLEX) && defined(DOUBLE) + pld [ X, #X_PRE ] +#endif + KERNEL_F1 + KERNEL_F1 + + subs I, I, #1 + ble amax_kernel_F1 + + +#if defined(COMPLEX) || defined(DOUBLE) + pld [ X, #X_PRE ] +#endif + KERNEL_F1 + KERNEL_F1 +#if defined(COMPLEX) && defined(DOUBLE) + pld [ X, #X_PRE ] +#endif + KERNEL_F1 + KERNEL_F1 + + subs I, I, #1 + bne amax_kernel_F4 + +amax_kernel_F1: + + ands I, N, #3 + ble amax_kernel_L999 + +amax_kernel_F10: + + KERNEL_F1 + + subs I, I, #1 + bne amax_kernel_F10 + + b amax_kernel_L999 + +amax_kernel_S_BEGIN: + +#if defined(COMPLEX) + +#if defined(DOUBLE) + lsl INC_X, INC_X, #4 // INC_X * SIZE * 2 +#else + lsl INC_X, INC_X, #3 // INC_X * SIZE * 2 +#endif + +#else + +#if defined(DOUBLE) + lsl INC_X, INC_X, #3 // INC_X * SIZE +#else + lsl INC_X, INC_X, #2 // INC_X * SIZE +#endif + +#endif + + INIT_S + + subs N, N , #1 + ble amax_kernel_L999 + + asrs I, N, #2 // I = N / 4 + ble amax_kernel_S1 + + .align 5 + +amax_kernel_S4: + + KERNEL_S1 + KERNEL_S1 + KERNEL_S1 + KERNEL_S1 + + subs I, I, #1 + bne amax_kernel_S4 + +amax_kernel_S1: + + ands I, N, #3 + ble amax_kernel_L999 + +amax_kernel_S10: + + KERNEL_S1 + + subs I, I, #1 + bne amax_kernel_S10 + + +amax_kernel_L999: +#if !defined(__ARM_PCS_VFP) +#if defined(DOUBLE) + vmov r0, r1, d0 +#else + vmov r0, s0 +#endif +#endif + bx lr + + EPILOGUE + diff --git a/kernel/arm/nrm2_vfpv3.S b/kernel/arm/nrm2_vfpv3.S index 7be1e977e..82ae5e8d4 100644 --- a/kernel/arm/nrm2_vfpv3.S +++ b/kernel/arm/nrm2_vfpv3.S @@ -61,20 +61,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vldmia.f64 X!, { d4 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_F1_NEXT_\@ + beq 1f /* KERNEL_F1_NEXT_\@ */ vabs.f64 d4, d4 vcmpe.f64 d0, d4 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f64 d2 , d4, d0 // scale >= x ? x / scale vmlage.f64 d1 , d2 , d2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_F1_NEXT_\@ + bge 1f /* KERNEL_F1_NEXT_\@ */ vdiv.f64 d2 , d0, d4 // scale / x vmul.f64 d2 , d2, d2 // ( scale / x ) * ( scale / x ) vmul.f64 d3 , d1, d2 // ssq * ( scale / x ) * ( scale / x ) vadd.f64 d1 , d3, d7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f64 d0 , d4 // scale = x -KERNEL_F1_NEXT_\@: +1: /* KERNEL_F1_NEXT_\@: */ .endm @@ -124,20 +124,20 @@ KERNEL_S1_NEXT: vldmia.f32 X!, { s4 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_F1_NEXT_\@ + beq 1f /* KERNEL_F1_NEXT_\@ */ vabs.f32 s4, s4 vcmpe.f32 s0, s4 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f32 s2 , s4, s0 // scale >= x ? 
x / scale vmlage.f32 s1 , s2 , s2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_F1_NEXT_\@ + bge 1f /* KERNEL_F1_NEXT_\@ */ vdiv.f32 s2 , s0, s4 // scale / x vmul.f32 s2 , s2, s2 // ( scale / x ) * ( scale / x ) vmul.f32 s3 , s1, s2 // ssq * ( scale / x ) * ( scale / x ) vadd.f32 s1 , s3, s7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f32 s0 , s4 // scale = x -KERNEL_F1_NEXT_\@: +1: /* KERNEL_F1_NEXT_\@: */ .endm @@ -195,37 +195,37 @@ KERNEL_S1_NEXT: vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_F1_NEXT_\@ + beq 1f /* KERNEL_F1_NEXT_\@ */ vabs.f64 d4, d4 vcmpe.f64 d0, d4 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f64 d2 , d4, d0 // scale >= x ? x / scale vmlage.f64 d1 , d2 , d2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_F1_NEXT_\@ + bge 1f /* KERNEL_F1_NEXT_\@ */ vdiv.f64 d2 , d0, d4 // scale / x vmul.f64 d2 , d2, d2 // ( scale / x ) * ( scale / x ) vmul.f64 d3 , d1, d2 // ssq * ( scale / x ) * ( scale / x ) vadd.f64 d1 , d3, d7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f64 d0 , d4 // scale = x -KERNEL_F1_NEXT_\@: +1: /* KERNEL_F1_NEXT_\@: */ vcmpe.f64 d5, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_F1_END_\@ + beq 2f /* KERNEL_F1_END_\@ */ vabs.f64 d5, d5 vcmpe.f64 d0, d5 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f64 d2 , d5, d0 // scale >= x ? x / scale vmlage.f64 d1 , d2 , d2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_F1_END_\@ + bge 2f /* KERNEL_F1_END_\@ */ vdiv.f64 d2 , d0, d5 // scale / x vmul.f64 d2 , d2, d2 // ( scale / x ) * ( scale / x ) vmul.f64 d3 , d1, d2 // ssq * ( scale / x ) * ( scale / x ) vadd.f64 d1 , d3, d7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f64 d0 , d5 // scale = x -KERNEL_F1_END_\@: +2: /* KERNEL_F1_END_\@: */ .endm @@ -253,37 +253,37 @@ KERNEL_F1_END_\@: vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_S1_NEXT_\@ + beq 1f /* KERNEL_S1_NEXT_\@ */ vabs.f64 d4, d4 vcmpe.f64 d0, d4 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f64 d2 , d4, d0 // scale >= x ? x / scale vmlage.f64 d1 , d2 , d2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_S1_NEXT_\@ + bge 1f /* KERNEL_S1_NEXT_\@ */ vdiv.f64 d2 , d0, d4 // scale / x vmul.f64 d2 , d2, d2 // ( scale / x ) * ( scale / x ) vmul.f64 d3 , d1, d2 // ssq * ( scale / x ) * ( scale / x ) vadd.f64 d1 , d3, d7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f64 d0 , d4 // scale = x -KERNEL_S1_NEXT_\@: +1: /* KERNEL_S1_NEXT_\@: */ vcmpe.f64 d5, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_S1_END_\@ + beq 2f /* KERNEL_S1_END_\@ */ vabs.f64 d5, d5 vcmpe.f64 d0, d5 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f64 d2 , d5, d0 // scale >= x ? x / scale vmlage.f64 d1 , d2 , d2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_S1_END_\@ + bge 2f /* KERNEL_S1_END_\@ */ vdiv.f64 d2 , d0, d5 // scale / x vmul.f64 d2 , d2, d2 // ( scale / x ) * ( scale / x ) vmul.f64 d3 , d1, d2 // ssq * ( scale / x ) * ( scale / x ) vadd.f64 d1 , d3, d7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f64 d0 , d5 // scale = x -KERNEL_S1_END_\@: +2: /* KERNEL_S1_END_\@: */ add X, X, INC_X @@ -298,37 +298,37 @@ KERNEL_S1_END_\@: vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_F1_NEXT_\@ + beq 1f /* KERNEL_F1_NEXT_\@ */ vabs.f32 s4, s4 vcmpe.f32 s0, s4 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f32 s2 , s4, s0 // scale >= x ? 
x / scale vmlage.f32 s1 , s2 , s2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_F1_NEXT_\@ + bge 1f /* KERNEL_F1_NEXT_\@ */ vdiv.f32 s2 , s0, s4 // scale / x vmul.f32 s2 , s2, s2 // ( scale / x ) * ( scale / x ) vmul.f32 s3 , s1, s2 // ssq * ( scale / x ) * ( scale / x ) vadd.f32 s1 , s3, s7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f32 s0 , s4 // scale = x -KERNEL_F1_NEXT_\@: +1: /* KERNEL_F1_NEXT_\@: */ vcmpe.f32 s5, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_F1_END_\@ + beq 2f /* KERNEL_F1_END_\@ */ vabs.f32 s5, s5 vcmpe.f32 s0, s5 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f32 s2 , s5, s0 // scale >= x ? x / scale vmlage.f32 s1 , s2 , s2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_F1_END_\@ + bge 2f /* KERNEL_F1_END_\@ */ vdiv.f32 s2 , s0, s5 // scale / x vmul.f32 s2 , s2, s2 // ( scale / x ) * ( scale / x ) vmul.f32 s3 , s1, s2 // ssq * ( scale / x ) * ( scale / x ) vadd.f32 s1 , s3, s7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f32 s0 , s5 // scale = x -KERNEL_F1_END_\@: +2: /* KERNEL_F1_END_\@: */ .endm @@ -354,37 +354,37 @@ KERNEL_F1_END_\@: vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_S1_NEXT_\@ + beq 1f /* KERNEL_S1_NEXT_\@ */ vabs.f32 s4, s4 vcmpe.f32 s0, s4 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f32 s2 , s4, s0 // scale >= x ? x / scale vmlage.f32 s1 , s2 , s2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_S1_NEXT_\@ + bge 1f /* KERNEL_S1_NEXT_\@ */ vdiv.f32 s2 , s0, s4 // scale / x vmul.f32 s2 , s2, s2 // ( scale / x ) * ( scale / x ) vmul.f32 s3 , s1, s2 // ssq * ( scale / x ) * ( scale / x ) vadd.f32 s1 , s3, s7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f32 s0 , s4 // scale = x -KERNEL_S1_NEXT_\@: +1: /* KERNEL_S1_NEXT_\@: */ vcmpe.f32 s5, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr - beq KERNEL_S1_END_\@ + beq 2f /* KERNEL_S1_END_\@ */ vabs.f32 s5, s5 vcmpe.f32 s0, s5 // compare with scale vmrs APSR_nzcv, fpscr vdivge.f32 s2 , s5, s0 // scale >= x ? x / scale vmlage.f32 s1 , s2 , s2 // ssq += ( x/scale ) * ( x/scale ) - bge KERNEL_S1_END_\@ + bge 2f /* KERNEL_S1_END_\@ */ vdiv.f32 s2 , s0, s5 // scale / x vmul.f32 s2 , s2, s2 // ( scale / x ) * ( scale / x ) vmul.f32 s3 , s1, s2 // ssq * ( scale / x ) * ( scale / x ) vadd.f32 s1 , s3, s7 // ssq = 1 + ssq * ( scale / x ) * ( scale / x ) vmov.f32 s0 , s5 // scale = x -KERNEL_S1_END_\@: +2: /* KERNEL_S1_END_\@: */ add X, X, INC_X diff --git a/kernel/arm/sum.c b/kernel/arm/sum.c index 7b78ec61a..a486a1868 100644 --- a/kernel/arm/sum.c +++ b/kernel/arm/sum.c @@ -29,23 +29,77 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* trivial copy of asum.c with the ABS() removed * **************************************************************************************/ - #include "common.h" +#include "../simd/intrin.h" #include FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) { - BLASLONG i=0; + BLASLONG i = 0; FLOAT sumf = 0.0; - if (n <= 0 || inc_x <= 0) return(sumf); - + if (n <= 0 || inc_x <= 0) + return (sumf); n *= inc_x; - while(i < n) + if (inc_x == 1) + { +#if V_SIMD && (!defined(DOUBLE) || (defined(DOUBLE) && V_SIMD_F64 && V_SIMD > 128)) +#ifdef DOUBLE + const int vstep = v_nlanes_f64; + const int unrollx4 = n & (-vstep * 4); + const int unrollx = n & -vstep; + v_f64 vsum0 = v_zero_f64(); + v_f64 vsum1 = v_zero_f64(); + v_f64 vsum2 = v_zero_f64(); + v_f64 vsum3 = v_zero_f64(); + for (; i < unrollx4; i += vstep * 4) + { + vsum0 = v_add_f64(vsum0, v_loadu_f64(x + i)); + vsum1 = v_add_f64(vsum1, v_loadu_f64(x + i + vstep)); + vsum2 = v_add_f64(vsum2, v_loadu_f64(x + i + vstep * 2)); + vsum3 = v_add_f64(vsum3, v_loadu_f64(x + i + vstep * 3)); + } + vsum0 = v_add_f64( + v_add_f64(vsum0, vsum1), v_add_f64(vsum2, vsum3)); + for (; i < unrollx; i += vstep) + { + vsum0 = v_add_f64(vsum0, v_loadu_f64(x + i)); + } + sumf = v_sum_f64(vsum0); +#else + const int vstep = v_nlanes_f32; + const int unrollx4 = n & (-vstep * 4); + const int unrollx = n & -vstep; + v_f32 vsum0 = v_zero_f32(); + v_f32 vsum1 = v_zero_f32(); + v_f32 vsum2 = v_zero_f32(); + v_f32 vsum3 = v_zero_f32(); + for (; i < unrollx4; i += vstep * 4) + { + vsum0 = v_add_f32(vsum0, v_loadu_f32(x + i)); + vsum1 = v_add_f32(vsum1, v_loadu_f32(x + i + vstep)); + vsum2 = v_add_f32(vsum2, v_loadu_f32(x + i + vstep * 2)); + vsum3 = v_add_f32(vsum3, v_loadu_f32(x + i + vstep * 3)); + } + vsum0 = v_add_f32( + v_add_f32(vsum0, vsum1), v_add_f32(vsum2, vsum3)); + for (; i < unrollx; i += vstep) + { + vsum0 = v_add_f32(vsum0, v_loadu_f32(x + i)); + } + sumf = v_sum_f32(vsum0); +#endif +#else + int n1 = n & -4; + for (; i < n1; i += 4) + { + sumf += x[i] + x[i + 1] + x[i + 2] + x[i + 3]; + } +#endif + } + while (i < n) { sumf += x[i]; i += inc_x; } - return(sumf); + return (sumf); } - - diff --git a/kernel/arm/zdot.c b/kernel/arm/zdot.c index 733c235c6..79baa61b1 100644 --- a/kernel/arm/zdot.c +++ b/kernel/arm/zdot.c @@ -48,10 +48,12 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA dot[0]=0.0; dot[1]=0.0; - +#if !defined(__PPC__) && !defined(__SunOS) && !defined(__PGI) CREAL(result) = 0.0 ; CIMAG(result) = 0.0 ; - +#else + result = OPENBLAS_MAKE_COMPLEX_FLOAT(0.0,0.0); +#endif if ( n < 1 ) return(result); inc_x2 = 2 * inc_x ; @@ -71,8 +73,12 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA i++ ; } - CREAL(result) = dot[0]; +#if !defined(__PPC__) && !defined(__SunOS) && !defined(__PGI) + CREAL(result) = dot[0]; CIMAG(result) = dot[1]; +#else + result = OPENBLAS_MAKE_COMPLEX_FLOAT(dot[0],dot[1]); +#endif return(result); } diff --git a/kernel/arm64/KERNEL.ARMV8 b/kernel/arm64/KERNEL.ARMV8 index a2a435738..c8a53c86b 100644 --- a/kernel/arm64/KERNEL.ARMV8 +++ b/kernel/arm64/KERNEL.ARMV8 @@ -91,61 +91,55 @@ IDAMAXKERNEL = iamax.S ICAMAXKERNEL = izamax.S IZAMAXKERNEL = izamax.S -ifneq ($(OS_DARWIN)$(CROSS),11) SNRM2KERNEL = nrm2.S DNRM2KERNEL = nrm2.S CNRM2KERNEL = znrm2.S ZNRM2KERNEL = znrm2.S -endif DDOTKERNEL = dot.S -SDOTKERNEL = dot.S +ifneq ($(C_COMPILER), PGI) +SDOTKERNEL = ../generic/dot.c +else +SDOTKERNEL = dot.S +endif +ifneq ($(C_COMPILER), PGI) CDOTKERNEL = zdot.S ZDOTKERNEL = zdot.S +else 
+CDOTKERNEL = ../arm/zdot.c +ZDOTKERNEL = ../arm/zdot.c +endif DSDOTKERNEL = dot.S -ifeq ($(OS_DARWIN)$(CROSS),11) - -STRMMKERNEL = ../generic/trmmkernel_2x2.c -DTRMMKERNEL = ../generic/trmmkernel_2x2.c -CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c -ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c - -SGEMMKERNEL = ../generic/gemmkernel_2x2.c -SGEMMONCOPY = ../generic/gemm_ncopy_2.c -SGEMMOTCOPY = ../generic/gemm_tcopy_2.c -SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) -SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) - -DGEMMKERNEL = ../generic/gemmkernel_2x2.c -DGEMMONCOPY = ../generic/gemm_ncopy_2.c -DGEMMOTCOPY = ../generic/gemm_tcopy_2.c -DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) -DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) - -CGEMMKERNEL = ../generic/zgemmkernel_2x2.c -CGEMMONCOPY = ../generic/zgemm_ncopy_2.c -CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) -CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) - -ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c -ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c -ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) -ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) +DGEMM_BETA = dgemm_beta.S +SGEMM_BETA = sgemm_beta.S -else SGEMMKERNEL = sgemm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) -SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +ifeq ($(SGEMM_UNROLL_M), 16) +SGEMMITCOPY = sgemm_tcopy_$(SGEMM_UNROLL_M).S +else SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +endif +ifeq ($(SGEMM_UNROLL_M), 4) +SGEMMINCOPY = sgemm_ncopy_$(SGEMM_UNROLL_M).S +else +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +endif SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) endif -SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +ifeq ($(SGEMM_UNROLL_N), 16) +SGEMMOTCOPY = sgemm_tcopy_$(SGEMM_UNROLL_N).S +else SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c +endif +ifeq ($(SGEMM_UNROLL_N), 4) +SGEMMONCOPY = sgemm_ncopy_$(SGEMM_UNROLL_N).S +else +SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +endif SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) @@ -202,5 +196,3 @@ ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) - -endif diff --git a/kernel/arm64/KERNEL.CORTEXA53 b/kernel/arm64/KERNEL.CORTEXA53 index c1d33fa3e..db322dd0d 100644 --- a/kernel/arm64/KERNEL.CORTEXA53 +++ b/kernel/arm64/KERNEL.CORTEXA53 @@ -1,3 +1,196 @@ -include $(KERNELDIR)/KERNEL.ARMV8 +SAMINKERNEL = ../arm/amin.c +DAMINKERNEL = ../arm/amin.c +CAMINKERNEL = ../arm/zamin.c +ZAMINKERNEL = ../arm/zamin.c +SMAXKERNEL = ../arm/max.c +DMAXKERNEL = ../arm/max.c +SMINKERNEL = ../arm/min.c +DMINKERNEL = ../arm/min.c + +ISAMINKERNEL = ../arm/iamin.c +IDAMINKERNEL = ../arm/iamin.c +ICAMINKERNEL = ../arm/izamin.c +IZAMINKERNEL = ../arm/izamin.c + +ISMAXKERNEL = ../arm/imax.c +IDMAXKERNEL = ../arm/imax.c + +ISMINKERNEL = ../arm/imin.c +IDMINKERNEL = ../arm/imin.c + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = 
../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +SAMAXKERNEL = amax.S +DAMAXKERNEL = amax.S +CAMAXKERNEL = zamax.S +ZAMAXKERNEL = zamax.S + +SAXPYKERNEL = axpy.S +DAXPYKERNEL = axpy.S +CAXPYKERNEL = zaxpy.S +ZAXPYKERNEL = zaxpy.S + +SROTKERNEL = rot.S +DROTKERNEL = rot.S +CROTKERNEL = zrot.S +ZROTKERNEL = zrot.S + +SSCALKERNEL = scal.S +DSCALKERNEL = scal.S +CSCALKERNEL = zscal.S +ZSCALKERNEL = zscal.S + +SGEMVNKERNEL = gemv_n.S +DGEMVNKERNEL = gemv_n.S +CGEMVNKERNEL = zgemv_n.S +ZGEMVNKERNEL = zgemv_n.S + +SGEMVTKERNEL = gemv_t.S +DGEMVTKERNEL = gemv_t.S +CGEMVTKERNEL = zgemv_t.S +ZGEMVTKERNEL = zgemv_t.S + + +SASUMKERNEL = asum.S +DASUMKERNEL = asum.S +CASUMKERNEL = casum.S +ZASUMKERNEL = zasum.S + +SCOPYKERNEL = copy.S +DCOPYKERNEL = copy.S +CCOPYKERNEL = copy.S +ZCOPYKERNEL = copy.S + +SSWAPKERNEL = swap.S +DSWAPKERNEL = swap.S +CSWAPKERNEL = swap.S +ZSWAPKERNEL = swap.S + +ISAMAXKERNEL = iamax.S +IDAMAXKERNEL = iamax.S +ICAMAXKERNEL = izamax.S +IZAMAXKERNEL = izamax.S + +SNRM2KERNEL = nrm2.S +DNRM2KERNEL = nrm2.S +CNRM2KERNEL = znrm2.S +ZNRM2KERNEL = znrm2.S + +ifneq ($(C_COMPILER), PGI) +SDOTKERNEL = ../generic/dot.c +else +SDOTKERNEL = dot.S +endif +DDOTKERNEL = dot.S +ifneq ($(C_COMPILER), PGI) +CDOTKERNEL = zdot.S +ZDOTKERNEL = zdot.S +else +CDOTKERNEL = ../arm/zdot.c +ZDOTKERNEL = ../arm/zdot.c +endif +DSDOTKERNEL = dot.S + +DGEMM_BETA = dgemm_beta.S +SGEMM_BETA = sgemm_beta.S + +ifeq ($(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N), 8x8) +SGEMMKERNEL = sgemm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N)_cortexa53.S +STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N)_cortexa53.S +else +SGEMMKERNEL = sgemm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S +STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S +endif +ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) +ifeq ($(SGEMM_UNROLL_M), 16) +SGEMMITCOPY = sgemm_tcopy_$(SGEMM_UNROLL_M).S +else +SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +endif +ifeq ($(SGEMM_UNROLL_M), 4) +SGEMMINCOPY = sgemm_ncopy_$(SGEMM_UNROLL_M).S +else +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +endif +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif + +SGEMMOTCOPY = sgemm_tcopy_$(SGEMM_UNROLL_N).S +SGEMMONCOPY = sgemm_ncopy_$(SGEMM_UNROLL_N).S +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = dgemm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S +DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S + +ifneq ($(DGEMM_UNROLL_M), $(DGEMM_UNROLL_N)) + +ifeq ($(DGEMM_UNROLL_M), 8) +DGEMMINCOPY = dgemm_ncopy_$(DGEMM_UNROLL_M).S +DGEMMITCOPY = dgemm_tcopy_$(DGEMM_UNROLL_M).S +else +DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c +DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c +endif + +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif + +ifeq ($(DGEMM_UNROLL_N), 4) +DGEMMONCOPY = dgemm_ncopy_$(DGEMM_UNROLL_N).S +DGEMMOTCOPY = dgemm_tcopy_$(DGEMM_UNROLL_N).S +else +DGEMMONCOPY = 
../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c +DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c +endif + +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CGEMMKERNEL = cgemm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S +CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S +ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N)) +CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c +CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c +CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) + +ZGEMMKERNEL = zgemm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S +ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S +ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N)) +ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c +ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) diff --git a/kernel/arm64/KERNEL.CORTEXA57 b/kernel/arm64/KERNEL.CORTEXA57 index 04d6940d7..0be334893 100644 --- a/kernel/arm64/KERNEL.CORTEXA57 +++ b/kernel/arm64/KERNEL.CORTEXA57 @@ -70,10 +70,19 @@ DCOPYKERNEL = copy.S CCOPYKERNEL = copy.S ZCOPYKERNEL = copy.S +ifneq ($(C_COMPILER), PGI) +SDOTKERNEL = ../generic/dot.c +else SDOTKERNEL = dot.S +endif DDOTKERNEL = dot.S +ifneq ($(C_COMPILER), PGI) CDOTKERNEL = zdot.S ZDOTKERNEL = zdot.S +else +CDOTKERNEL = ../arm/zdot.c +ZDOTKERNEL = ../arm/zdot.c +endif DSDOTKERNEL = dot.S SNRM2KERNEL = nrm2.S diff --git a/kernel/arm64/KERNEL.EMAG8180 b/kernel/arm64/KERNEL.EMAG8180 new file mode 100644 index 000000000..007b2ce26 --- /dev/null +++ b/kernel/arm64/KERNEL.EMAG8180 @@ -0,0 +1,3 @@ +include $(KERNELDIR)/KERNEL.CORTEXA57 + + diff --git a/kernel/arm64/KERNEL.NEOVERSEN1 b/kernel/arm64/KERNEL.NEOVERSEN1 new file mode 100644 index 000000000..ea010db42 --- /dev/null +++ b/kernel/arm64/KERNEL.NEOVERSEN1 @@ -0,0 +1,189 @@ +SAMINKERNEL = ../arm/amin.c +DAMINKERNEL = ../arm/amin.c +CAMINKERNEL = ../arm/zamin.c +ZAMINKERNEL = ../arm/zamin.c + +SMAXKERNEL = ../arm/max.c +DMAXKERNEL = ../arm/max.c + +SMINKERNEL = ../arm/min.c +DMINKERNEL = ../arm/min.c + +ISAMINKERNEL = ../arm/iamin.c +IDAMINKERNEL = ../arm/iamin.c +ICAMINKERNEL = ../arm/izamin.c +IZAMINKERNEL = ../arm/izamin.c + +ISMAXKERNEL = ../arm/imax.c +IDMAXKERNEL = ../arm/imax.c + +ISMINKERNEL = ../arm/imin.c +IDMINKERNEL = ../arm/imin.c + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = 
../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +SAMAXKERNEL = amax.S +DAMAXKERNEL = amax.S +CAMAXKERNEL = zamax.S +ZAMAXKERNEL = zamax.S + +SAXPYKERNEL = axpy.S +DAXPYKERNEL = daxpy_thunderx2t99.S +CAXPYKERNEL = zaxpy.S +ZAXPYKERNEL = zaxpy.S + +SROTKERNEL = rot.S +DROTKERNEL = rot.S +CROTKERNEL = zrot.S +ZROTKERNEL = zrot.S + +SSCALKERNEL = scal.S +DSCALKERNEL = scal.S +CSCALKERNEL = zscal.S +ZSCALKERNEL = zscal.S + +SGEMVNKERNEL = gemv_n.S +DGEMVNKERNEL = gemv_n.S +CGEMVNKERNEL = zgemv_n.S +ZGEMVNKERNEL = zgemv_n.S + +SGEMVTKERNEL = gemv_t.S +DGEMVTKERNEL = gemv_t.S +CGEMVTKERNEL = zgemv_t.S +ZGEMVTKERNEL = zgemv_t.S + + +SASUMKERNEL = sasum_thunderx2t99.c +DASUMKERNEL = dasum_thunderx2t99.c +CASUMKERNEL = casum_thunderx2t99.c +ZASUMKERNEL = zasum_thunderx2t99.c + +SCOPYKERNEL = copy_thunderx2t99.c +DCOPYKERNEL = copy_thunderx2t99.c +CCOPYKERNEL = copy_thunderx2t99.c +ZCOPYKERNEL = copy_thunderx2t99.c + +SSWAPKERNEL = swap_thunderx2t99.S +DSWAPKERNEL = swap_thunderx2t99.S +CSWAPKERNEL = swap_thunderx2t99.S +ZSWAPKERNEL = swap_thunderx2t99.S + +ISAMAXKERNEL = iamax_thunderx2t99.c +IDAMAXKERNEL = iamax_thunderx2t99.c +ICAMAXKERNEL = izamax_thunderx2t99.c +IZAMAXKERNEL = izamax_thunderx2t99.c + +SNRM2KERNEL = scnrm2_thunderx2t99.c +DNRM2KERNEL = dznrm2_thunderx2t99.c +CNRM2KERNEL = scnrm2_thunderx2t99.c +ZNRM2KERNEL = dznrm2_thunderx2t99.c + +DDOTKERNEL = dot_thunderx2t99.c +SDOTKERNEL = dot_thunderx2t99.c +CDOTKERNEL = zdot_thunderx2t99.c +ZDOTKERNEL = zdot_thunderx2t99.c +DSDOTKERNEL = dot.S + +DGEMM_BETA = dgemm_beta.S +SGEMM_BETA = sgemm_beta.S + +SGEMMKERNEL = sgemm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S +STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S +ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) +ifeq ($(SGEMM_UNROLL_M), 16) +SGEMMITCOPY = sgemm_tcopy_$(SGEMM_UNROLL_M).S +else +SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +endif +ifeq ($(SGEMM_UNROLL_M), 4) +SGEMMINCOPY = sgemm_ncopy_$(SGEMM_UNROLL_M).S +else +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +endif +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +ifeq ($(SGEMM_UNROLL_N), 16) +SGEMMOTCOPY = sgemm_tcopy_$(SGEMM_UNROLL_N).S +else +SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c +endif +ifeq ($(SGEMM_UNROLL_N), 4) +SGEMMONCOPY = sgemm_ncopy_$(SGEMM_UNROLL_N).S +else +SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +endif +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = dgemm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S +DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S + +ifneq ($(DGEMM_UNROLL_M), $(DGEMM_UNROLL_N)) + +ifeq ($(DGEMM_UNROLL_M), 8) +DGEMMINCOPY = dgemm_ncopy_$(DGEMM_UNROLL_M).S +DGEMMITCOPY = dgemm_tcopy_$(DGEMM_UNROLL_M).S +else +DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c +DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c +endif + +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif + +ifeq ($(DGEMM_UNROLL_N), 4) +DGEMMONCOPY = dgemm_ncopy_$(DGEMM_UNROLL_N).S +DGEMMOTCOPY = dgemm_tcopy_$(DGEMM_UNROLL_N).S +else +DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c +DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c +endif + +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CGEMMKERNEL = 
cgemm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S +CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S +ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N)) +CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c +CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c +CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) + +ZGEMMKERNEL = zgemm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S +ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S +ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N)) +ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c +ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) diff --git a/kernel/arm64/KERNEL.THUNDERX b/kernel/arm64/KERNEL.THUNDERX index cb02c7bc5..669f62698 100644 --- a/kernel/arm64/KERNEL.THUNDERX +++ b/kernel/arm64/KERNEL.THUNDERX @@ -47,8 +47,13 @@ ZCOPYKERNEL = copy.S SDOTKERNEL = dot_thunderx.c DDOTKERNEL = ddot_thunderx.c +ifneq ($(C_COMPILER), PGI) CDOTKERNEL = zdot.S ZDOTKERNEL = zdot.S +else +CDOTKERNEL = ../arm/zdot.c +ZDOTKERNEL = ../arm/zdot.c +endif DSDOTKERNEL = dot.S SNRM2KERNEL = nrm2.S diff --git a/kernel/arm64/KERNEL.THUNDERX3T110 b/kernel/arm64/KERNEL.THUNDERX3T110 new file mode 100644 index 000000000..a20d0d4a6 --- /dev/null +++ b/kernel/arm64/KERNEL.THUNDERX3T110 @@ -0,0 +1,184 @@ +SAMINKERNEL = ../arm/amin.c +DAMINKERNEL = ../arm/amin.c +CAMINKERNEL = ../arm/zamin.c +ZAMINKERNEL = ../arm/zamin.c + +SMAXKERNEL = ../arm/max.c +DMAXKERNEL = ../arm/max.c + +SMINKERNEL = ../arm/min.c +DMINKERNEL = ../arm/min.c + +ISAMINKERNEL = ../arm/iamin.c +IDAMINKERNEL = ../arm/iamin.c +ICAMINKERNEL = ../arm/izamin.c +IZAMINKERNEL = ../arm/izamin.c + +ISMAXKERNEL = ../arm/imax.c +IDMAXKERNEL = ../arm/imax.c + +ISMINKERNEL = ../arm/imin.c +IDMINKERNEL = ../arm/imin.c + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +SAMAXKERNEL = amax.S +DAMAXKERNEL = amax.S +CAMAXKERNEL = zamax.S +ZAMAXKERNEL = zamax.S + +SAXPYKERNEL = axpy.S +DAXPYKERNEL = daxpy_thunderx2t99.S +CAXPYKERNEL = zaxpy.S +ZAXPYKERNEL = zaxpy.S + +SROTKERNEL = rot.S +DROTKERNEL = rot.S +CROTKERNEL = zrot.S +ZROTKERNEL = zrot.S + +SSCALKERNEL = scal.S +DSCALKERNEL = scal.S +CSCALKERNEL = zscal.S +ZSCALKERNEL = zscal.S + +SGEMVNKERNEL = gemv_n.S +DGEMVNKERNEL = gemv_n.S +CGEMVNKERNEL = 
zgemv_n.S +ZGEMVNKERNEL = zgemv_n.S + +SGEMVTKERNEL = gemv_t.S +DGEMVTKERNEL = gemv_t.S +CGEMVTKERNEL = zgemv_t.S +ZGEMVTKERNEL = zgemv_t.S + +STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S +ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S + +ifneq ($(DGEMM_UNROLL_M), $(DGEMM_UNROLL_N)) + +ifeq ($(DGEMM_UNROLL_M), 8) +DGEMMINCOPY = dgemm_ncopy_$(DGEMM_UNROLL_M).S +DGEMMITCOPY = dgemm_tcopy_$(DGEMM_UNROLL_M).S +else +DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c +DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c +endif + +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif + +ifeq ($(DGEMM_UNROLL_N), 4) +DGEMMONCOPY = dgemm_ncopy_$(DGEMM_UNROLL_N).S +DGEMMOTCOPY = dgemm_tcopy_$(DGEMM_UNROLL_N).S +else +DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c +DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c +endif + +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S +ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N)) +CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c +CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c +CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) + +ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S +ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N)) +ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c +ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) + +SASUMKERNEL = sasum_thunderx2t99.c +DASUMKERNEL = dasum_thunderx2t99.c +CASUMKERNEL = casum_thunderx2t99.c +ZASUMKERNEL = zasum_thunderx2t99.c + +SCOPYKERNEL = copy_thunderx2t99.c +DCOPYKERNEL = copy_thunderx2t99.c +CCOPYKERNEL = copy_thunderx2t99.c +ZCOPYKERNEL = copy_thunderx2t99.c + +SSWAPKERNEL = swap_thunderx2t99.S +DSWAPKERNEL = swap_thunderx2t99.S +CSWAPKERNEL = swap_thunderx2t99.S +ZSWAPKERNEL = swap_thunderx2t99.S + +ISAMAXKERNEL = iamax_thunderx2t99.c +IDAMAXKERNEL = iamax_thunderx2t99.c +ICAMAXKERNEL = izamax_thunderx2t99.c +IZAMAXKERNEL = izamax_thunderx2t99.c + +SNRM2KERNEL = scnrm2_thunderx2t99.c +CNRM2KERNEL = scnrm2_thunderx2t99.c +#DNRM2KERNEL = dznrm2_thunderx2t99_fast.c +#ZNRM2KERNEL = dznrm2_thunderx2t99_fast.c +DNRM2KERNEL = dznrm2_thunderx2t99.c +ZNRM2KERNEL = dznrm2_thunderx2t99.c + + +DDOTKERNEL = dot_thunderx2t99.c +SDOTKERNEL = dot_thunderx2t99.c +CDOTKERNEL = 
zdot_thunderx2t99.c +ZDOTKERNEL = zdot_thunderx2t99.c +DSDOTKERNEL = dot.S + +ifeq ($(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N), 8x4) +DGEMMKERNEL = dgemm_kernel_8x4_thunderx2t99.S +endif + +ifeq ($(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N), 16x4) +SGEMMKERNEL = sgemm_kernel_16x4_thunderx2t99.S +endif + +ifeq ($(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N), 8x4) +CGEMMKERNEL = cgemm_kernel_8x4_thunderx2t99.S +endif + +ifeq ($(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N), 4x4) +ZGEMMKERNEL = zgemm_kernel_4x4_thunderx2t99.S +endif diff --git a/kernel/arm64/KERNEL.TSV110 b/kernel/arm64/KERNEL.TSV110 index 04d6940d7..54d016e17 100644 --- a/kernel/arm64/KERNEL.TSV110 +++ b/kernel/arm64/KERNEL.TSV110 @@ -72,8 +72,13 @@ ZCOPYKERNEL = copy.S SDOTKERNEL = dot.S DDOTKERNEL = dot.S +ifneq ($(C_COMPILER), PGI) CDOTKERNEL = zdot.S ZDOTKERNEL = zdot.S +else +CDOTKERNEL = ../arm/zdot.c +ZDOTKERNEL = ../arm/zdot.c +endif DSDOTKERNEL = dot.S SNRM2KERNEL = nrm2.S @@ -109,13 +114,29 @@ ZGEMVTKERNEL = zgemv_t.S SGEMMKERNEL = sgemm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) -SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +ifeq ($(SGEMM_UNROLL_M), 16) +SGEMMITCOPY = sgemm_tcopy_$(SGEMM_UNROLL_M).S +else SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +endif +ifeq ($(SGEMM_UNROLL_M), 4) +SGEMMINCOPY = sgemm_ncopy_$(SGEMM_UNROLL_M).S +else +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +endif SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) endif -SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +ifeq ($(SGEMM_UNROLL_N), 16) +SGEMMOTCOPY = sgemm_tcopy_$(SGEMM_UNROLL_N).S +else SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c +endif +ifeq ($(SGEMM_UNROLL_N), 4) +SGEMMONCOPY = sgemm_ncopy_$(SGEMM_UNROLL_N).S +else +SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +endif SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) diff --git a/kernel/arm64/KERNEL.VORTEX b/kernel/arm64/KERNEL.VORTEX new file mode 100644 index 000000000..e3efef1f5 --- /dev/null +++ b/kernel/arm64/KERNEL.VORTEX @@ -0,0 +1 @@ +include $(KERNELDIR)/KERNEL.ARMV8 diff --git a/kernel/arm64/daxpy_thunderx.c b/kernel/arm64/daxpy_thunderx.c index 37aae9391..f44f9d4e5 100644 --- a/kernel/arm64/daxpy_thunderx.c +++ b/kernel/arm64/daxpy_thunderx.c @@ -62,7 +62,7 @@ static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) y5 = a * x[5] + y[5]; y6 = a * x[6] + y[6]; y7 = a * x[7] + y[7]; - asm("":"+w"(y0),"+w"(y1),"+w"(y2),"+w"(y3),"+w"(y4),"+w"(y5),"+w"(y6),"+w"(y7)); + __asm__("":"+w"(y0),"+w"(y1),"+w"(y2),"+w"(y3),"+w"(y4),"+w"(y5),"+w"(y6),"+w"(y7)); y[0] = y0; y[1] = y1; y[2] = y2; @@ -74,7 +74,7 @@ static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) xx = (x + 4*128/sizeof(*x)); yy = (y + 4*128/sizeof(*y)); - asm("":"+r"(yy)::"memory"); + __asm__("":"+r"(yy)::"memory"); prefetch(xx); prefetch(yy); diff --git a/kernel/arm64/daxpy_thunderx2t99.S b/kernel/arm64/daxpy_thunderx2t99.S index b8d0af5c2..baf39150f 100644 --- a/kernel/arm64/daxpy_thunderx2t99.S +++ b/kernel/arm64/daxpy_thunderx2t99.S @@ -98,11 +98,58 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
add X, X, #128 .endm +/* + * No need to do software prefetches if the vector fits + * into L1 cache + */ +.macro KERNEL_F16_L1CACHE + ldp q4, q5, [X] + ldp q16, q17, [Y] + + ldp q6, q7, [X, #32] + ldp q18, q19, [Y, #32] + + fmla v16.2d, v4.2d, v0.d[0] + fmla v17.2d, v5.2d, v0.d[0] + + stp q16, q17, [Y] + + ldp q20, q21, [X, #64] + ldp q24, q25, [Y, #64] + + fmla v18.2d, v6.2d, v0.d[0] + fmla v19.2d, v7.2d, v0.d[0] + + stp q18, q19, [Y, #32] + + ldp q22, q23, [X, #96] + ldp q26, q27, [Y, #96] + + fmla v24.2d, v20.2d, v0.d[0] + fmla v25.2d, v21.2d, v0.d[0] + + stp q24, q25, [Y, #64] + + fmla v26.2d, v22.2d, v0.d[0] + fmla v27.2d, v23.2d, v0.d[0] + + stp q26, q27, [Y, #96] + + add Y, Y, #128 + add X, X, #128 +.endm + .macro KERNEL_F32 KERNEL_F16 KERNEL_F16 .endm + +.macro KERNEL_F32_L1CACHE + KERNEL_F16_L1CACHE + KERNEL_F16_L1CACHE +.endm + .macro INIT_S lsl INC_X, INC_X, #3 lsl INC_Y, INC_Y, #3 @@ -138,6 +185,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. cmp I, xzr beq .Ldaxpy_kernel_F1 + cmp N, #2048 + ble .Ldaxpy_kernel_F32_L1CACHE + .align 5 .Ldaxpy_kernel_F32: @@ -145,6 +195,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. subs I, I, #1 bne .Ldaxpy_kernel_F32 + b .Ldaxpy_kernel_F1 + + .align 5 +.Ldaxpy_kernel_F32_L1CACHE: + + KERNEL_F32_L1CACHE + + subs I, I, #1 + bne .Ldaxpy_kernel_F32_L1CACHE .Ldaxpy_kernel_F1: diff --git a/kernel/arm64/dgemm_beta.S b/kernel/arm64/dgemm_beta.S new file mode 100644 index 000000000..7d21525c2 --- /dev/null +++ b/kernel/arm64/dgemm_beta.S @@ -0,0 +1,252 @@ +/*************************************************************************** +Copyright (c) 2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + +#define ASSEMBLER +#include "common.h" + +#define M x0 +#define N x1 +#define BETA d0 +#define LDC x6 +#define C00 x7 + +#define A01 x8 +#define A02 x9 +#define A03 x10 +#define A04 x11 + +#define beta0 d11 +#define betaV0 v11.d[0] +#define I x16 + +#define prfm_size 640 +#define calc_size 128 + +/************************************************************************************** +* Macro definitions +**************************************************************************************/ + +.macro SAVE_REGS + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] +.endm + +.macro RESTORE_REGS + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) +.endm + +.macro INIT_ZERO + movi v0.2d, #0000000000000000 + movi v1.2d, #0000000000000000 + movi v2.2d, #0000000000000000 + movi v3.2d, #0000000000000000 + movi v4.2d, #0000000000000000 + movi v5.2d, #0000000000000000 + movi v6.2d, #0000000000000000 + movi v7.2d, #0000000000000000 +.endm + +/************************************************************************************** +* End of macro definitions +**************************************************************************************/ + + PROLOGUE + + .align 5 + + ldr LDC, [sp] + SAVE_REGS + +.Lgemm_beta_BEGIN: + + fmov beta0, BETA + cmp N, #0 + ble .Lgemm_beta_L999 + + fcmp BETA, #0.0 + beq .Lgemm_beta_zero_01 + +.Lgemm_beta_01: + + lsl LDC, LDC, #3 + + .align 5 +.Lgemm_beta_02: + + mov A01, C00 + add C00, C00, LDC + asr I, M, #4 + cmp I, #0 + ble .Lgemm_beta_04 + add A02, A01, #32 + add A03, A02, #32 + add A04, A03, #32 + + .align 5 +.Lgemm_beta_03: + + ldp q0, q1, [A01] + ldp q2, q3, [A02] + ldp q4, q5, [A03] + ldp q6, q7, [A04] + + fmul v0.2d, v0.2d, betaV0 + fmul v1.2d, v1.2d, betaV0 + + fmul v2.2d, v2.2d, betaV0 + fmul v3.2d, v3.2d, betaV0 + + prfm PLDL1KEEP, [A01, prfm_size] + + fmul v4.2d, v4.2d, betaV0 + fmul v5.2d, v5.2d, betaV0 + + prfm PLDL1KEEP, [A03, prfm_size] + + fmul v6.2d, v6.2d, betaV0 + fmul v7.2d, v7.2d, betaV0 + + st1 {v0.2d, v1.2d}, [A01] + add A01, A01, calc_size + st1 {v2.2d, v3.2d}, [A02] + add A02, A02, calc_size + st1 {v4.2d, v5.2d}, [A03] + add A03, A03, calc_size + st1 {v6.2d, v7.2d}, [A04] + add A04, A04, calc_size + + subs I , I , #1 + bne .Lgemm_beta_03 + + .align 5 +.Lgemm_beta_04: + + and I, M , #15 // M%16 + cmp I, #0 + ble .Lgemm_beta_06 + + .align 5 +.Lgemm_beta_05: + + ldr d12, [A01] + fmul d12, d12, beta0 + str d12, [A01] + add A01, A01, #8 + + subs I , I , #1 + bne .Lgemm_beta_05 + + .align 5 +.Lgemm_beta_06: + + subs N , N, #1 // N-- + bne .Lgemm_beta_02 + + .align 5 +.Lgemm_beta_L999: + + mov x0, #0 + RESTORE_REGS + ret + +.Lgemm_beta_zero_01: + INIT_ZERO + lsl LDC, LDC, #3 + + .align 5 +.Lgemm_beta_zero_02: + mov A01, C00 + add C00, C00, LDC + + asr I, M, #4 + cmp I, #0 + ble .Lgemm_beta_zero_04 + + add A02, A01, #64 + + .align 5 
+.Lgemm_beta_zero_03: + + st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [A01] + add A01, A01, calc_size + st1 {v4.2d, v5.2d, v6.2d, v7.2d}, [A02] + add A02, A02, calc_size + + subs I, I, #1 + bne .Lgemm_beta_zero_03 + + .align 5 +.Lgemm_beta_zero_04: + + and I, M, #15 + cmp I, #0 + ble .Lgemm_beta_zero_06 + + .align 5 +.Lgemm_beta_zero_05: + + str beta0, [A01] + add A01, A01, #8 + + subs I, I, #1 + bne .Lgemm_beta_zero_05 + + .align 5 +.Lgemm_beta_zero_06: + + subs N, N, #1 + bne .Lgemm_beta_zero_02 + + .align 5 +.Lgemm_beta_zero_L999: + + mov x0, #0 + RESTORE_REGS + ret + + EPILOGUE diff --git a/kernel/arm64/dznrm2_thunderx2t99.c b/kernel/arm64/dznrm2_thunderx2t99.c index b94f0cffc..b021a2832 100644 --- a/kernel/arm64/dznrm2_thunderx2t99.c +++ b/kernel/arm64/dznrm2_thunderx2t99.c @@ -58,6 +58,7 @@ extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n #define CUR_MAXINV "d8" #define CUR_MAXINV_V "v8.2d" #define CUR_MAX_V "v8.2d" +#define REGINF "d9" static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, double *ssq, double *scale) @@ -79,8 +80,10 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, " ble 9f //nrm2_kernel_L999 \n" "1: //nrm2_kernel_F_BEGIN: \n" + " mov x6, #0x7FF0000000000000 //+Infinity \n" " fmov "REGZERO", xzr \n" " fmov "REGONE", #1.0 \n" + " fmov "REGINF", x6 \n" " lsl "INC_X", "INC_X", #"INC_SHIFT" \n" " mov "J", "N" \n" " cmp "J", xzr \n" @@ -104,6 +107,8 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, " ldr d4, ["X"] \n" " fabs d4, d4 \n" " fmax "CUR_MAX", "SCALE", d4 \n" + " fcmp "CUR_MAX", "REGINF" \n" + " beq 10f \n" " fdiv "SCALE", "SCALE", "CUR_MAX" \n" " fmul "SCALE", "SCALE", "SCALE" \n" " fmul "SSQ", "SSQ", "SCALE" \n" @@ -116,6 +121,8 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, " ldr d3, ["X", #8] \n" " fabs d3, d3 \n" " fmax "CUR_MAX", "SCALE", d3 \n" + " fcmp "CUR_MAX", "REGINF" \n" + " beq 10f \n" " fdiv "SCALE", "SCALE", "CUR_MAX" \n" " fmul "SCALE", "SCALE", "SCALE" \n" " fmul "SSQ", "SSQ", "SCALE" \n" @@ -158,6 +165,8 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, " fmaxp v24.2d, v24.2d, v26.2d \n" " fmaxp v24.2d, v24.2d, v24.2d \n" " fmax "CUR_MAX", "SCALE", d24 \n" + " fcmp "CUR_MAX", "REGINF" \n" + " beq 10f \n" " fdiv "CUR_MAXINV", "REGONE", "CUR_MAX" \n" " //dup "CUR_MAX_V", v7.d[0] \n" " fdiv "SCALE", "SCALE", "CUR_MAX" \n" @@ -217,6 +226,8 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, " fmaxp v24.2d, v24.2d, v26.2d \n" " fmaxp v24.2d, v24.2d, v24.2d \n" " fmax "CUR_MAX", "SCALE", d24 \n" + " fcmp "CUR_MAX", "REGINF" \n" + " beq 10f \n" " fdiv "CUR_MAXINV", "REGONE", "CUR_MAX" \n" " //dup "CUR_MAX_V", v7.d[0] \n" " fdiv "SCALE", "SCALE", "CUR_MAX" \n" @@ -265,6 +276,8 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, " ldr d4, ["X"] \n" " fabs d4, d4 \n" " fmax "CUR_MAX", "SCALE", d4 \n" + " fcmp "CUR_MAX", "REGINF" \n" + " beq 10f \n" " fdiv "SCALE", "SCALE", "CUR_MAX" \n" " fmul "SCALE", "SCALE", "SCALE" \n" " fmul "SSQ", "SSQ", "SCALE" \n" @@ -276,6 +289,8 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, " ldr d3, ["X", #8] \n" " fabs d3, d3 \n" " fmax "CUR_MAX", "SCALE", d3 \n" + " fcmp "CUR_MAX", "REGINF" \n" + " beq 10f \n" " fdiv "SCALE", "SCALE", "CUR_MAX" \n" " fmul "SCALE", "SCALE", "SCALE" \n" " fmul "SSQ", "SSQ", "SCALE" \n" @@ -291,6 +306,11 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, "9: //nrm2_kernel_L999: \n" " str "SSQ", [%[SSQ_]] \n" " str "SCALE", [%[SCALE_]] 
\n" + " b 11f \n" + "10: \n" + " str "REGINF", [%[SSQ_]] \n" + " str "REGINF", [%[SCALE_]] \n" + "11: \n" : : [SSQ_] "r" (ssq), //%0 @@ -300,7 +320,7 @@ static void nrm2_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, [INCX_] "r" (inc_x) //%4 : "cc", "memory", - "x0", "x1", "x2", "x3", "x4", "x5", + "x0", "x1", "x2", "x3", "x4", "x5", "x6", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8" ); @@ -359,6 +379,12 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) cur_ssq = *ptr; cur_scale = *(ptr + 1); + if (cur_ssq == INFINITY) { + ssq = INFINITY; + scale = INFINITY; + break; + } + if (cur_scale != 0) { if (cur_scale > scale) { scale = (scale / cur_scale); diff --git a/kernel/arm64/nrm2.S b/kernel/arm64/nrm2.S index e2cbd4def..0e5a8eed1 100644 --- a/kernel/arm64/nrm2.S +++ b/kernel/arm64/nrm2.S @@ -54,37 +54,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if !defined(DOUBLE) ldr s4, [X], #4 fcmp s4, REGZERO - beq KERNEL_F1_NEXT_\@ + beq 2f /* KERNEL_F1_NEXT_\@ */ fabs s4, s4 fcmp SCALE, s4 - bge KERNEL_F1_SCALE_GE_X_\@ + bge 1f /* KERNEL_F1_SCALE_GE_X_\@ */ fdiv s2, SCALE, s4 fmul s2, s2, s2 fmul s3, SSQ, s2 fadd SSQ, REGONE, s3 fmov SCALE, s4 - b KERNEL_F1_NEXT_\@ -KERNEL_F1_SCALE_GE_X_\@: + b 2f /* KERNEL_F1_NEXT_\@ */ +1: /* KERNEL_F1_SCALE_GE_X_\@: */ fdiv s2, s4, SCALE fmla SSQ, s2, v2.s[0] #else ldr d4, [X], #8 fcmp d4, REGZERO - beq KERNEL_F1_NEXT_\@ + beq 2f /* KERNEL_F1_NEXT_\@ */ fabs d4, d4 fcmp SCALE, d4 - bge KERNEL_F1_SCALE_GE_X_\@ + bge 1f /* KERNEL_F1_SCALE_GE_X_\@ */ fdiv d2, SCALE, d4 fmul d2, d2, d2 fmul d3, SSQ, d2 fadd SSQ, REGONE, d3 fmov SCALE, d4 - b KERNEL_F1_NEXT_\@ -KERNEL_F1_SCALE_GE_X_\@: + b 2f /* KERNEL_F1_NEXT_\@ */ +1: /* KERNEL_F1_SCALE_GE_X_\@: */ fdiv d2, d4, SCALE fmla SSQ, d2, v2.d[0] #endif -KERNEL_F1_NEXT_\@: +2: /* KERNEL_F1_NEXT_\@: */ .endm .macro KERNEL_S1 diff --git a/kernel/arm64/sgemm_beta.S b/kernel/arm64/sgemm_beta.S new file mode 100755 index 000000000..574485bc4 --- /dev/null +++ b/kernel/arm64/sgemm_beta.S @@ -0,0 +1,259 @@ +/*************************************************************************** +Copyright (c) 2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A00 PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define ASSEMBLER +#include "common.h" + +#define M x0 +#define N x1 +#define BETA s0 +#define LDC x6 +#define C00 x7 + +#define A01 x8 +#define A02 x9 +#define A03 x10 +#define A04 x11 +#define I x12 + +#define beta0 s11 +#define betaV0 v11.s[0] + +#define prfm_size 640 +#define calc_size 128 + +/************************************************************************************** +* Macro definitions +**************************************************************************************/ + +.macro SAVE_REGS + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] +.endm + +.macro RESTORE_REGS + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) +.endm + +.macro INIT_ZERO + movi v0.4s, #0x0 + movi v1.4s, #0x0 + movi v2.4s, #0x0 + movi v3.4s, #0x0 + movi v4.4s, #0x0 + movi v5.4s, #0x0 + movi v6.4s, #0x0 + movi v7.4s, #0x0 +.endm + +/************************************************************************************** +* End of macro definitions +**************************************************************************************/ + + PROLOGUE + + .align 5 + + ldr LDC, [sp] + SAVE_REGS + +.Lgemm_beta_BEGIN: + + fmov beta0, BETA + cmp N, #0 + ble .Lgemm_beta_L999 + + fcmp BETA, #0.0 + beq .Lgemm_beta_zero_01 + +.Lgemm_beta_01: + + lsl LDC, LDC, #2 + + .align 5 +.Lgemm_beta_02: + + mov A01, C00 + add C00, C00, LDC + asr I, M, #5 + cmp I, #0 + ble .Lgemm_beta_04 + add A02, A01, #32 + add A03, A02, #32 + add A04, A03, #32 + + .align 5 +.Lgemm_beta_03: + + prfm PLDL1KEEP, [A01, prfm_size] + + ldp q0, q1, [A01] + ldp q2, q3, [A02] + ldp q4, q5, [A03] + ldp q6, q7, [A04] + + fmul v0.4s, v0.4s, betaV0 + fmul v1.4s, v1.4s, betaV0 + + fmul v2.4s, v2.4s, betaV0 + fmul v3.4s, v3.4s, betaV0 + + fmul v4.4s, v4.4s, betaV0 + fmul v5.4s, v5.4s, betaV0 + + fmul v6.4s, v6.4s, betaV0 + fmul v7.4s, v7.4s, betaV0 + + prfm PLDL1KEEP, [A01, prfm_size + 64] + + st1 {v0.4s, v1.4s}, [A01] + add A01, A01, calc_size + st1 {v2.4s, v3.4s}, [A02] + add A02, A02, calc_size + st1 {v4.4s, v5.4s}, [A03] + add A03, A03, calc_size + st1 {v6.4s, v7.4s}, [A04] + add A04, A04, calc_size + + subs I , I , #1 + bne .Lgemm_beta_03 + + .align 5 +.Lgemm_beta_04: + + and I, M , #31 + cmp I, #0 + ble .Lgemm_beta_06 + + .align 5 +.Lgemm_beta_05: + + ldr s12, [A01] + fmul s12, s12, beta0 + str s12, [A01] + add A01, A01, #4 + 
+ subs I , I , #1 + bne .Lgemm_beta_05 + + .align 5 +.Lgemm_beta_06: + + subs N , N, #1 // N-- + bne .Lgemm_beta_02 + + .align 5 +.Lgemm_beta_L999: + + mov x0, #0 + RESTORE_REGS + ret + + .align 5 +.Lgemm_beta_zero_01: + + INIT_ZERO + lsl LDC, LDC, #2 + + .align 5 +.Lgemm_beta_zero_02: + + mov A01, C00 + add C00, C00, LDC + + asr I, M, #5 + cmp I, #0 + ble .Lgemm_beta_zero_04 + add A02, A01, #32 + add A03, A02, #32 + add A04, A03, #32 + + .align 5 +.Lgemm_beta_zero_03: + + st1 {v0.4s, v1.4s}, [A01] + add A01, A01, calc_size + st1 {v2.4s, v3.4s}, [A02] + add A02, A02, calc_size + st1 {v4.4s, v5.4s}, [A03] + add A03, A03, calc_size + st1 {v6.4s, v7.4s}, [A04] + add A04, A04, calc_size + + subs I, I, #1 + bne .Lgemm_beta_zero_03 + + .align 5 +.Lgemm_beta_zero_04: + + and I, M, #31 + cmp I, #0 + ble .Lgemm_beta_zero_06 + + .align 5 +.Lgemm_beta_zero_05: + + str beta0, [A01] + add A01, A01, #4 + + subs I, I, #1 + bne .Lgemm_beta_zero_05 + + .align 5 +.Lgemm_beta_zero_06: + + subs N, N, #1 + bne .Lgemm_beta_zero_02 + + .align 5 +.Lgemm_beta_zero_L999: + mov x0, #0 + RESTORE_REGS + ret + + EPILOGUE diff --git a/kernel/arm64/sgemm_kernel_8x8_cortexa53.S b/kernel/arm64/sgemm_kernel_8x8_cortexa53.S new file mode 100644 index 000000000..628a928ca --- /dev/null +++ b/kernel/arm64/sgemm_kernel_8x8_cortexa53.S @@ -0,0 +1,2299 @@ +/******************************************************************************* +Copyright (c) 2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*******************************************************************************/ +#define ASSEMBLER +#include "common.h" + +/* X0 X1 X2 s0 X3 x4 x5 x6 */ +/*int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc) */ + +#define origM x0 +#define origN x1 +#define origK x2 +#define origPA x3 +#define origPB x4 +#define pC x5 +#define LDC x6 +#define offset x7 +#define counterL x8 +#define counterI x9 +#define counterJ x10 +#define pB x11 +#define pCRow0 x12 +#define pCRow1 x13 +#define pCRow2 x14 +#define pA x15 +#define temp x16 + +#define alpha0 s10 +#define alphaV0 v10.s[0] +#define alpha1 s11 +#define alphaV1 v11.s[0] +#define alpha2 s14 +#define alphaV2 v14.s[0] +#define alpha3 s15 +#define alphaV3 v15.s[0] + +#define A_PRE_SIZE 640 +#define B_PRE_SIZE 224 +#define C_PRE_SIZE 96 + +// 00 origM +// 01 origN +// 02 origK +// 03 origPA +// 04 origPB +// 05 pC +// 06 origLDC -> LDC +// 07 offset +// 08 counterL +// 09 counterI +// 10 counterJ +// 11 pB +// 12 pCRow0 +// 13 pCRow1 +// 14 pCRow2 +// 15 pA +// 16 temp +// 17 +// 18 must save +// 19 must save +// 20 must save pA0_2, pA0_3 +// 21 must save pA0_6, pA0_7 +// 22 must save pA1_2, pA1_3 +// 23 must save pA1_6, pA1_7 +// 24 must save pB0_2, pB0_3 +// 25 must save pB0_6, pB0_7 +// 26 must save pB1_2, pB1_3 +// 27 must save pB1_6, pB1_7 +// 28 must save +// 29 frame +// 30 link +// 31 sp + +//v00 ALPHA -> pA0_0, pA0_1, pA0_2, pA0_3 +//v01 pA0_4, pA0_5, pA0_6, pA0_7 +//v02 pA1_0, pA1_1, pA1_2, pA1_3 +//v03 pA1_4, pA1_5, pA1_6, pA1_7 +//v04 pB0_0, pB0_1, pB0_2, pB0_3 +//v05 pB0_4, pB0_5, pB0_6, pB0_7 +//v06 pB1_0, pB1_1, pB1_2, pB1_3 +//v07 pB1_4, pB1_5, pB1_6, pB1_7 +//v08 must save +//v09 must save +//v10 must save ALPHA0 +//v11 must save ALPHA1 +//v12 must save +//v13 must save +//v14 must save ALPHA2 +//v15 must save ALPHA3 +//v16 must save C00, C01, C02, C03 +//v17 must save C04, C05, C06, C07 +//v18 C08, C09, C10, C11 +//v19 C12, C13, C14, C15 +//v20 C16, C17, C18, C19 +//v21 C20, C21, C22, C23 +//v22 C24, C25, C26, C27 +//v23 C28, C29, C30, C31 +//v24 C32, C33, C34, C35 +//v25 C36, C37, C38, C39 +//v26 C40, C41, C42, C43 +//v27 C44, C45, C46, C47 +//v28 C48, C49, C50, C51 +//v29 C52, C53, C54, C55 +//v30 C56, C57, C58, C59 +//v31 C60, C61, C62, C63 + +/******************************************************************************* +* Macro definitions +*******************************************************************************/ + +.macro INIT8x8 + fmov s16, wzr + fmov s17, wzr + fmov s18, s16 + fmov s19, s17 + fmov s20, wzr + fmov s21, s16 + fmov s22, s17 + fmov s23, s18 + fmov s24, wzr + fmov s25, s16 + fmov s26, s17 + fmov s27, s18 + fmov s28, wzr + fmov s29, s16 + fmov s30, s17 + fmov s31, s18 +.endm + +.macro KERNEL8x8_I + ldp q0, q1, [pA], #32 + ldp q4, q5, [pB], #32 + + ldr d2, [pA], #8 + ldr d6, [pB], #8 + ldr d3, [pA, #8] + ldr d7, [pB, #8] + ldr x22, [pA], #16 + fmul v16.4s, v0.4s, v4.s[0] + ldr x26, [pB], #16 + fmul v17.4s, v1.4s, v4.s[0] + ldr x23, [pA], #8 + fmul v18.4s, v0.4s, v4.s[1] + ldr x27, [pB], #8 + fmul v19.4s, v1.4s, v4.s[1] + fmul v20.4s, v0.4s, v4.s[2] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmul v21.4s, v1.4s, v4.s[2] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmul v22.4s, v0.4s, v4.s[3] + fmul v23.4s, v1.4s, v4.s[3] + fmul v24.4s, v0.4s, v5.s[0] + fmul v25.4s, v1.4s, v5.s[0] + fmul v26.4s, v0.4s, v5.s[1] + fmul v27.4s, v1.4s, v5.s[1] + fmul v28.4s, v0.4s, v5.s[2] + fmul v29.4s, v1.4s, v5.s[2] + fmul v30.4s, v0.4s, v5.s[3] + fmul v31.4s, v1.4s, v5.s[3] +.endm + 
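+// Note: KERNEL8x8_M1/KERNEL8x8_M2 below are the two halves of a software
+// pipeline, presumably to hide load latency on the in-order Cortex-A53:
+// each half issues fmla on the q registers the other half finished
+// loading, while fetching the next A/B panels in 64-bit pieces
+// (ldr d / ldr x) and merging the high halves back in with
+// fmov v?.d[1], x?.  M1 computes on v0/v1 and v4/v5 while it loads
+// v2/v3 and v6/v7; M2 does the reverse, so the two must be called in
+// alternation, with KERNEL8x8_I priming and KERNEL8x8_E draining.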
+.macro KERNEL8x8_M1 + ldr d2, [pA], #8 + fmov v0.d[1], x20 + ldr d6, [pB], #8 + fmov v4.d[1], x24 + ldr d3, [pA, #8] + fmov v1.d[1], x21 + ldr d7, [pB, #8] + fmov v5.d[1], x25 + fmla v16.4s, v0.4s, v4.s[0] + ldr x22, [pA], #16 + fmla v17.4s, v1.4s, v4.s[0] + ldr x26, [pB], #16 + fmla v18.4s, v0.4s, v4.s[1] + ldr x23, [pA], #8 + fmla v19.4s, v1.4s, v4.s[1] + ldr x27, [pB], #8 + fmla v20.4s, v0.4s, v4.s[2] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v21.4s, v1.4s, v4.s[2] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v22.4s, v0.4s, v4.s[3] + fmla v23.4s, v1.4s, v4.s[3] + fmla v24.4s, v0.4s, v5.s[0] + fmla v25.4s, v1.4s, v5.s[0] + fmla v26.4s, v0.4s, v5.s[1] + fmla v27.4s, v1.4s, v5.s[1] + fmla v28.4s, v0.4s, v5.s[2] + fmla v29.4s, v1.4s, v5.s[2] + fmla v30.4s, v0.4s, v5.s[3] + fmla v31.4s, v1.4s, v5.s[3] +.endm + +.macro KERNEL8x8_M2 + ldr d0, [pA], #8 + fmov v2.d[1], x22 + ldr d4, [pB], #8 + fmov v6.d[1], x26 + ldr d1, [pA, #8] + fmov v3.d[1], x23 + ldr d5, [pB, #8] + fmov v7.d[1], x27 + fmla v16.4s, v2.4s, v6.s[0] + ldr x20, [pA], #16 + fmla v17.4s, v3.4s, v6.s[0] + ldr x24, [pB], #16 + fmla v18.4s, v2.4s, v6.s[1] + ldr x21, [pA], #8 + fmla v19.4s, v3.4s, v6.s[1] + ldr x25, [pB], #8 + fmla v20.4s, v2.4s, v6.s[2] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v21.4s, v3.4s, v6.s[2] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v22.4s, v2.4s, v6.s[3] + fmla v23.4s, v3.4s, v6.s[3] + fmla v24.4s, v2.4s, v7.s[0] + fmla v25.4s, v3.4s, v7.s[0] + fmla v26.4s, v2.4s, v7.s[1] + fmla v27.4s, v3.4s, v7.s[1] + fmla v28.4s, v2.4s, v7.s[2] + fmla v29.4s, v3.4s, v7.s[2] + fmla v30.4s, v2.4s, v7.s[3] + fmla v31.4s, v3.4s, v7.s[3] +.endm + +.macro KERNEL8x8_E + fmov v2.d[1], x22 + fmov v6.d[1], x26 + fmov v3.d[1], x23 + fmov v7.d[1], x27 + fmla v16.4s, v2.4s, v6.s[0] + fmla v17.4s, v3.4s, v6.s[0] + fmla v18.4s, v2.4s, v6.s[1] + fmla v19.4s, v3.4s, v6.s[1] + fmla v20.4s, v2.4s, v6.s[2] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v21.4s, v3.4s, v6.s[2] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v22.4s, v2.4s, v6.s[3] + fmla v23.4s, v3.4s, v6.s[3] + fmla v24.4s, v2.4s, v7.s[0] + fmla v25.4s, v3.4s, v7.s[0] + fmla v26.4s, v2.4s, v7.s[1] + fmla v27.4s, v3.4s, v7.s[1] + fmla v28.4s, v2.4s, v7.s[2] + fmla v29.4s, v3.4s, v7.s[2] + fmla v30.4s, v2.4s, v7.s[3] + fmla v31.4s, v3.4s, v7.s[3] +.endm + +.macro KERNEL8x8_SUB + ldp q0, q1, [pA], #32 + ldp q4, q5, [pB], #32 + + fmla v16.4s, v0.4s, v4.s[0] + fmla v17.4s, v1.4s, v4.s[0] + fmla v18.4s, v0.4s, v4.s[1] + fmla v19.4s, v1.4s, v4.s[1] + fmla v20.4s, v0.4s, v4.s[2] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v21.4s, v1.4s, v4.s[2] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v22.4s, v0.4s, v4.s[3] + fmla v23.4s, v1.4s, v4.s[3] + fmla v24.4s, v0.4s, v5.s[0] + fmla v25.4s, v1.4s, v5.s[0] + fmla v26.4s, v0.4s, v5.s[1] + fmla v27.4s, v1.4s, v5.s[1] + fmla v28.4s, v0.4s, v5.s[2] + fmla v29.4s, v1.4s, v5.s[2] + fmla v30.4s, v0.4s, v5.s[3] + fmla v31.4s, v1.4s, v5.s[3] +.endm + +.macro SAVE8x8 + prfm PLDL2KEEP, [pCRow0, #C_PRE_SIZE] + add pCRow1, pCRow0, LDC + + ldp q0, q1, [pCRow0] + fmla v0.4s, v16.4s, alphaV0 + fmla v1.4s, v17.4s, alphaV1 + stp q0, q1, [pCRow0] + + prfm PLDL2KEEP, [pCRow1, #C_PRE_SIZE] + add pCRow2, pCRow1, LDC + + ldp q2, q3, [pCRow1] + fmla v2.4s, v18.4s, alphaV2 + fmla v3.4s, v19.4s, alphaV3 + stp q2, q3, [pCRow1] + + prfm PLDL2KEEP, [pCRow2, #C_PRE_SIZE] + add pCRow1, pCRow2, LDC + + ldp q4, q5, [pCRow2] + fmla v4.4s, v20.4s, alphaV0 + fmla v5.4s, v21.4s, alphaV1 + stp q4, q5, [pCRow2] + + prfm PLDL2KEEP, [pCRow1, #C_PRE_SIZE] + add pCRow2, pCRow1, LDC + + ldp 
q6, q7, [pCRow1] + fmla v6.4s, v22.4s, alphaV2 + fmla v7.4s, v23.4s, alphaV3 + stp q6, q7, [pCRow1] + + prfm PLDL2KEEP, [pCRow2, #C_PRE_SIZE] + add pCRow1, pCRow2, LDC + + ldp q0, q1, [pCRow2] + fmla v0.4s, v24.4s, alphaV0 + fmla v1.4s, v25.4s, alphaV1 + stp q0, q1, [pCRow2] + + prfm PLDL2KEEP, [pCRow1, #C_PRE_SIZE] + add pCRow2, pCRow1, LDC + + ldp q2, q3, [pCRow1] + fmla v2.4s, v26.4s, alphaV2 + fmla v3.4s, v27.4s, alphaV3 + stp q2, q3, [pCRow1] + + prfm PLDL2KEEP, [pCRow2, #C_PRE_SIZE] + add pCRow1, pCRow2, LDC + + ldp q4, q5, [pCRow2] + fmla v4.4s, v28.4s, alphaV0 + fmla v5.4s, v29.4s, alphaV1 + stp q4, q5, [pCRow2] + + prfm PLDL2KEEP, [pCRow1, #C_PRE_SIZE] + + ldp q6, q7, [pCRow1] + fmla v6.4s, v30.4s, alphaV2 + fmla v7.4s, v31.4s, alphaV3 + stp q6, q7, [pCRow1] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + +.macro INIT4x8 + fmov s16, wzr + fmov s18, wzr + fmov s20, wzr + fmov s22, s16 + fmov s24, wzr + fmov s26, s16 + fmov s28, s18 + fmov s30, s20 +.endm + +.macro KERNEL4x8_I + ldr q0, [pA], #16 + ldp q4, q5, [pB], #32 + + ldr d2, [pA], #8 + ldr d6, [pB], #8 + ldr d7, [pB, #8] + ldr x22, [pA], #8 + fmul v16.4s, v0.4s, v4.s[0] + ldr x26, [pB], #16 + fmul v18.4s, v0.4s, v4.s[1] + ldr x27, [pB], #8 + fmul v20.4s, v0.4s, v4.s[2] + fmul v22.4s, v0.4s, v4.s[3] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmul v24.4s, v0.4s, v5.s[0] + fmul v26.4s, v0.4s, v5.s[1] + fmul v28.4s, v0.4s, v5.s[2] + fmul v30.4s, v0.4s, v5.s[3] +.endm + +.macro KERNEL4x8_M1 + ldr d2, [pA], #8 + fmov v0.d[1], x20 + ldr d6, [pB], #8 + fmov v4.d[1], x24 + ldr d7, [pB, #8] + fmov v5.d[1], x25 + ldr x22, [pA], #8 + fmla v16.4s, v0.4s, v4.s[0] + ldr x26, [pB], #16 + fmla v18.4s, v0.4s, v4.s[1] + ldr x27, [pB], #8 + fmla v20.4s, v0.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v24.4s, v0.4s, v5.s[0] + fmla v26.4s, v0.4s, v5.s[1] + fmla v28.4s, v0.4s, v5.s[2] + fmla v30.4s, v0.4s, v5.s[3] +.endm + +.macro KERNEL4x8_M2 + ldr d0, [pA], #8 + fmov v2.d[1], x22 + ldr d4, [pB], #8 + fmov v6.d[1], x26 + ldr d5, [pB, #8] + fmov v7.d[1], x27 + ldr x20, [pA], #8 + fmla v16.4s, v2.4s, v6.s[0] + ldr x24, [pB], #16 + fmla v18.4s, v2.4s, v6.s[1] + ldr x25, [pB], #8 + fmla v20.4s, v2.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v24.4s, v2.4s, v7.s[0] + fmla v26.4s, v2.4s, v7.s[1] + fmla v28.4s, v2.4s, v7.s[2] + fmla v30.4s, v2.4s, v7.s[3] +.endm + +.macro KERNEL4x8_E + fmov v2.d[1], x22 + fmov v6.d[1], x26 + fmov v7.d[1], x27 + fmla v16.4s, v2.4s, v6.s[0] + fmla v18.4s, v2.4s, v6.s[1] + fmla v20.4s, v2.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v24.4s, v2.4s, v7.s[0] + fmla v26.4s, v2.4s, v7.s[1] + fmla v28.4s, v2.4s, v7.s[2] + fmla v30.4s, v2.4s, v7.s[3] +.endm + +.macro KERNEL4x8_SUB + ldr q0, [pA], #16 + ldp q4, q5, [pB], #32 + + fmla v16.4s, v0.4s, v4.s[0] + fmla v18.4s, v0.4s, v4.s[1] + fmla v20.4s, v0.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v24.4s, v0.4s, v5.s[0] + fmla v26.4s, v0.4s, v5.s[1] + fmla v28.4s, v0.4s, v5.s[2] + fmla v30.4s, v0.4s, v5.s[3] +.endm + +.macro SAVE4x8 + add pCRow1, pCRow0, LDC + + ldr q0, [pCRow0] + fmla v0.4s, v16.4s, alphaV0 + str q0, [pCRow0] + + add pCRow2, pCRow1, LDC + + ldr q2, [pCRow1] + fmla v2.4s, v18.4s, alphaV2 + str q2, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr q4, [pCRow2] + fmla v4.4s, v20.4s, alphaV0 + str q4, [pCRow2] + + add pCRow2, pCRow1, LDC + + ldr q6, 
[pCRow1] + fmla v6.4s, v22.4s, alphaV2 + str q6, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr q0, [pCRow2] + fmla v0.4s, v24.4s, alphaV0 + str q0, [pCRow2] + + add pCRow2, pCRow1, LDC + + ldr q2, [pCRow1] + fmla v2.4s, v26.4s, alphaV2 + str q2, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr q4, [pCRow2] + fmla v4.4s, v28.4s, alphaV0 + str q4, [pCRow2] + + ldr q6, [pCRow1] + fmla v6.4s, v30.4s, alphaV2 + str q6, [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT2x8 + fmov s16, wzr + fmov s18, wzr + fmov s20, wzr + fmov s22, s16 + fmov s24, wzr + fmov s26, s16 + fmov s28, s18 + fmov s30, s20 +.endm + +.macro KERNEL2x8_SUB + ldr d0, [pA], #8 + ldp q4, q5, [pB], #32 + + fmla v16.2s, v0.2s, v4.s[0] + fmla v18.2s, v0.2s, v4.s[1] + fmla v20.2s, v0.2s, v4.s[2] + fmla v22.2s, v0.2s, v4.s[3] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla v24.2s, v0.2s, v5.s[0] + fmla v26.2s, v0.2s, v5.s[1] + fmla v28.2s, v0.2s, v5.s[2] + fmla v30.2s, v0.2s, v5.s[3] +.endm + +.macro SAVE2x8 + add pCRow1, pCRow0, LDC + + ldr d0, [pCRow0] + fmla v0.2s, v16.2s, alphaV0 + str d0, [pCRow0] + + add pCRow2, pCRow1, LDC + + ldr d2, [pCRow1] + fmla v2.2s, v18.2s, alphaV2 + str d2, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr d4, [pCRow2] + fmla v4.2s, v20.2s, alphaV0 + str d4, [pCRow2] + + add pCRow2, pCRow1, LDC + + ldr d6, [pCRow1] + fmla v6.2s, v22.2s, alphaV2 + str d6, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr d0, [pCRow2] + fmla v0.2s, v24.2s, alphaV0 + str d0, [pCRow2] + + add pCRow2, pCRow1, LDC + + ldr d2, [pCRow1] + fmla v2.2s, v26.2s, alphaV2 + str d2, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr d4, [pCRow2] + fmla v4.2s, v28.2s, alphaV0 + str d4, [pCRow2] + + ldr d6, [pCRow1] + fmla v6.2s, v30.2s, alphaV2 + str d6, [pCRow1] + + add pCRow0, pCRow0, #8 +.endm + +/******************************************************************************/ + +.macro INIT1x8 + fmov s16, wzr + fmov s18, wzr + fmov s20, wzr + fmov s22, s16 + fmov s24, wzr + fmov s26, s16 + fmov s28, s18 + fmov s30, s20 +.endm + +.macro KERNEL1x8_SUB + ldp q4, q5, [pB], #32 + ldr s0, [pA], #4 + + fmla s16, s0, v4.s[0] + fmla s18, s0, v4.s[1] + fmla s20, s0, v4.s[2] + fmla s22, s0, v4.s[3] + prfm PLDL1KEEP, [pB, #B_PRE_SIZE] + fmla s24, s0, v5.s[0] + fmla s26, s0, v5.s[1] + fmla s28, s0, v5.s[2] + fmla s30, s0, v5.s[3] +.endm + +.macro SAVE1x8 + add pCRow1, pCRow0, LDC + + ldr s0, [pCRow0] + fmla s0, s16, alphaV0 + str s0, [pCRow0] + + add pCRow2, pCRow1, LDC + + ldr s2, [pCRow1] + fmla s2, s18, alphaV2 + str s2, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr s4, [pCRow2] + fmla s4, s20, alphaV0 + str s4, [pCRow2] + + add pCRow2, pCRow1, LDC + + ldr s6, [pCRow1] + fmla s6, s22, alphaV2 + str s6, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr s0, [pCRow2] + fmla s0, s24, alphaV0 + str s0, [pCRow2] + + add pCRow2, pCRow1, LDC + + ldr s2, [pCRow1] + fmla s2, s26, alphaV2 + str s2, [pCRow1] + + add pCRow1, pCRow2, LDC + + ldr s4, [pCRow2] + fmla s4, s28, alphaV0 + str s4, [pCRow2] + + ldr s6, [pCRow1] + fmla s6, s30, alphaV2 + str s6, [pCRow1] + + add pCRow0, pCRow0, #4 +.endm + +/******************************************************************************/ + +.macro INIT8x4 + fmov s16, wzr + fmov s17, wzr + fmov s18, wzr + fmov s19, s16 + fmov s20, wzr + fmov s21, s16 + fmov s22, wzr + fmov s23, s16 +.endm + +.macro KERNEL8x4_I + ldp q0, q1, [pA], #32 + ldr q4, [pB], #16 + + ldr d2, [pA], #8 + ldr d6, [pB], #8 + ldr d3, [pA, #8] + fmul v16.4s, v0.4s, v4.s[0] + ldr x22, 
[pA], #16 + fmul v17.4s, v1.4s, v4.s[0] + ldr x26, [pB], #8 + fmul v18.4s, v0.4s, v4.s[1] + ldr x23, [pA], #8 + fmul v19.4s, v1.4s, v4.s[1] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmul v20.4s, v0.4s, v4.s[2] + fmul v21.4s, v1.4s, v4.s[2] + fmul v22.4s, v0.4s, v4.s[3] + fmul v23.4s, v1.4s, v4.s[3] +.endm + +.macro KERNEL8x4_M1 + ldr d2, [pA], #8 + fmov v0.d[1], x20 + ldr d6, [pB], #8 + fmov v4.d[1], x24 + ldr d3, [pA, #8] + fmov v1.d[1], x21 + ldr x22, [pA], #16 + fmla v16.4s, v0.4s, v4.s[0] + ldr x26, [pB], #8 + fmla v17.4s, v1.4s, v4.s[0] + ldr x23, [pA], #8 + fmla v18.4s, v0.4s, v4.s[1] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v19.4s, v1.4s, v4.s[1] + fmla v20.4s, v0.4s, v4.s[2] + fmla v21.4s, v1.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] + fmla v23.4s, v1.4s, v4.s[3] +.endm + +.macro KERNEL8x4_M2 + ldr d0, [pA], #8 + fmov v2.d[1], x22 + ldr d4, [pB], #8 + fmov v6.d[1], x26 + ldr d1, [pA, #8] + fmov v3.d[1], x23 + ldr x20, [pA], #16 + fmla v16.4s, v2.4s, v6.s[0] + ldr x24, [pB], #8 + fmla v17.4s, v3.4s, v6.s[0] + ldr x21, [pA], #8 + fmla v18.4s, v2.4s, v6.s[1] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v19.4s, v3.4s, v6.s[1] + fmla v20.4s, v2.4s, v6.s[2] + fmla v21.4s, v3.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] + fmla v23.4s, v3.4s, v6.s[3] +.endm + +.macro KERNEL8x4_E + fmov v2.d[1], x22 + fmov v6.d[1], x26 + fmov v3.d[1], x23 + fmla v16.4s, v2.4s, v6.s[0] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v17.4s, v3.4s, v6.s[0] + fmla v18.4s, v2.4s, v6.s[1] + fmla v19.4s, v3.4s, v6.s[1] + fmla v20.4s, v2.4s, v6.s[2] + fmla v21.4s, v3.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] + fmla v23.4s, v3.4s, v6.s[3] +.endm + +.macro KERNEL8x4_SUB + ldp q0, q1, [pA], #32 + ldr q4, [pB], #16 + + fmla v16.4s, v0.4s, v4.s[0] + fmla v17.4s, v1.4s, v4.s[0] + fmla v18.4s, v0.4s, v4.s[1] + fmla v19.4s, v1.4s, v4.s[1] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v20.4s, v0.4s, v4.s[2] + fmla v21.4s, v1.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] + fmla v23.4s, v1.4s, v4.s[3] +.endm + +.macro SAVE8x4 + prfm PLDL2KEEP, [pCRow0, #C_PRE_SIZE] + add pCRow1, pCRow0, LDC + + ldp q0, q1, [pCRow0] + fmla v0.4s, v16.4s, alphaV0 + fmla v1.4s, v17.4s, alphaV1 + stp q0, q1, [pCRow0] + + prfm PLDL2KEEP, [pCRow1, #C_PRE_SIZE] + add pCRow2, pCRow1, LDC + + ldp q4, q5, [pCRow1] + fmla v4.4s, v18.4s, alphaV0 + fmla v5.4s, v19.4s, alphaV1 + stp q4, q5, [pCRow1] + + prfm PLDL2KEEP, [pCRow2, #C_PRE_SIZE] + add pCRow1, pCRow2, LDC + + ldp q0, q1, [pCRow2] + fmla v0.4s, v20.4s, alphaV0 + fmla v1.4s, v21.4s, alphaV1 + stp q0, q1, [pCRow2] + + prfm PLDL2KEEP, [pCRow1, #C_PRE_SIZE] + + ldp q4, q5, [pCRow1] + fmla v4.4s, v22.4s, alphaV0 + fmla v5.4s, v23.4s, alphaV1 + stp q4, q5, [pCRow1] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + + +.macro INIT4x4 + fmov s16, wzr + fmov s18, wzr + fmov s20, wzr + fmov s22, wzr +.endm + +.macro KERNEL4x4_I + ldr q0, [pA], #16 + ldr q4, [pB], #16 + + ldr d2, [pA], #8 + ldr d6, [pB], #8 + fmul v16.4s, v0.4s, v4.s[0] + ldr x22, [pA], #8 + fmul v18.4s, v0.4s, v4.s[1] + ldr x26, [pB], #8 + fmul v20.4s, v0.4s, v4.s[2] + fmul v22.4s, v0.4s, v4.s[3] +.endm + +.macro KERNEL4x4_M1 + ldr d2, [pA], #8 + fmov v0.d[1], x20 + ldr d6, [pB], #8 + fmov v4.d[1], x24 + ldr x22, [pA], #8 + ldr x26, [pB], #8 + fmla v16.4s, v0.4s, v4.s[0] + fmla v18.4s, v0.4s, v4.s[1] + fmla v20.4s, v0.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] +.endm + +.macro KERNEL4x4_M2 + ldr d0, [pA], #8 + fmov v2.d[1], x22 + ldr d4, [pB], #8 + fmov v6.d[1], x26 + ldr x20, [pA], #8 + 
ldr x24, [pB], #8 + fmla v16.4s, v2.4s, v6.s[0] + fmla v18.4s, v2.4s, v6.s[1] + fmla v20.4s, v2.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] +.endm + +.macro KERNEL4x4_E + fmov v2.d[1], x22 + fmov v6.d[1], x26 + fmla v16.4s, v2.4s, v6.s[0] + fmla v18.4s, v2.4s, v6.s[1] + fmla v20.4s, v2.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] +.endm + +.macro KERNEL4x4_SUB + ldr q0, [pA], #16 + ldr q4, [pB], #16 + + fmla v16.4s, v0.4s, v4.s[0] + fmla v18.4s, v0.4s, v4.s[1] + fmla v20.4s, v0.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] +.endm + +.macro SAVE4x4 + ldr q0, [pCRow0] + fmla v0.4s, v16.4s, alphaV0 + str q0, [pCRow0] + + add pCRow1, pCRow0, LDC + ldr q1, [pCRow1] + fmla v1.4s, v18.4s, alphaV2 + str q1, [pCRow1] + + add pCRow2, pCRow1, LDC + ldr q2, [pCRow2] + fmla v2.4s, v20.4s, alphaV0 + str q2, [pCRow2] + + add pCRow1, pCRow2, LDC + ldr q3, [pCRow1] + fmla v3.4s, v22.4s, alphaV2 + str q3, [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT2x4 + fmov s16, wzr + fmov s18, wzr + fmov s20, wzr + fmov s22, s16 +.endm + +.macro KERNEL2x4_SUB + ldr d0, [pA], #8 + ldr q4, [pB], #16 + + fmla v16.2s, v0.2s, v4.s[0] + fmla v18.2s, v0.2s, v4.s[1] + fmla v20.2s, v0.2s, v4.s[2] + fmla v22.2s, v0.2s, v4.s[3] +.endm + +.macro SAVE2x4 + ldr d8, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + str d8, [pCRow0] + + add pCRow1, pCRow0, LDC + ldr d12, [pCRow1] + fmla v12.2s, v18.2s, alphaV1 + str d12, [pCRow1] + + add pCRow2, pCRow1, LDC + ldr d8, [pCRow2] + fmla v8.2s, v20.2s, alphaV2 + str d8, [pCRow2] + + add pCRow1, pCRow2, LDC + ldr d12, [pCRow1] + fmla v12.2s, v22.2s, alphaV3 + str d12, [pCRow1] + + add pCRow0, pCRow0, #8 +.endm + +/******************************************************************************/ + +.macro INIT1x4 + fmov s16, wzr + fmov s20, s16 +.endm + +.macro KERNEL1x4_SUB + ldr s0, [pA] + add pA, pA, #4 + + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + + fmla v16.2s, v8.2s, v0.s[0] + fmla v20.2s, v9.2s, v0.s[0] +.endm + +.macro SAVE1x4 + add pCRow1, pCRow0, LDC + ld1 {v8.s}[0], [pCRow0] + ld1 {v8.s}[1], [pCRow1] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.s}[0], [pCRow0] + st1 {v8.s}[1], [pCRow1] + + add pCRow2, pCRow1, LDC + add pCRow1, pCRow2, LDC + ld1 {v12.s}[0], [pCRow2] + ld1 {v12.s}[1], [pCRow1] + fmla v12.2s, v20.2s, alphaV1 + st1 {v12.s}[0], [pCRow2] + st1 {v12.s}[1], [pCRow1] + + add pCRow0, pCRow0, #4 +.endm + +/******************************************************************************/ + +.macro INIT8x2 + fmov s16, wzr + fmov s17, s16 + fmov s18, s17 + fmov s19, s16 +.endm + +.macro KERNEL8x2_SUB + ldp q0, q1, [pA], #32 + ldr d4, [pB], #8 + + fmla v16.4s, v0.4s, v4.s[0] + fmla v17.4s, v1.4s, v4.s[0] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v18.4s, v0.4s, v4.s[1] + fmla v19.4s, v1.4s, v4.s[1] +.endm + +.macro SAVE8x2 + prfm PLDL2KEEP, [pCRow0, #C_PRE_SIZE] + add pCRow1, pCRow0, LDC + + ldp q0, q1, [pCRow0] + fmla v0.4s, v16.4s, alphaV0 + fmla v1.4s, v17.4s, alphaV1 + stp q0, q1, [pCRow0] + + prfm PLDL2KEEP, [pCRow1, #C_PRE_SIZE] + add pCRow2, pCRow1, LDC + + ldp q4, q5, [pCRow1] + fmla v4.4s, v18.4s, alphaV0 + fmla v5.4s, v19.4s, alphaV1 + stp q4, q5, [pCRow1] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + +.macro INIT4x2 + fmov s16, wzr + fmov s17, s16 + fmov s20, s17 + fmov s21, s16 +.endm + +.macro KERNEL4x2_SUB + ld1 {v8.2s}, [pB] + add pB, pB, #8 + ld1 {v0.2s, v1.2s}, [pA] + add pA, pA, #16 + + fmla v16.2s, v0.2s, v8.s[0] + 
fmla v17.2s, v1.2s, v8.s[0] + fmla v20.2s, v0.2s, v8.s[1] + fmla v21.2s, v1.2s, v8.s[1] +.endm + +.macro SAVE4x2 + ld1 {v8.2s, v9.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + fmla v9.2s, v17.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow0] + + add pCRow1, pCRow0, LDC + ld1 {v12.2s, v13.2s}, [pCRow1] + fmla v12.2s, v20.2s, alphaV2 + fmla v13.2s, v21.2s, alphaV3 + st1 {v12.2s, v13.2s}, [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT2x2 + fmov s16, wzr + fmov s20, s16 +.endm + +.macro KERNEL2x2_SUB + ld1 {v8.2s}, [pB] + add pB, pB, #8 + + ld1 {v0.2s}, [pA] + add pA, pA, #8 + + fmla v16.2s, v0.2s, v8.s[0] + fmla v20.2s, v0.2s, v8.s[1] +.endm + +.macro SAVE2x2 + ld1 {v8.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.2s}, [pCRow0] + + add pCRow1 , pCRow0, LDC + ld1 {v12.2s}, [pCRow1] + fmla v12.2s, v20.2s, alphaV1 + st1 {v12.2s}, [pCRow1] + + add pCRow0, pCRow0, #8 +.endm + +/******************************************************************************/ + +.macro INIT1x2 + fmov s16, wzr +.endm + +.macro KERNEL1x2_SUB + ld1 {v8.2s} , [pB] + add pB , pB, #8 + + ldr s0 , [pA] + add pA, pA, #4 + + fmla v16.2s, v8.2s, v0.s[0] +.endm + +.macro SAVE1x2 + add pCRow1 , pCRow0, LDC + ld1 {v8.s}[0], [pCRow0] + ld1 {v8.s}[1], [pCRow1] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.s}[0], [pCRow0] + st1 {v8.s}[1], [pCRow1] + + add pCRow0, pCRow0, #4 +.endm + +/******************************************************************************/ + +.macro INIT8x1 + fmov s16, wzr + fmov s17, wzr +.endm + +.macro KERNEL8x1_SUB + ldr s4, [pB], #4 + ldp q0, q1, [pA], #32 + + fmla v16.4s, v0.4s, v4.s[0] + prfm PLDL1KEEP, [pA, #A_PRE_SIZE] + fmla v17.4s, v1.4s, v4.s[0] +.endm + +.macro SAVE8x1 + prfm PLDL2KEEP, [pCRow0, #C_PRE_SIZE] + + ldp q0, q1, [pCRow0] + fmla v0.4s, v16.4s, alphaV0 + fmla v1.4s, v17.4s, alphaV1 + stp q0, q1, [pCRow0] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + +.macro INIT4x1 + fmov s16, wzr + fmov s17, s16 +.endm + +.macro KERNEL4x1_SUB + ldr s8, [pB] + add pB , pB, #4 + + ld1 {v0.2s, v1.2s}, [pA] + add pA , pA, #16 + + fmla v16.2s, v0.2s, v8.s[0] + fmla v17.2s, v1.2s, v8.s[0] +.endm + +.macro SAVE4x1 + ld1 {v8.2s, v9.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + fmla v9.2s, v17.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow0] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT2x1 + fmov s16, wzr +.endm + +.macro KERNEL2x1_SUB + ldr s8, [pB] + add pB , pB, #4 + + ld1 {v0.2s}, [pA] + add pA , pA, #8 + + fmla v16.2s, v0.2s, v8.s[0] +.endm + +.macro SAVE2x1 + ld1 {v8.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.2s}, [pCRow0] + + add pCRow0, pCRow0, #8 +.endm + +/******************************************************************************/ + +.macro INIT1x1 + fmov s16, wzr +.endm + +.macro KERNEL1x1_SUB + ldr s8, [pB] + add pB , pB, #4 + + ldr s0, [pA] + add pA , pA, #4 + + fmadd s16, s0, s8, s16 +.endm + +.macro SAVE1x1 + ldr s8, [pCRow0] + fmla s8, s16, alphaV0 + str s8, [pCRow0] + + add pCRow0, pCRow0, #4 +.endm + +/******************************************************************************* +* End of macro definitions +*******************************************************************************/ + + PROLOGUE + +.Lsgemm_kernel_begin: + + .align 5 + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, 
d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] + + fmov alpha0, s0 + fmov alpha1, s0 + fmov alpha2, s0 + fmov alpha3, s0 + + lsl LDC, LDC, #2 // ldc = ldc * 4 + + mov pB, origPB + + mov counterJ, origN + asr counterJ, counterJ, #3 // J = J / 8 + cmp counterJ, #0 + ble .Lsgemm_kernel_L4_BEGIN + +/******************************************************************************/ +/******************************************************************************/ + +.Lsgemm_kernel_L8_BEGIN: + mov pCRow0, pC // pCRow0 = C + add pC, pC, LDC, lsl #3 + + mov pA, origPA // pA = start of A array + +/******************************************************************************/ + +.Lsgemm_kernel_L8_M8_BEGIN: + + mov counterI, origM + asr counterI, counterI, #3 // counterI = counterI / 8 + cmp counterI, #0 + ble .Lsgemm_kernel_L8_M4_BEGIN + +.Lsgemm_kernel_L8_M8_20: + + mov pB, origPB + + asr counterL , origK, #3 // L = K / 8 + cmp counterL , #2 // is there at least 16 to do? + blt .Lsgemm_kernel_L8_M8_32 + + KERNEL8x8_I // do one in the K + KERNEL8x8_M2 // do another in the K + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + + subs counterL, counterL, #2 + ble .Lsgemm_kernel_L8_M8_22a + .align 5 + +.Lsgemm_kernel_L8_M8_22: + + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L8_M8_22 + +.Lsgemm_kernel_L8_M8_22a: + + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_E + + b .Lsgemm_kernel_L8_M8_44 + +.Lsgemm_kernel_L8_M8_32: + + tst counterL, #1 + ble .Lsgemm_kernel_L8_M8_40 + + KERNEL8x8_I + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_E + + b .Lsgemm_kernel_L8_M8_44 + +.Lsgemm_kernel_L8_M8_40: + + INIT8x8 + +.Lsgemm_kernel_L8_M8_44: + + ands counterL , origK, #7 + ble .Lsgemm_kernel_L8_M8_100 + +.Lsgemm_kernel_L8_M8_46: + + KERNEL8x8_SUB + + subs counterL, counterL, 1 + bgt .Lsgemm_kernel_L8_M8_46 + +.Lsgemm_kernel_L8_M8_100: + + SAVE8x8 + +.Lsgemm_kernel_L8_M8_END: + subs counterI, counterI, #1 + bne .Lsgemm_kernel_L8_M8_20 + +/******************************************************************************/ + +.Lsgemm_kernel_L8_M4_BEGIN: + + mov counterI, origM + tst counterI , #7 + ble .Lsgemm_kernel_L8_END + + tst counterI, #4 + ble .Lsgemm_kernel_L8_M2_BEGIN + +.Lsgemm_kernel_L8_M4_20: + + mov pB, origPB + + asr counterL , origK, #1 // L = K / 2 + cmp counterL , #2 // is there at least 4 to do? 
+ blt .Lsgemm_kernel_L8_M4_32 + + KERNEL4x8_I // do one in the K + KERNEL4x8_M2 // do another in the K + + subs counterL, counterL, #2 + ble .Lsgemm_kernel_L8_M4_22a + .align 5 + +.Lsgemm_kernel_L8_M4_22: + + KERNEL4x8_M1 + KERNEL4x8_M2 + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L8_M4_22 + +.Lsgemm_kernel_L8_M4_22a: + + KERNEL4x8_M1 + KERNEL4x8_E + + b .Lsgemm_kernel_L8_M4_44 + +.Lsgemm_kernel_L8_M4_32: + + tst counterL, #1 + ble .Lsgemm_kernel_L8_M4_40 + + KERNEL4x8_I + KERNEL4x8_E + + b .Lsgemm_kernel_L8_M4_44 + +.Lsgemm_kernel_L8_M4_40: + + INIT4x8 + +.Lsgemm_kernel_L8_M4_44: + + ands counterL , origK, #1 + ble .Lsgemm_kernel_L8_M4_100 + +.Lsgemm_kernel_L8_M4_46: + + KERNEL4x8_SUB + +.Lsgemm_kernel_L8_M4_100: + + SAVE4x8 + +.Lsgemm_kernel_L8_M4_END: + +/******************************************************************************/ + +.Lsgemm_kernel_L8_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble .Lsgemm_kernel_L8_END + + tst counterI, #2 // counterI = counterI / 2 + ble .Lsgemm_kernel_L8_M1_BEGIN + +.Lsgemm_kernel_L8_M2_20: + + INIT2x8 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lsgemm_kernel_L8_M2_40 + +.Lsgemm_kernel_L8_M2_22: + + KERNEL2x8_SUB + KERNEL2x8_SUB + KERNEL2x8_SUB + KERNEL2x8_SUB + + KERNEL2x8_SUB + KERNEL2x8_SUB + KERNEL2x8_SUB + KERNEL2x8_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L8_M2_22 + + +.Lsgemm_kernel_L8_M2_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L8_M2_100 + +.Lsgemm_kernel_L8_M2_42: + + KERNEL2x8_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L8_M2_42 + +.Lsgemm_kernel_L8_M2_100: + + SAVE2x8 + +.Lsgemm_kernel_L8_M2_END: + +/******************************************************************************/ + +.Lsgemm_kernel_L8_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble .Lsgemm_kernel_L8_END + +.Lsgemm_kernel_L8_M1_20: + + INIT1x8 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lsgemm_kernel_L8_M1_40 + +.Lsgemm_kernel_L8_M1_22: + KERNEL1x8_SUB + KERNEL1x8_SUB + KERNEL1x8_SUB + KERNEL1x8_SUB + + KERNEL1x8_SUB + KERNEL1x8_SUB + KERNEL1x8_SUB + KERNEL1x8_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L8_M1_22 + + +.Lsgemm_kernel_L8_M1_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L8_M1_100 + +.Lsgemm_kernel_L8_M1_42: + + KERNEL1x8_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L8_M1_42 + +.Lsgemm_kernel_L8_M1_100: + + SAVE1x8 + +.Lsgemm_kernel_L8_END: + lsl temp, origK, #5 // B = B + K * 4 * 8 + add origPB, origPB, temp + + subs counterJ, counterJ , #1 // j-- + bgt .Lsgemm_kernel_L8_BEGIN + +/******************************************************************************/ +/******************************************************************************/ + +.Lsgemm_kernel_L4_BEGIN: + + mov counterJ , origN + tst counterJ , #7 + ble .Lsgemm_kernel_L999 + + tst counterJ , #4 + ble .Lsgemm_kernel_L2_BEGIN + + mov pCRow0, pC // pCRow0 = pC + + add pC,pC,LDC, lsl #2 + + mov pA, origPA // pA = A + +/******************************************************************************/ + +.Lsgemm_kernel_L4_M8_BEGIN: + + mov counterI, origM + asr counterI, counterI, #3 // counterI = counterI / 8 + cmp counterI, #0 + ble .Lsgemm_kernel_L4_M4_BEGIN + +.Lsgemm_kernel_L4_M8_20: + + mov pB, origPB + + asr counterL , origK, #1 // L = K / 2 + cmp counterL , #2 // is there at least 4 to do? 
+ blt .Lsgemm_kernel_L4_M8_32 + + KERNEL8x4_I // do one in the K + KERNEL8x4_M2 // do another in the K + + subs counterL, counterL, #2 + ble .Lsgemm_kernel_L4_M8_22a + .align 5 + +.Lsgemm_kernel_L4_M8_22: + + KERNEL8x4_M1 + KERNEL8x4_M2 + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L4_M8_22 + +.Lsgemm_kernel_L4_M8_22a: + + KERNEL8x4_M1 + KERNEL8x4_E + + b .Lsgemm_kernel_L4_M8_44 + +.Lsgemm_kernel_L4_M8_32: + + tst counterL, #1 + ble .Lsgemm_kernel_L4_M8_40 + + KERNEL8x4_I + KERNEL8x4_E + + b .Lsgemm_kernel_L4_M8_44 + +.Lsgemm_kernel_L4_M8_40: + + INIT8x4 + +.Lsgemm_kernel_L4_M8_44: + + ands counterL , origK, #1 + ble .Lsgemm_kernel_L4_M8_100 + +.Lsgemm_kernel_L4_M8_46: + + KERNEL8x4_SUB + +.Lsgemm_kernel_L4_M8_100: + + SAVE8x4 + +.Lsgemm_kernel_L4_M8_END: + subs counterI, counterI, #1 + bne .Lsgemm_kernel_L4_M8_20 + +/******************************************************************************/ + +.Lsgemm_kernel_L4_M4_BEGIN: + + mov counterI, origM + tst counterI , #7 + ble .Lsgemm_kernel_L4_END + + tst counterI, #4 + ble .Lsgemm_kernel_L4_M2_BEGIN + +.Lsgemm_kernel_L4_M4_20: + + mov pB, origPB + + asr counterL , origK, #1 // L = K / 2 + cmp counterL , #2 // is there at least 4 to do? + blt .Lsgemm_kernel_L4_M4_32 + + KERNEL4x4_I // do one in the K + KERNEL4x4_M2 // do another in the K + + subs counterL, counterL, #2 + ble .Lsgemm_kernel_L4_M4_22a + .align 5 + +.Lsgemm_kernel_L4_M4_22: + + KERNEL4x4_M1 + KERNEL4x4_M2 + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L4_M4_22 + +.Lsgemm_kernel_L4_M4_22a: + + KERNEL4x4_M1 + KERNEL4x4_E + + b .Lsgemm_kernel_L4_M4_44 + +.Lsgemm_kernel_L4_M4_32: + + tst counterL, #1 + ble .Lsgemm_kernel_L4_M4_40 + + KERNEL4x4_I + KERNEL4x4_E + + b .Lsgemm_kernel_L4_M4_44 + +.Lsgemm_kernel_L4_M4_40: + + INIT4x4 + +.Lsgemm_kernel_L4_M4_44: + + ands counterL , origK, #1 + ble .Lsgemm_kernel_L4_M4_100 + +.Lsgemm_kernel_L4_M4_46: + + KERNEL4x4_SUB + +.Lsgemm_kernel_L4_M4_100: + + SAVE4x4 + +.Lsgemm_kernel_L4_M4_END: + +/******************************************************************************/ + +.Lsgemm_kernel_L4_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble .Lsgemm_kernel_L4_END + + tst counterI, #2 // counterI = counterI / 2 + ble .Lsgemm_kernel_L4_M1_BEGIN + +.Lsgemm_kernel_L4_M2_20: + + INIT2x4 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lsgemm_kernel_L4_M2_40 + +.Lsgemm_kernel_L4_M2_22: + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L4_M2_22 + + +.Lsgemm_kernel_L4_M2_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L4_M2_100 + +.Lsgemm_kernel_L4_M2_42: + + KERNEL2x4_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L4_M2_42 + +.Lsgemm_kernel_L4_M2_100: + + SAVE2x4 + +.Lsgemm_kernel_L4_M2_END: + +/******************************************************************************/ + +.Lsgemm_kernel_L4_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble .Lsgemm_kernel_L4_END + +.Lsgemm_kernel_L4_M1_20: + + INIT1x4 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lsgemm_kernel_L4_M1_40 + +.Lsgemm_kernel_L4_M1_22: + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L4_M1_22 + + +.Lsgemm_kernel_L4_M1_40: + + ands 
counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L4_M1_100 + +.Lsgemm_kernel_L4_M1_42: + + KERNEL1x4_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L4_M1_42 + +.Lsgemm_kernel_L4_M1_100: + + SAVE1x4 + +.Lsgemm_kernel_L4_END: + add origPB, origPB, origK, lsl #4 // B = B + K * 4 * 4 + +/******************************************************************************/ +/******************************************************************************/ + +.Lsgemm_kernel_L2_BEGIN: // less than 2 left in N direction + + mov counterJ , origN + tst counterJ , #3 + ble .Lsgemm_kernel_L999 + + tst counterJ , #2 + ble .Lsgemm_kernel_L1_BEGIN + + mov pCRow0, pC // pCRow0 = pC + + add pC,pC,LDC, lsl #1 + + mov pA, origPA // pA = A + +/******************************************************************************/ + +.Lsgemm_kernel_L2_M8_BEGIN: + + mov counterI, origM + asr counterI, counterI, #3 // counterI = counterI / 8 + cmp counterI,#0 + ble .Lsgemm_kernel_L2_M4_BEGIN + +.Lsgemm_kernel_L2_M8_20: + + INIT8x2 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL,#0 + ble .Lsgemm_kernel_L2_M8_40 + .align 5 + +.Lsgemm_kernel_L2_M8_22: + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L2_M8_22 + + +.Lsgemm_kernel_L2_M8_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L2_M8_100 + +.Lsgemm_kernel_L2_M8_42: + + KERNEL8x2_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L2_M8_42 + +.Lsgemm_kernel_L2_M8_100: + + SAVE8x2 + +.Lsgemm_kernel_L2_M8_END: + + subs counterI, counterI, #1 + bgt .Lsgemm_kernel_L2_M8_20 + +/******************************************************************************/ + +.Lsgemm_kernel_L2_M4_BEGIN: + + mov counterI, origM + tst counterI , #7 + ble .Lsgemm_kernel_L2_END + + tst counterI, #4 + ble .Lsgemm_kernel_L2_M2_BEGIN + +.Lsgemm_kernel_L2_M4_20: + + INIT4x2 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL,#0 + ble .Lsgemm_kernel_L2_M4_40 + .align 5 + +.Lsgemm_kernel_L2_M4_22: + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L2_M4_22 + + +.Lsgemm_kernel_L2_M4_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L2_M4_100 + +.Lsgemm_kernel_L2_M4_42: + + KERNEL4x2_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L2_M4_42 + +.Lsgemm_kernel_L2_M4_100: + + SAVE4x2 + +.Lsgemm_kernel_L2_M4_END: + +/******************************************************************************/ + +.Lsgemm_kernel_L2_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble .Lsgemm_kernel_L2_END + + tst counterI, #2 // counterI = counterI / 2 + ble .Lsgemm_kernel_L2_M1_BEGIN + +.Lsgemm_kernel_L2_M2_20: + + INIT2x2 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL,#0 + ble .Lsgemm_kernel_L2_M2_40 + +.Lsgemm_kernel_L2_M2_22: + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L2_M2_22 + + +.Lsgemm_kernel_L2_M2_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L2_M2_100 + +.Lsgemm_kernel_L2_M2_42: + + KERNEL2x2_SUB + + subs counterL, counterL, #1 + bgt 
.Lsgemm_kernel_L2_M2_42 + +.Lsgemm_kernel_L2_M2_100: + + SAVE2x2 + +.Lsgemm_kernel_L2_M2_END: + +/******************************************************************************/ + +.Lsgemm_kernel_L2_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble .Lsgemm_kernel_L2_END + +.Lsgemm_kernel_L2_M1_20: + + INIT1x2 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL, #0 + ble .Lsgemm_kernel_L2_M1_40 + +.Lsgemm_kernel_L2_M1_22: + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L2_M1_22 + + +.Lsgemm_kernel_L2_M1_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L2_M1_100 + +.Lsgemm_kernel_L2_M1_42: + + KERNEL1x2_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L2_M1_42 + +.Lsgemm_kernel_L2_M1_100: + + SAVE1x2 + +.Lsgemm_kernel_L2_END: + + add origPB, origPB, origK, lsl #3 // B = B + K * 2 * 4 + +/******************************************************************************/ +/******************************************************************************/ + +.Lsgemm_kernel_L1_BEGIN: + + mov counterJ , origN + tst counterJ , #1 + ble .Lsgemm_kernel_L999 // done + + + mov pCRow0, pC // pCRow0 = C + add pC , pC , LDC // Update pC to point to next + + mov pA, origPA // pA = A + +/******************************************************************************/ + +.Lsgemm_kernel_L1_M8_BEGIN: + + mov counterI, origM + asr counterI, counterI, #3 + cmp counterI, #0 + ble .Lsgemm_kernel_L1_M4_BEGIN + +.Lsgemm_kernel_L1_M8_20: + + INIT8x1 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lsgemm_kernel_L1_M8_40 + .align 5 + +.Lsgemm_kernel_L1_M8_22: + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L1_M8_22 + + +.Lsgemm_kernel_L1_M8_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L1_M8_100 + +.Lsgemm_kernel_L1_M8_42: + + KERNEL8x1_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L1_M8_42 + +.Lsgemm_kernel_L1_M8_100: + + SAVE8x1 + +.Lsgemm_kernel_L1_M8_END: + + subs counterI, counterI, #1 + bgt .Lsgemm_kernel_L1_M8_20 + +/******************************************************************************/ + +.Lsgemm_kernel_L1_M4_BEGIN: + + mov counterI, origM + tst counterI , #7 + ble .Lsgemm_kernel_L1_END + + tst counterI, #4 + ble .Lsgemm_kernel_L1_M2_BEGIN + +.Lsgemm_kernel_L1_M4_20: + + INIT4x1 + + mov pB, origPB + + asr counterL , origK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lsgemm_kernel_L1_M4_40 + .align 5 + +.Lsgemm_kernel_L1_M4_22: + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L1_M4_22 + + +.Lsgemm_kernel_L1_M4_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble .Lsgemm_kernel_L1_M4_100 + +.Lsgemm_kernel_L1_M4_42: + + KERNEL4x1_SUB + + subs counterL, counterL, #1 + bgt .Lsgemm_kernel_L1_M4_42 + +.Lsgemm_kernel_L1_M4_100: + + SAVE4x1 + +.Lsgemm_kernel_L1_M4_END: + +/******************************************************************************/ + +.Lsgemm_kernel_L1_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble .Lsgemm_kernel_L1_END + + tst counterI, #2 // counterI = counterI / 2 + 
ble	.Lsgemm_kernel_L1_M1_BEGIN
+
+.Lsgemm_kernel_L1_M2_20:
+
+	INIT2x1
+
+	mov	pB, origPB
+
+	asr	counterL , origK, #3		// counterL = counterL / 8
+	cmp	counterL , #0
+	ble	.Lsgemm_kernel_L1_M2_40
+
+.Lsgemm_kernel_L1_M2_22:
+
+	KERNEL2x1_SUB
+	KERNEL2x1_SUB
+	KERNEL2x1_SUB
+	KERNEL2x1_SUB
+
+	KERNEL2x1_SUB
+	KERNEL2x1_SUB
+	KERNEL2x1_SUB
+	KERNEL2x1_SUB
+
+	subs	counterL, counterL, #1
+	bgt	.Lsgemm_kernel_L1_M2_22
+
+
+.Lsgemm_kernel_L1_M2_40:
+
+	ands	counterL , origK, #7		// counterL = counterL % 8
+	ble	.Lsgemm_kernel_L1_M2_100
+
+.Lsgemm_kernel_L1_M2_42:
+
+	KERNEL2x1_SUB
+
+	subs	counterL, counterL, #1
+	bgt	.Lsgemm_kernel_L1_M2_42
+
+.Lsgemm_kernel_L1_M2_100:
+
+	SAVE2x1
+
+.Lsgemm_kernel_L1_M2_END:
+
+/******************************************************************************/
+
+.Lsgemm_kernel_L1_M1_BEGIN:
+
+	tst	counterI, #1			// counterI = counterI % 2
+	ble	.Lsgemm_kernel_L1_END
+
+.Lsgemm_kernel_L1_M1_20:
+
+	INIT1x1
+
+	mov	pB, origPB
+
+	asr	counterL , origK, #3		// counterL = counterL / 8
+	cmp	counterL , #0
+	ble	.Lsgemm_kernel_L1_M1_40
+
+.Lsgemm_kernel_L1_M1_22:
+	KERNEL1x1_SUB
+	KERNEL1x1_SUB
+	KERNEL1x1_SUB
+	KERNEL1x1_SUB
+
+	KERNEL1x1_SUB
+	KERNEL1x1_SUB
+	KERNEL1x1_SUB
+	KERNEL1x1_SUB
+
+	subs	counterL, counterL, #1
+	bgt	.Lsgemm_kernel_L1_M1_22
+
+
+.Lsgemm_kernel_L1_M1_40:
+
+	ands	counterL , origK, #7		// counterL = counterL % 8
+	ble	.Lsgemm_kernel_L1_M1_100
+
+.Lsgemm_kernel_L1_M1_42:
+
+	KERNEL1x1_SUB
+
+	subs	counterL, counterL, #1
+	bgt	.Lsgemm_kernel_L1_M1_42
+
+.Lsgemm_kernel_L1_M1_100:
+
+	SAVE1x1
+
+.Lsgemm_kernel_L1_END:
+
+/******************************************************************************/
+
+.Lsgemm_kernel_L999:
+	mov	x0, #0				// set return value
+	ldp	d8, d9, [sp, #(0 * 16)]
+	ldp	d10, d11, [sp, #(1 * 16)]
+	ldp	d12, d13, [sp, #(2 * 16)]
+	ldp	d14, d15, [sp, #(3 * 16)]
+	ldp	d16, d17, [sp, #(4 * 16)]
+	ldp	x18, x19, [sp, #(5 * 16)]
+	ldp	x20, x21, [sp, #(6 * 16)]
+	ldp	x22, x23, [sp, #(7 * 16)]
+	ldp	x24, x25, [sp, #(8 * 16)]
+	ldp	x26, x27, [sp, #(9 * 16)]
+	ldr	x28, [sp, #(10 * 16)]
+	add	sp, sp, #(11*16)
+	ret
+
+	EPILOGUE
+
diff --git a/kernel/arm64/sgemm_ncopy_4.S b/kernel/arm64/sgemm_ncopy_4.S
new file mode 100644
index 000000000..30450cc7d
--- /dev/null
+++ b/kernel/arm64/sgemm_ncopy_4.S
@@ -0,0 +1,333 @@
+/***************************************************************************
+Copyright (c) 2016, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED.
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define ASSEMBLER +#include "common.h" + +#define M x0 +#define N x1 +#define A00 x2 +#define LDA x3 +#define B00 x4 + +#define A01 x5 +#define A02 x6 +#define A03 x7 +#define A04 x8 + +#define I x9 +#define J x10 + +#define TEMP1 x11 +#define TEMP2 x12 + +#define A_PREFETCH 2560 + +/************************************************************************************** +* Macro definitions +**************************************************************************************/ + +.macro SAVE_REGS + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] +.endm + +.macro RESTORE_REGS + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) +.endm + +.macro COPY4x4 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + prfm PLDL1KEEP, [A03, #A_PREFETCH] + prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldr q0, [A01], #16 + ins v8.s[0], v0.s[0] + ins v9.s[0], v0.s[1] + ins v10.s[0], v0.s[2] + ins v11.s[0], v0.s[3] + + ldr q1, [A02], #16 + ins v8.s[1], v1.s[0] + ins v9.s[1], v1.s[1] + ins v10.s[1], v1.s[2] + ins v11.s[1], v1.s[3] + + ldr q2, [A03], #16 + ins v8.s[2], v2.s[0] + ins v9.s[2], v2.s[1] + ins v10.s[2], v2.s[2] + ins v11.s[2], v2.s[3] + + ldr q3, [A04], #16 + ins v8.s[3], v3.s[0] + ins v9.s[3], v3.s[1] + ins v10.s[3], v3.s[2] + ins v11.s[3], v3.s[3] + + st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [B00] + add B00, B00, #64 + +.endm + +.macro COPY1x4 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + prfm PLDL1KEEP, [A03, #A_PREFETCH] + prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldr s0, [A01], #4 + ldr s1, [A02], #4 + ldr s2, [A03], #4 + ldr s3, [A04], #4 + + stp s0, s1, [B00] + add B00, B00, #8 + stp s2, s3, [B00] + add B00, B00, #8 +.endm + +.macro COPY4x2 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ldr q0, [A01], #16 + ins v8.s[0], v0.s[0] + ins v9.s[0], v0.s[1] + ins v10.s[0], v0.s[2] + ins v11.s[0], v0.s[3] + + ldr q1, [A02], #16 + ins v8.s[1], v1.s[0] + ins v9.s[1], v1.s[1] + ins v10.s[1], v1.s[2] + ins v11.s[1], v1.s[3] + + st1 {v8.2s, v9.2s, v10.2s, v11.2s}, [B00] + add B00, B00, #32 +.endm + + +.macro COPY1x2 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ldr s0, [A01], #4 + ldr s1, [A02], #4 + + stp s0, s1, [B00] + add B00, B00, #8 +.endm + +.macro COPY4x1 + prfm PLDL1KEEP, [A01, 
#A_PREFETCH] + + ldr q0, [A01], #16 + str q0, [B00], #16 +.endm + + +.macro COPY1x1 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ldr s0, [A01], #4 + str s0, [B00], #4 +.endm + +/************************************************************************************** +* End of macro definitions +**************************************************************************************/ + + PROLOGUE + + .align 5 + + SAVE_REGS + + lsl LDA, LDA, #2 // LDA = LDA * SIZE + +.Ldgemm_ncopy_L4_BEGIN: + + asr J, N, #2 // J = N / 4 + cmp J, #0 + ble .Ldgemm_ncopy_L2_BEGIN + + .align 5 +.Ldgemm_ncopy_L4_M4_BEGIN: + + mov A01, A00 + add A02, A01, LDA + add A03, A02, LDA + add A04, A03, LDA + add A00, A04, LDA + + asr I, M, #2 // I = M / 4 + cmp I, #0 + ble .Ldgemm_ncopy_L4_M4_40 + + .align 5 +.Ldgemm_ncopy_L4_M4_20: + + COPY4x4 + + subs I , I , #1 + bne .Ldgemm_ncopy_L4_M4_20 + +.Ldgemm_ncopy_L4_M4_40: + + and I, M , #3 + cmp I, #0 + ble .Ldgemm_ncopy_L4_M4_END + + .align 5 +.Ldgemm_ncopy_L4_M4_60: + + COPY1x4 + + subs I , I , #1 + bne .Ldgemm_ncopy_L4_M4_60 + +.Ldgemm_ncopy_L4_M4_END: + + subs J , J, #1 // j-- + bne .Ldgemm_ncopy_L4_M4_BEGIN + +/*********************************************************************************************/ + +.Ldgemm_ncopy_L2_BEGIN: + + tst N, #3 + ble .Ldgemm_ncopy_L999 + + tst N, #2 + ble .Ldgemm_ncopy_L1_BEGIN + +.Ldgemm_ncopy_L2_M4_BEGIN: + mov A01, A00 + add A02, A01, LDA + add A00, A02, LDA + + asr I, M, #2 // I = M / 4 + cmp I, #0 + ble .Ldgemm_ncopy_L2_M4_40 + + .align 5 +.Ldgemm_ncopy_L2_M4_20: + + COPY4x2 + + subs I , I , #1 + bne .Ldgemm_ncopy_L2_M4_20 + +.Ldgemm_ncopy_L2_M4_40: + + and I, M , #3 + cmp I, #0 + ble .Ldgemm_ncopy_L2_M4_END + + .align 5 +.Ldgemm_ncopy_L2_M4_60: + + COPY1x2 + + subs I , I , #1 + bne .Ldgemm_ncopy_L2_M4_60 + +.Ldgemm_ncopy_L2_M4_END: + + +/*********************************************************************************************/ + +.Ldgemm_ncopy_L1_BEGIN: + + tst N, #1 + ble .Ldgemm_ncopy_L999 + +.Ldgemm_ncopy_L1_M4_BEGIN: + + mov A01, A00 + + asr I, M, #2 // I = M / 4 + cmp I, #0 + ble .Ldgemm_ncopy_L1_M4_40 + + .align 5 +.Ldgemm_ncopy_L1_M4_20: + + COPY4x1 + + subs I , I , #1 + bne .Ldgemm_ncopy_L1_M4_20 + + +.Ldgemm_ncopy_L1_M4_40: + + and I, M , #3 + cmp I, #0 + ble .Ldgemm_ncopy_L1_M4_END + + .align 5 +.Ldgemm_ncopy_L1_M4_60: + + COPY1x1 + + subs I , I , #1 + bne .Ldgemm_ncopy_L1_M4_60 + + +.Ldgemm_ncopy_L1_M4_END: + +.Ldgemm_ncopy_L999: + + mov x0, #0 + RESTORE_REGS + ret + + EPILOGUE + diff --git a/kernel/arm64/sgemm_ncopy_8.S b/kernel/arm64/sgemm_ncopy_8.S new file mode 100644 index 000000000..f99b1d992 --- /dev/null +++ b/kernel/arm64/sgemm_ncopy_8.S @@ -0,0 +1,562 @@ +/*************************************************************************** +Copyright (c) 2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A00 PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ +#define ASSEMBLER +#include "common.h" + +#define M x0 +#define N x1 +#define A00 x2 +#define LDA x3 +#define B00 x4 + +#define A01 x5 +#define A02 x6 +#define A03 x7 +#define A04 x8 +#define A05 x9 +#define A06 x10 +#define A07 x11 +#define A08 x12 + +#define I x13 +#define J x14 +#define K x15 + +#define TEMP1 x16 +#define TEMP2 x17 + +/************************************************************************************** +* Macro definitions +**************************************************************************************/ + +.macro SAVE_REGS + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] +.endm + +.macro RESTORE_REGS + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) +.endm + +.macro COPY4x8 + ldr q0, [A01], #16 + ldr q1, [A02], #16 + ins v8.s[0], v0.s[0] + ins v10.s[0], v0.s[1] + ins v12.s[0], v0.s[2] + ins v14.s[0], v0.s[3] + ins v8.s[1], v1.s[0] + ins v10.s[1], v1.s[1] + ins v12.s[1], v1.s[2] + ins v14.s[1], v1.s[3] + + ldr q2, [A03], #16 + ldr q3, [A04], #16 + ins v8.s[2], v2.s[0] + ins v10.s[2], v2.s[1] + ins v12.s[2], v2.s[2] + ins v14.s[2], v2.s[3] + ins v8.s[3], v3.s[0] + ins v10.s[3], v3.s[1] + ins v12.s[3], v3.s[2] + ins v14.s[3], v3.s[3] + + ldr q4, [A05], #16 + ldr q5, [A06], #16 + ins v9.s[0], v4.s[0] + ins v11.s[0], v4.s[1] + ins v13.s[0], v4.s[2] + ins v15.s[0], v4.s[3] + ins v9.s[1], v5.s[0] + ins v11.s[1], v5.s[1] + ins v13.s[1], v5.s[2] + ins v15.s[1], v5.s[3] + + ldr q6, [A07], #16 + ldr q7, [A08], #16 + ins v9.s[2], v6.s[0] + ins v11.s[2], v6.s[1] + ins v13.s[2], v6.s[2] + ins v15.s[2], v6.s[3] + ins v9.s[3], v7.s[0] + ins v11.s[3], v7.s[1] + ins v13.s[3], v7.s[2] + ins v15.s[3], v7.s[3] + + st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [B00], #64 + st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [B00], #64 +.endm + +.macro COPY2x8 + ldr d0, [A01], #8 + ldr d1, [A02], #8 + ins v8.s[0], v0.s[0] + ins v10.s[0], v0.s[1] + ins v8.s[1], v1.s[0] + ins v10.s[1], v1.s[1] + + ldr d2, [A03], #8 + ldr d3, [A04], #8 + ins v8.s[2], v2.s[0] + ins v10.s[2], v2.s[1] + ins v8.s[3], v3.s[0] + ins v10.s[3], v3.s[1] + + ldr d4, [A05], 
#8 + ldr d5, [A06], #8 + ins v9.s[0], v4.s[0] + ins v11.s[0], v4.s[1] + ins v9.s[1], v5.s[0] + ins v11.s[1], v5.s[1] + + ldr d6, [A07], #8 + ldr d7, [A08], #8 + ins v9.s[2], v6.s[0] + ins v11.s[2], v6.s[1] + ins v9.s[3], v7.s[0] + ins v11.s[3], v7.s[1] + + st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [B00], #64 +.endm + +.macro COPY1x8 + ldr s0, [A01], #4 + ldr s1, [A02], #4 + ins v8.s[0], v0.s[0] + ins v8.s[1], v1.s[0] + + ldr s2, [A03], #4 + ldr s3, [A04], #4 + ins v8.s[2], v2.s[0] + ins v8.s[3], v3.s[0] + + ldr s4, [A05], #4 + ldr s5, [A06], #4 + ins v9.s[0], v4.s[0] + ins v9.s[1], v5.s[0] + + ldr s6, [A07], #4 + ldr s7, [A08], #4 + ins v9.s[2], v6.s[0] + ins v9.s[3], v7.s[0] + + st1 {v8.4s, v9.4s}, [B00], #32 +.endm + +.macro COPY4x4 + ldr q0, [A01], #16 + ldr q1, [A02], #16 + ins v8.s[0], v0.s[0] + ins v9.s[0], v0.s[1] + ins v10.s[0], v0.s[2] + ins v11.s[0], v0.s[3] + ins v8.s[1], v1.s[0] + ins v9.s[1], v1.s[1] + ins v10.s[1], v1.s[2] + ins v11.s[1], v1.s[3] + + ldr q2, [A03], #16 + ldr q3, [A04], #16 + ins v8.s[2], v2.s[0] + ins v9.s[2], v2.s[1] + ins v10.s[2], v2.s[2] + ins v11.s[2], v2.s[3] + ins v8.s[3], v3.s[0] + ins v9.s[3], v3.s[1] + ins v10.s[3], v3.s[2] + ins v11.s[3], v3.s[3] + + st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [B00], #64 +.endm + +.macro COPY2x4 + ldr d0, [A01], #8 + ldr d1, [A02], #8 + ins v8.s[0], v0.s[0] + ins v9.s[0], v0.s[1] + ins v8.s[1], v1.s[0] + ins v9.s[1], v1.s[1] + + ldr d2, [A03], #8 + ldr d3, [A04], #8 + ins v8.s[2], v2.s[0] + ins v9.s[2], v2.s[1] + ins v8.s[3], v3.s[0] + ins v9.s[3], v3.s[1] + + st1 {v8.4s, v9.4s}, [B00], #32 +.endm + +.macro COPY1x4 + ldr s0, [A01], #4 + ldr s1, [A02], #4 + ins v8.s[0], v0.s[0] + ins v8.s[1], v1.s[0] + + ldr s2, [A03], #4 + ldr s3, [A04], #4 + ins v8.s[2], v2.s[0] + ins v8.s[3], v3.s[0] + + st1 {v8.4s}, [B00], #16 +.endm + +.macro COPY4x2 + ldr q0, [A01], #16 + ldr q1, [A02], #16 + ins v8.s[0], v0.s[0] + ins v9.s[0], v0.s[1] + ins v10.s[0], v0.s[2] + ins v11.s[0], v0.s[3] + ins v8.s[1], v1.s[0] + ins v9.s[1], v1.s[1] + ins v10.s[1], v1.s[2] + ins v11.s[1], v1.s[3] + + st1 {v8.2s, v9.2s, v10.2s, v11.2s}, [B00], #32 +.endm + +.macro COPY2x2 + ldr d0, [A01], #8 + ldr d1, [A02], #8 + ins v8.s[0], v0.s[0] + ins v9.s[0], v0.s[1] + ins v8.s[1], v1.s[0] + ins v9.s[1], v1.s[1] + + st1 {v8.2s, v9.2s}, [B00], #16 +.endm + +.macro COPY1x2 + ldr s0, [A01], #4 + ldr s1, [A02], #4 + ins v8.s[0], v0.s[0] + ins v8.s[1], v1.s[0] + + st1 {v8.2s}, [B00], #8 +.endm + +.macro COPY1x1 + ldr s0, [A01], #4 + str s0, [B00], #4 +.endm + +/************************************************************************************** +* End of macro definitions +**************************************************************************************/ + + PROLOGUE + + .align 5 + + SAVE_REGS + + lsl LDA, LDA, #2 // LDA = LDA * SIZE + +.Lsgemm_ncopy_L8_BEGIN: + + asr J, N, #3 // J = N / 8 + cmp J, #0 + ble .Lsgemm_ncopy_L4_BEGIN + + .align 5 +.Lsgemm_ncopy_L8_M4_BEGIN: + + mov A01, A00 + add A02, A01, LDA + add A03, A02, LDA + add A04, A03, LDA + add A05, A04, LDA + add A06, A05, LDA + add A07, A06, LDA + add A08, A07, LDA + add A00, A08, LDA + + asr I, M, #2 // I = M / 4 + cmp I, #0 + ble .Lsgemm_ncopy_L8_M4_40 + + asr K, M, #4 // K = M / 16(cacheline) + mov TEMP1, A01 + + .align 5 +.Lsgemm_tcopy_L8_warnup_1: + + ldr s0, [TEMP1], #64 + + subs K, K, #1 + bgt .Lsgemm_tcopy_L8_warnup_1 + + asr K, M, #4 // K = M / 16(cacheline) + mov TEMP1, A02 + + .align 5 +.Lsgemm_tcopy_L8_warnup_2: + + ldr s0, [TEMP1], #64 + + subs K, K, #1 + bgt .Lsgemm_tcopy_L8_warnup_2 + + asr K, M, 
#4 // K = M / 16(cacheline) + mov TEMP1, A03 + + .align 5 +.Lsgemm_tcopy_L8_warnup_3: + + ldr s0, [TEMP1], #64 + + subs K, K, #1 + bgt .Lsgemm_tcopy_L8_warnup_3 + + asr K, M, #4 // K = M / 16(cacheline) + mov TEMP1, A04 + + .align 5 +.Lsgemm_tcopy_L8_warnup_4: + + ldr s0, [TEMP1], #64 + + subs K, K, #1 + bgt .Lsgemm_tcopy_L8_warnup_4 + + asr K, M, #4 // K = M / 16(cacheline) + mov TEMP1, A05 + + .align 5 +.Lsgemm_tcopy_L8_warnup_5: + + ldr s0, [TEMP1], #64 + + subs K, K, #1 + bgt .Lsgemm_tcopy_L8_warnup_5 + + asr K, M, #4 // K = M / 16(cacheline) + mov TEMP1, A06 + + .align 5 +.Lsgemm_tcopy_L8_warnup_6: + + ldr s0, [TEMP1], #64 + + subs K, K, #1 + bgt .Lsgemm_tcopy_L8_warnup_6 + + asr K, M, #4 // K = M / 16(cacheline) + mov TEMP1, A07 + + .align 5 +.Lsgemm_tcopy_L8_warnup_7: + + ldr s0, [TEMP1], #64 + + subs K, K, #1 + bgt .Lsgemm_tcopy_L8_warnup_7 + + asr K, M, #4 // K = M / 16(cacheline) + mov TEMP1, A08 + + .align 5 +.Lsgemm_tcopy_L8_warnup_8: + + ldr s0, [TEMP1], #64 + + subs K, K, #1 + bgt .Lsgemm_tcopy_L8_warnup_8 + + .align 5 +.Lsgemm_ncopy_L8_M4_20: + + COPY4x8 + + subs I, I, #1 + bne .Lsgemm_ncopy_L8_M4_20 + +.Lsgemm_ncopy_L8_M4_40: + + and I, M, #2 + cmp I, #0 + ble .Lsgemm_ncopy_L8_M4_60 + + COPY2x8 + +.Lsgemm_ncopy_L8_M4_60: + + and I, M, #1 + cmp I, #0 + ble .Lsgemm_ncopy_L8_M4_END + + COPY1x8 + +.Lsgemm_ncopy_L8_M4_END: + + subs J , J, #1 // j-- + bne .Lsgemm_ncopy_L8_M4_BEGIN + +/*********************************************************************************************/ + +.Lsgemm_ncopy_L4_BEGIN: + + tst N, #7 + ble .Lsgemm_ncopy_L999 + + tst N, #4 + ble .Lsgemm_ncopy_L2_BEGIN + +.Lsgemm_ncopy_L4_M4_BEGIN: + mov A01, A00 + add A02, A01, LDA + add A03, A02, LDA + add A04, A03, LDA + add A00, A04, LDA + + asr I, M, #2 // I = M / 4 + cmp I, #0 + ble .Lsgemm_ncopy_L4_M4_40 + + .align 5 +.Lsgemm_ncopy_L4_M4_20: + + COPY4x4 + + subs I, I, #1 + bne .Lsgemm_ncopy_L4_M4_20 + +.Lsgemm_ncopy_L4_M4_40: + + and I, M, #2 + cmp I, #0 + ble .Lsgemm_ncopy_L4_M4_60 + + COPY2x4 + +.Lsgemm_ncopy_L4_M4_60: + + and I, M, #1 + cmp I, #0 + ble .Lsgemm_ncopy_L4_M4_END + + COPY1x4 + +.Lsgemm_ncopy_L4_M4_END: + + +/*********************************************************************************************/ + +.Lsgemm_ncopy_L2_BEGIN: + + tst N, #2 + ble .Lsgemm_ncopy_L1_BEGIN + +.Lsgemm_ncopy_L2_M4_BEGIN: + + mov A01, A00 + add A02, A01, LDA + add A00, A02, LDA + + asr I, M, #2 // I = M / 4 + cmp I, #0 + ble .Lsgemm_ncopy_L2_M4_40 + + .align 5 +.Lsgemm_ncopy_L2_M4_20: + + COPY4x2 + + subs I , I , #1 + bne .Lsgemm_ncopy_L2_M4_20 + + +.Lsgemm_ncopy_L2_M4_40: + + and I, M, #2 + cmp I, #0 + ble .Lsgemm_ncopy_L2_M4_60 + + COPY2x2 + +.Lsgemm_ncopy_L2_M4_60: + + and I, M, #1 + cmp I, #0 + ble .Lsgemm_ncopy_L2_M4_END + + COPY1x2 + +.Lsgemm_ncopy_L2_M4_END: + +.Lsgemm_ncopy_L1_BEGIN: + + tst N, #1 + ble .Lsgemm_ncopy_L999 + +.Lsgemm_ncopy_L1_M1_BEGIN: + + mov A01, A00 + + mov I, M + cmp I, #0 + ble .Lsgemm_ncopy_L1_M1_END + + .align 5 +.Lsgemm_ncopy_L1_M1_20: + + COPY1x1 + + subs I, I, #1 + bne .Lsgemm_ncopy_L1_M1_20 + +.Lsgemm_ncopy_L1_M1_END: + +.Lsgemm_ncopy_L999: + + mov x0, #0 + RESTORE_REGS + ret + + EPILOGUE diff --git a/kernel/arm64/sgemm_tcopy_16.S b/kernel/arm64/sgemm_tcopy_16.S new file mode 100644 index 000000000..12b80bdca --- /dev/null +++ b/kernel/arm64/sgemm_tcopy_16.S @@ -0,0 +1,824 @@ +/*************************************************************************** +Copyright (c) 2019, The OpenBLAS Project +All rights reserved. 
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + +#define ASSEMBLER +#include "common.h" + +#define M x0 +#define N x1 +#define A x2 +#define LDA x3 +#define B x4 + +#define M8 x5 + +#define A01 x6 +#define A02 x7 +#define A03 x8 +#define A04 x9 +#define A05 x10 +#define A06 x11 +#define A07 x12 +#define A08 x13 + +#define B01 x14 +#define B02 x15 +#define B03 x16 +#define B04 x17 +#define B00 x22 + + +#define I x18 +#define J x19 + +#define TEMP1 x20 + +#define A_PREFETCH 256 + +/************************************************************************************** +* Macro definitions +**************************************************************************************/ +.macro SAVE_REGS + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] +.endm + +.macro RESTORE_REGS + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) +.endm + +/*************************************************************************************************************************/ + +.macro COPY16x8 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + prfm PLDL1KEEP, [A03, #A_PREFETCH] + prfm PLDL1KEEP, [A04, #A_PREFETCH] + prfm PLDL1KEEP, [A05, #A_PREFETCH] + prfm PLDL1KEEP, [A06, #A_PREFETCH] + prfm PLDL1KEEP, [A07, #A_PREFETCH] + prfm PLDL1KEEP, [A08, #A_PREFETCH] + //prfm PSTL1KEEP, [B00, M8] + + ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [A01] + add A01, A01, #64 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B00] + add TEMP1, B00, #64 + + ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [A02] + add A02, A02, #64 + + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [A03] + add A03, A03, #64 + + st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [A04] + add A04, A04, #64 + + st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [A05] + add A05, A05, #64 + + st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [A06] + add A06, A06, #64 + + st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [A07] + add A07, A07, #64 + + st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [A08] + add A08, A08, #64 + + st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + add B00, B00, M8 + +.endm + +.macro COPY8x8 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + prfm PLDL1KEEP, [A03, #A_PREFETCH] + prfm PLDL1KEEP, [A04, #A_PREFETCH] + prfm PLDL1KEEP, [A05, #A_PREFETCH] + prfm PLDL1KEEP, [A06, #A_PREFETCH] + prfm PLDL1KEEP, [A07, #A_PREFETCH] + prfm PLDL1KEEP, [A08, #A_PREFETCH] + + ldp q0, q1, [A01] + ldp q2, q3, [A02] + add A01, A01, #32 + add A02, A02, #32 + + st1 {v0.4s, v1.4s, v2.4s, 
v3.4s}, [B01] + add B01, B01, #64 + + ldp q4, q5, [A03] + ldp q6, q7, [A04] + add A03, A03, #32 + add A04, A04, #32 + + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [B01] + add B01, B01, #64 + + ldp q8, q9, [A05] + ldp q10, q11, [A06] + add A05, A05, #32 + add A06, A06, #32 + + st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [B01] + add B01, B01, #64 + + ldp q12, q13, [A07] + ldp q14, q15, [A08] + add A07, A07, #32 + add A08, A08, #32 + + st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [B01] + add B01, B01, #64 +.endm + +.macro COPY4x8 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + //prfm PLDL1KEEP, [A05, #A_PREFETCH] + //prfm PLDL1KEEP, [A06, #A_PREFETCH] + //prfm PLDL1KEEP, [A07, #A_PREFETCH] + //prfm PLDL1KEEP, [A08, #A_PREFETCH] + + ldr q0, [A01] + ldr q1, [A02] + ldr q2, [A03] + ldr q3, [A04] + add A01, A01, #16 + add A02, A02, #16 + add A03, A03, #16 + add A04, A04, #16 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B02] + add B02, B02, #64 + + ldr q4, [A05] + ldr q5, [A06] + ldr q6, [A07] + ldr q7, [A08] + + add A05, A05, #16 + add A06, A06, #16 + add A07, A07, #16 + add A08, A08, #16 + + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [B02] + add B02, B02, #64 +.endm + +.macro COPY2x8 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + //prfm PLDL1KEEP, [A05, #A_PREFETCH] + //prfm PLDL1KEEP, [A06, #A_PREFETCH] + //prfm PLDL1KEEP, [A07, #A_PREFETCH] + //prfm PLDL1KEEP, [A08, #A_PREFETCH] + + ldr d0, [A01] + ldr d1, [A02] + ldr d2, [A03] + ldr d3, [A04] + + add A01, A01, #8 + add A02, A02, #8 + add A03, A03, #8 + add A04, A04, #8 + + stp d0, d1, [B03] + add B03, B03, #16 + stp d2, d3, [B03] + add B03, B03, #16 + + ldr d4, [A05] + ldr d5, [A06] + ldr d6, [A07] + ldr d7, [A08] + + add A05, A05, #8 + add A06, A06, #8 + add A07, A07, #8 + add A08, A08, #8 + + stp d4, d5, [B03] + add B03, B03, #16 + stp d6, d7, [B03] + add B03, B03, #16 + +.endm + +.macro COPY1x8 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + //prfm PLDL1KEEP, [A05, #A_PREFETCH] + //prfm PLDL1KEEP, [A06, #A_PREFETCH] + //prfm PLDL1KEEP, [A07, #A_PREFETCH] + //prfm PLDL1KEEP, [A08, #A_PREFETCH] + + ldr s0, [A01] + ldr s1, [A02] + ldr s2, [A03] + ldr s3, [A04] + + add A01, A01, #4 + add A02, A02, #4 + add A03, A03, #4 + add A04, A04, #4 + + stp s0, s1, [B04] + add B04, B04, #8 + stp s2, s3, [B04] + add B04, B04, #8 + + ldr s4, [A05] + ldr s5, [A06] + ldr s6, [A07] + ldr s7, [A08] + + ldr d4, [A05], #8 + ldr d5, [A06], #8 + ldr d6, [A07], #8 + ldr d7, [A08], #8 + + stp s4, s5, [B04] + add B04, B04, #8 + stp s6, s7, [B04] + add B04, B04, #8 + +.endm + +/*************************************************************************************************************************/ +.macro COPY16x4 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + prfm PLDL1KEEP, [A03, #A_PREFETCH] + prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [A01] + add A01, A01, #64 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B00] + add TEMP1, B00, #64 + + ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [A02] + add A02, A02, #64 + + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [A03] + add A03, A03, #64 + + st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ld1 {v12.4s, v13.4s, 
v14.4s, v15.4s}, [A04] + add A04, A04, #64 + + st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [TEMP1] + + add B00, B00, M8 +.endm + +.macro COPY8x4 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + prfm PLDL1KEEP, [A03, #A_PREFETCH] + prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldp q0, q1, [A01] + ldp q2, q3, [A02] + add A01, A01, #32 + add A02, A02, #32 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B01] + add B01, B01, #64 + + ldp q4, q5, [A03] + ldp q6, q7, [A04] + add A03, A03, #32 + add A04, A04, #32 + + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [B01] + add B01, B01, #64 +.endm + +.macro COPY4x4 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldr q0, [A01] + ldr q1, [A02] + ldr q2, [A03] + ldr q3, [A04] + add A01, A01, #16 + add A02, A02, #16 + add A03, A03, #16 + add A04, A04, #16 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B02] + + add B02, B02, #64 +.endm + +.macro COPY2x4 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldr d0, [A01] + ldr d1, [A02] + ldr d2, [A03] + ldr d3, [A04] + + add A01, A01, #8 + add A02, A02, #8 + add A03, A03, #8 + add A04, A04, #8 + + stp d0, d1, [B03] + add B03, B03, #16 + stp d2, d3, [B03] + + add B03, B03, #16 +.endm + +.macro COPY1x4 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldr s0, [A01] + ldr s1, [A02] + ldr s2, [A03] + ldr s3, [A04] + + add A01, A01, #4 + add A02, A02, #4 + add A03, A03, #4 + add A04, A04, #4 + + stp s0, s1, [B04] + add B04, B04, #8 + stp s2, s3, [B04] + add B04, B04, #8 + +.endm + +/*************************************************************************************************************************/ + +.macro COPY16x2 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [A01] + add A01, A01, #64 + + ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [A02] + add A02, A02, #64 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B00] + add TEMP1, B00, #64 + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [TEMP1] + add B00, B00, M8 +.endm + +.macro COPY8x2 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ld1 {v0.4s, v1.4s}, [A01] + ld1 {v2.4s, v3.4s}, [A02] + add A01, A01, #32 + add A02, A02, #32 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B01] + add B01, B01, #64 +.endm + +.macro COPY4x2 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ldr q0, [A01] + ldr q1, [A02] + add A01, A01, #16 + add A02, A02, #16 + + stp q0, q1, [B02] + add B02, B02, #32 +.endm + +.macro COPY2x2 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ldr d0, [A01] + ldr d1, [A02] + + add A01, A01, #8 + add A02, A02, #8 + + stp d0, d1, [B03] + add B03, B03, #16 +.endm + +.macro COPY1x2 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ldr s0, [A01] + ldr s1, [A02] + + add A01, A01, #4 + add A02, A02, #4 + + stp s0, s1, [B04] + + add B04, B04, #8 +.endm + +/*************************************************************************************************************************/ + +.macro COPY16x1 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [A01] + add A01, A01, #64 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B00] + add B00, B00, M8 +.endm + +.macro COPY8x1 + prfm PLDL1KEEP, 
[A01, #A_PREFETCH] + + ldp q0, q1, [A01] + add A01, A01, #32 + stp q0, q1, [B01] + + add B01, B01, #32 +.endm + +.macro COPY4x1 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ldr q0, [A01] + add A01, A01, #16 + str q0, [B02] + + add B02, B02, #16 +.endm + +.macro COPY2x1 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ldr d0, [A01] + add A01, A01, #8 + str d0, [B03] + + add B03, B03, #8 +.endm + +.macro COPY1x1 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ldr s0, [A01] + add A01, A01, #4 + str s0, [B04] + + add B04, B04, #4 +.endm + +/************************************************************************************** +* End of macro definitions +**************************************************************************************/ + + PROLOGUE + + .align 5 + + SAVE_REGS + + lsl LDA, LDA, #2 // LDA = LDA * SIZE + + lsl TEMP1, M, #2 // TEMP1 = M * SIZE + + and B01 , N , #-16 + and B02 , N , #-8 + and B03 , N , #-4 + and B04 , N , #-2 + + mul B01, B01, TEMP1 + mul B02, B02, TEMP1 + mul B03, B03, TEMP1 + mul B04, B04, TEMP1 + + add B01 , B01, B + add B02 , B02, B + add B03 , B03, B + add B04 , B04, B + + lsl M8, M, #6 // M8 = M * 16 * SIZE + +.Lsgemm_tcopy_L8_BEGIN: + asr J, M, #3 // J = M / 8 + cmp J, #0 + ble .Lsgemm_tcopy_L4_BEGIN + + .align 5 +.Lsgemm_tcopy_L8_M16_BEGIN: + + mov A01, A + add A02, A01, LDA + add A03, A02, LDA + add A04, A03, LDA + add A05, A04, LDA + add A06, A05, LDA + add A07, A06, LDA + add A08, A07, LDA + add A, A08, LDA + + mov B00, B + add B, B00, #512 // B = B + 8 * 16 * SIZE + + asr I, N, #4 // I = N / 16 + cmp I, #0 + ble .Lsgemm_tcopy_L8_M16_40 + + .align 5 +.Lsgemm_tcopy_L8_M16_20: + + COPY16x8 + + subs I , I , #1 + bne .Lsgemm_tcopy_L8_M16_20 + +.Lsgemm_tcopy_L8_M16_40: + tst N , #8 + ble .Lsgemm_tcopy_L8_M16_60 + + COPY8x8 + +.Lsgemm_tcopy_L8_M16_60: + tst N , #4 + ble .Lsgemm_tcopy_L8_M16_80 + + COPY4x8 + +.Lsgemm_tcopy_L8_M16_80: + + tst N , #2 + ble .Lsgemm_tcopy_L8_M16_100 + + COPY2x8 + +.Lsgemm_tcopy_L8_M16_100: + + tst N, #1 + ble .Lsgemm_tcopy_L8_M16_END + + COPY1x8 + +.Lsgemm_tcopy_L8_M16_END: + + subs J , J, #1 // j-- + bne .Lsgemm_tcopy_L8_M16_BEGIN + +/*********************************************************************************************/ + +.Lsgemm_tcopy_L4_BEGIN: + tst M, #7 + ble .Lsgemm_tcopy_L999 + + tst M, #4 + ble .Lsgemm_tcopy_L2_BEGIN + +.Lsgemm_tcopy_L4_M16_BEGIN: + + mov A01, A + add A02, A01, LDA + add A03, A02, LDA + add A04, A03, LDA + add A, A04, LDA + + mov B00, B + add B, B00, #256 // B = B + 4 * 16 * SIZE + + asr I, N, #4 // I = N / 16 + cmp I, #0 + ble .Lsgemm_tcopy_L4_M16_40 + + .align 5 +.Lsgemm_tcopy_L4_M16_20: + + COPY16x4 + + subs I , I , #1 + bne .Lsgemm_tcopy_L4_M16_20 + +.Lsgemm_tcopy_L4_M16_40: + tst N , #8 + ble .Lsgemm_tcopy_L4_M16_60 + + COPY8x4 + +.Lsgemm_tcopy_L4_M16_60: + tst N , #4 + ble .Lsgemm_tcopy_L4_M16_80 + + COPY4x4 + +.Lsgemm_tcopy_L4_M16_80: + + tst N , #2 + ble .Lsgemm_tcopy_L4_M16_100 + + COPY2x4 + + +.Lsgemm_tcopy_L4_M16_100: + + tst N, #1 + ble .Lsgemm_tcopy_L4_M16_END + + COPY1x4 + + +.Lsgemm_tcopy_L4_M16_END: + +/*********************************************************************************************/ + +.Lsgemm_tcopy_L2_BEGIN: + + tst M, #3 + ble .Lsgemm_tcopy_L999 + + tst M, #2 + ble .Lsgemm_tcopy_L1_BEGIN + +.Lsgemm_tcopy_L2_M16_BEGIN: + mov A01, A + add A02, A01, LDA + add A, A02, LDA + + mov B00, B + add B, B00, #128 // B = B + 2 * 16 * SIZE + + asr I, N, #4 // I = N / 16 + cmp I, #0 + ble .Lsgemm_tcopy_L2_M16_40 + + .align 5 +.Lsgemm_tcopy_L2_M16_20: + + COPY16x2 + + subs I , I , #1 + bne 
.Lsgemm_tcopy_L2_M16_20 + +.Lsgemm_tcopy_L2_M16_40: + tst N , #8 + ble .Lsgemm_tcopy_L2_M16_60 + + COPY8x2 + +.Lsgemm_tcopy_L2_M16_60: + tst N , #4 + ble .Lsgemm_tcopy_L2_M16_80 + + COPY4x2 + +.Lsgemm_tcopy_L2_M16_80: + + tst N , #2 + ble .Lsgemm_tcopy_L2_M16_100 + + COPY2x2 + +.Lsgemm_tcopy_L2_M16_100: + + tst N , #1 + ble .Lsgemm_tcopy_L2_M16_END + + COPY1x2 + +.Lsgemm_tcopy_L2_M16_END: + +/*********************************************************************************************/ + +.Lsgemm_tcopy_L1_BEGIN: + + tst M, #1 + ble .Lsgemm_tcopy_L999 + + +.Lsgemm_tcopy_L1_M16_BEGIN: + + mov A01, A // A01 = A + mov B00, B + + asr I, N, #4 // I = M / 16 + cmp I, #0 + ble .Lsgemm_tcopy_L1_M16_40 + + .align 5 +.Lsgemm_tcopy_L1_M16_20: + + COPY16x1 + + subs I , I , #1 + bne .Lsgemm_tcopy_L1_M16_20 + +.Lsgemm_tcopy_L1_M16_40: + tst N , #8 + ble .Lsgemm_tcopy_L1_M16_60 + + COPY8x1 + +.Lsgemm_tcopy_L1_M16_60: + tst N , #4 + ble .Lsgemm_tcopy_L1_M16_80 + + COPY4x1 + +.Lsgemm_tcopy_L1_M16_80: + + tst N , #2 + ble .Lsgemm_tcopy_L1_M16_100 + + COPY2x1 + +.Lsgemm_tcopy_L1_M16_100: + + tst N , #1 + ble .Lsgemm_tcopy_L1_M16_END + + COPY1x1 + + +.Lsgemm_tcopy_L1_M16_END: + +.Lsgemm_tcopy_L999: + mov x0, #0 // set return value + RESTORE_REGS + ret + + EPILOGUE + + diff --git a/kernel/arm64/sgemm_tcopy_8.S b/kernel/arm64/sgemm_tcopy_8.S new file mode 100644 index 000000000..7d81ba266 --- /dev/null +++ b/kernel/arm64/sgemm_tcopy_8.S @@ -0,0 +1,707 @@ +/*************************************************************************** +Copyright (c) 2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A00 PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ +#define ASSEMBLER +#include "common.h" + +#define M x0 +#define N x1 +#define A x2 +#define LDA x3 +#define B x4 + +#define M8 x5 + +#define A01 x6 +#define A02 x7 +#define A03 x8 +#define A04 x9 +#define A05 x10 +#define A06 x11 +#define A07 x12 +#define A08 x13 + +#define B01 x14 +#define B02 x15 +#define B03 x16 +#define B04 x17 +#define B00 x22 + + +#define I x18 +#define J x19 + +#define TEMP1 x20 + +#define A_PREFETCH 256 + +/************************************************************************************** +* Macro definitions +**************************************************************************************/ +.macro SAVE_REGS + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] +.endm + +.macro RESTORE_REGS + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) +.endm + +/*************************************************************************************************************************/ + +.macro COPY8x8 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + prfm PLDL1KEEP, [A03, #A_PREFETCH] + prfm PLDL1KEEP, [A04, #A_PREFETCH] + prfm PLDL1KEEP, [A05, #A_PREFETCH] + prfm PLDL1KEEP, [A06, #A_PREFETCH] + prfm PLDL1KEEP, [A07, #A_PREFETCH] + prfm PLDL1KEEP, [A08, #A_PREFETCH] + + ldp q0, q1, [A01] + ldp q2, q3, [A02] + add A01, A01, #32 + add A02, A02, #32 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B00] + add TEMP1, B00, #64 + + ldp q4, q5, [A03] + ldp q6, q7, [A04] + add A03, A03, #32 + add A04, A04, #32 + + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ldp q8, q9, [A05] + ldp q10, q11, [A06] + add A05, A05, #32 + add A06, A06, #32 + + st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + ldp q12, q13, [A07] + ldp q14, q15, [A08] + add A07, A07, #32 + add A08, A08, #32 + + st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + add B00, B00, M8 +.endm + +.macro COPY4x8 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + //prfm PLDL1KEEP, [A05, #A_PREFETCH] + //prfm PLDL1KEEP, [A06, #A_PREFETCH] + //prfm PLDL1KEEP, [A07, #A_PREFETCH] + //prfm PLDL1KEEP, [A08, #A_PREFETCH] + + ldr q0, [A01] + ldr q1, [A02] + ldr q2, [A03] + ldr q3, [A04] + add A01, A01, #16 + add A02, A02, #16 + add A03, A03, #16 + add A04, A04, #16 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B01] + add B01, B01, #64 + + ldr q4, [A05] + ldr q5, [A06] + ldr q6, [A07] + ldr q7, [A08] + + add A05, A05, #16 + add A06, A06, #16 + add A07, A07, #16 + add A08, A08, #16 + + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [B01] + add B01, B01, #64 +.endm + +.macro COPY2x8 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + //prfm PLDL1KEEP, [A05, 
#A_PREFETCH] + //prfm PLDL1KEEP, [A06, #A_PREFETCH] + //prfm PLDL1KEEP, [A07, #A_PREFETCH] + //prfm PLDL1KEEP, [A08, #A_PREFETCH] + + ldr d0, [A01] + ldr d1, [A02] + ldr d2, [A03] + ldr d3, [A04] + + add A01, A01, #8 + add A02, A02, #8 + add A03, A03, #8 + add A04, A04, #8 + + stp d0, d1, [B02] + add B02, B02, #16 + stp d2, d3, [B02] + add B02, B02, #16 + + ldr d4, [A05] + ldr d5, [A06] + ldr d6, [A07] + ldr d7, [A08] + + add A05, A05, #8 + add A06, A06, #8 + add A07, A07, #8 + add A08, A08, #8 + + stp d4, d5, [B02] + add B02, B02, #16 + stp d6, d7, [B02] + add B02, B02, #16 + +.endm + +.macro COPY1x8 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + //prfm PLDL1KEEP, [A05, #A_PREFETCH] + //prfm PLDL1KEEP, [A06, #A_PREFETCH] + //prfm PLDL1KEEP, [A07, #A_PREFETCH] + //prfm PLDL1KEEP, [A08, #A_PREFETCH] + + ldr s0, [A01] + ldr s1, [A02] + ldr s2, [A03] + ldr s3, [A04] + + add A01, A01, #4 + add A02, A02, #4 + add A03, A03, #4 + add A04, A04, #4 + + stp s0, s1, [B03] + add B03, B03, #8 + stp s2, s3, [B03] + add B03, B03, #8 + + ldr s4, [A05] + ldr s5, [A06] + ldr s6, [A07] + ldr s7, [A08] + + ldr d4, [A05], #8 + ldr d5, [A06], #8 + ldr d6, [A07], #8 + ldr d7, [A08], #8 + + stp s4, s5, [B03] + add B03, B03, #8 + stp s6, s7, [B03] + add B03, B03, #8 + +.endm + +/*************************************************************************************************************************/ + +.macro COPY8x4 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + prfm PLDL1KEEP, [A03, #A_PREFETCH] + prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldp q0, q1, [A01] + ldp q2, q3, [A02] + add A01, A01, #32 + add A02, A02, #32 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B00] + add TEMP1, B00, #64 + + ldp q4, q5, [A03] + ldp q6, q7, [A04] + add A03, A03, #32 + add A04, A04, #32 + + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [TEMP1] + add TEMP1, TEMP1, #64 + + add B00, B00, M8 +.endm + +.macro COPY4x4 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldr q0, [A01] + ldr q1, [A02] + ldr q2, [A03] + ldr q3, [A04] + add A01, A01, #16 + add A02, A02, #16 + add A03, A03, #16 + add A04, A04, #16 + + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [B01] + + add B01, B01, #64 +.endm + +.macro COPY2x4 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldr d0, [A01] + ldr d1, [A02] + ldr d2, [A03] + ldr d3, [A04] + + add A01, A01, #8 + add A02, A02, #8 + add A03, A03, #8 + add A04, A04, #8 + + stp d0, d1, [B02] + add B02, B02, #16 + stp d2, d3, [B02] + + add B02, B02, #16 +.endm + +.macro COPY1x4 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + //prfm PLDL1KEEP, [A03, #A_PREFETCH] + //prfm PLDL1KEEP, [A04, #A_PREFETCH] + + ldr s0, [A01] + ldr s1, [A02] + ldr s2, [A03] + ldr s3, [A04] + + add A01, A01, #4 + add A02, A02, #4 + add A03, A03, #4 + add A04, A04, #4 + + stp s0, s1, [B03] + add B03, B03, #8 + stp s2, s3, [B03] + add B03, B03, #8 + +.endm + +/*************************************************************************************************************************/ + +.macro COPY8x2 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ld1 {v0.4s, v1.4s}, [A01] + ld1 {v2.4s, v3.4s}, [A02] + add A01, A01, #32 + add A02, A02, #32 + + st1 {v0.4s, v1.4s, 
v2.4s, v3.4s}, [B00] + add B00, B00, M8 +.endm + +.macro COPY4x2 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ldr q0, [A01] + ldr q1, [A02] + add A01, A01, #16 + add A02, A02, #16 + + stp q0, q1, [B01] + add B01, B01, #32 +.endm + +.macro COPY2x2 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ldr d0, [A01] + ldr d1, [A02] + + add A01, A01, #8 + add A02, A02, #8 + + stp d0, d1, [B02] + add B02, B02, #16 +.endm + +.macro COPY1x2 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + //prfm PLDL1KEEP, [A02, #A_PREFETCH] + + ldr s0, [A01] + ldr s1, [A02] + + add A01, A01, #4 + add A02, A02, #4 + + stp s0, s1, [B03] + + add B03, B03, #8 +.endm + +/*************************************************************************************************************************/ + +.macro COPY8x1 + prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ldp q0, q1, [A01] + add A01, A01, #32 + stp q0, q1, [B00] + + add B00, B00, M8 +.endm + +.macro COPY4x1 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ldr q0, [A01] + add A01, A01, #16 + str q0, [B01] + + add B01, B01, #16 +.endm + +.macro COPY2x1 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ldr d0, [A01] + add A01, A01, #8 + str d0, [B02] + + add B02, B02, #8 +.endm + +.macro COPY1x1 + //prfm PLDL1KEEP, [A01, #A_PREFETCH] + + ldr s0, [A01] + add A01, A01, #4 + str s0, [B03] + + add B03, B03, #4 +.endm + +/************************************************************************************** +* End of macro definitions +**************************************************************************************/ + + PROLOGUE + + .align 5 + + SAVE_REGS + + lsl LDA, LDA, #2 // LDA = LDA * SIZE + + lsl TEMP1, M, #2 // TEMP1 = M * SIZE + + and B01 , N , #-8 + and B02 , N , #-4 + and B03 , N , #-2 + + mul B01, B01, TEMP1 + mul B02, B02, TEMP1 + mul B03, B03, TEMP1 + + add B01 , B01, B + add B02 , B02, B + add B03 , B03, B + + lsl M8, M, #5 // M8 = M * 8 * SIZE + +.Lsgemm_tcopy_L8_BEGIN: + + asr J, M, #3 // J = M / 8 + cmp J, #0 + ble .Lsgemm_tcopy_L4_BEGIN + + .align 5 +.Lsgemm_tcopy_L8_M8_BEGIN: + + mov A01, A + add A02, A01, LDA + add A03, A02, LDA + add A04, A03, LDA + add A05, A04, LDA + add A06, A05, LDA + add A07, A06, LDA + add A08, A07, LDA + add A, A08, LDA + + mov B00, B + add B, B00, #256 // B = B + 8 * 8 * SIZE + + asr I, N, #3 // I = N / 8 + cmp I, #0 + ble .Lsgemm_tcopy_L8_M8_40 + + .align 5 +.Lsgemm_tcopy_L8_M8_20: + + COPY8x8 + + subs I , I , #1 + bne .Lsgemm_tcopy_L8_M8_20 + +.Lsgemm_tcopy_L8_M8_40: + + tst N , #4 + ble .Lsgemm_tcopy_L8_M8_60 + + COPY4x8 + +.Lsgemm_tcopy_L8_M8_60: + + tst N , #2 + ble .Lsgemm_tcopy_L8_M8_80 + + COPY2x8 + +.Lsgemm_tcopy_L8_M8_80: + + tst N, #1 + ble .Lsgemm_tcopy_L8_M8_END + + COPY1x8 + +.Lsgemm_tcopy_L8_M8_END: + + subs J, J, #1 // j-- + bne .Lsgemm_tcopy_L8_M8_BEGIN + +/*********************************************************************************************/ + +.Lsgemm_tcopy_L4_BEGIN: + + tst M, #7 + ble .Lsgemm_tcopy_L999 + + tst M, #4 + ble .Lsgemm_tcopy_L2_BEGIN + +.Lsgemm_tcopy_L4_M8_BEGIN: + + mov A01, A + add A02, A01, LDA + add A03, A02, LDA + add A04, A03, LDA + add A, A04, LDA + + mov B00, B + add B, B00, #128 // B = B + 4 * 8 * SIZE + + asr I, N, #3 // I = N / 8 + cmp I, #0 + ble .Lsgemm_tcopy_L4_M8_40 + + .align 5 +.Lsgemm_tcopy_L4_M8_20: + + COPY8x4 + + subs I , I , #1 + bne .Lsgemm_tcopy_L4_M8_20 + +.Lsgemm_tcopy_L4_M8_40: + + tst N , #4 + ble .Lsgemm_tcopy_L4_M8_60 + + COPY4x4 + +.Lsgemm_tcopy_L4_M8_60: + + tst N , #2 + ble .Lsgemm_tcopy_L4_M8_80 + + COPY2x4 + 
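+	// Each block width streams through its own output cursor: full 8-wide
+	// panels go through B00, which advances by M8 = M * 8 * SIZE per panel,
+	// while the 4-, 2- and 1-wide tails go through B01/B02/B03, whose bases
+	// were precomputed in the prologue. A minimal C sketch of those base
+	// addresses, assuming single precision (SIZE == 4):
+	//
+	//   float *b01 = B + (size_t)(N & ~7) * M;  /* 4-wide tail region */
+	//   float *b02 = B + (size_t)(N & ~3) * M;  /* 2-wide tail region */
+	//   float *b03 = B + (size_t)(N & ~1) * M;  /* 1-wide tail region */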
+.Lsgemm_tcopy_L4_M8_80: + + tst N , #1 + ble .Lsgemm_tcopy_L4_M8_END + + COPY1x4 + + +.Lsgemm_tcopy_L4_M8_END: + +/*********************************************************************************************/ + +.Lsgemm_tcopy_L2_BEGIN: + + tst M, #3 + ble .Lsgemm_tcopy_L999 + + tst M, #2 + ble .Lsgemm_tcopy_L1_BEGIN + +.Lsgemm_tcopy_L2_M16_BEGIN: + + mov A01, A + add A02, A01, LDA + add A, A02, LDA + + mov B00, B + add B, B00, #64 // B = B + 2 * 8 * SIZE + + asr I, N, #3 // I = N / 8 + cmp I, #0 + ble .Lsgemm_tcopy_L2_M8_40 + + .align 5 +.Lsgemm_tcopy_L2_M8_20: + + COPY8x2 + + subs I , I , #1 + bne .Lsgemm_tcopy_L2_M8_20 + +.Lsgemm_tcopy_L2_M8_40: + + tst N , #4 + ble .Lsgemm_tcopy_L2_M8_60 + + COPY4x2 + +.Lsgemm_tcopy_L2_M8_60: + + tst N , #2 + ble .Lsgemm_tcopy_L2_M8_80 + + COPY2x2 + +.Lsgemm_tcopy_L2_M8_80: + + tst N , #1 + ble .Lsgemm_tcopy_L2_M8_END + + COPY1x2 + +.Lsgemm_tcopy_L2_M8_END: + +/*********************************************************************************************/ + +.Lsgemm_tcopy_L1_BEGIN: + + tst M, #1 + ble .Lsgemm_tcopy_L999 + + +.Lsgemm_tcopy_L1_M16_BEGIN: + + mov A01, A // A01 = A + mov B00, B + + asr I, N, #3 // I = M / 8 + cmp I, #0 + ble .Lsgemm_tcopy_L1_M8_40 + + .align 5 +.Lsgemm_tcopy_L1_M8_20: + + COPY8x1 + + subs I , I , #1 + bne .Lsgemm_tcopy_L1_M8_20 + +.Lsgemm_tcopy_L1_M8_40: + + tst N , #4 + ble .Lsgemm_tcopy_L1_M8_60 + + COPY4x1 + +.Lsgemm_tcopy_L1_M8_60: + + tst N , #2 + ble .Lsgemm_tcopy_L1_M8_80 + + COPY2x1 + +.Lsgemm_tcopy_L1_M8_80: + + tst N , #1 + ble .Lsgemm_tcopy_L1_M8_END + + COPY1x1 + + +.Lsgemm_tcopy_L1_M8_END: + +.Lsgemm_tcopy_L999: + + mov x0, #0 // set return value + RESTORE_REGS + ret + + EPILOGUE diff --git a/kernel/arm64/strmm_kernel_8x8_cortexa53.S b/kernel/arm64/strmm_kernel_8x8_cortexa53.S new file mode 100644 index 000000000..4b84623f3 --- /dev/null +++ b/kernel/arm64/strmm_kernel_8x8_cortexa53.S @@ -0,0 +1,2823 @@ +/******************************************************************************* +Copyright (c) 2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*******************************************************************************/ +#define ASSEMBLER +#include "common.h" + +/* X0 X1 X2 s0 X3 x4 x5 x6 x7 */ +/*int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc, BLASLONG offset) */ + +#define origM x0 +#define origN x1 +#define origK x2 +#define origPA x3 +#define origPB x4 +#define pC x5 +#define LDC x6 +#define offset x7 +#define counterL x8 +#define counterI x9 +#define counterJ x10 +#define pB x11 +#define pCRow0 x12 +#define pCRow1 x13 +#define pCRow2 x14 +#define pA x15 +#define temp x16 +#define tempOffset x17 +#define tempK x18 + +#define alpha0 s10 +#define alphaV0 v10.s[0] +#define alpha1 s11 +#define alphaV1 v11.s[0] +#define alpha2 s14 +#define alphaV2 v14.s[0] +#define alpha3 s15 +#define alphaV3 v15.s[0] + +// 00 origM +// 01 origN +// 02 origK +// 03 origPA +// 04 origPB +// 05 pC +// 06 origLDC -> LDC +// 07 offset +// 08 counterL +// 09 counterI +// 10 counterJ +// 11 pB +// 12 pCRow0 +// 13 pCRow1 +// 14 pCRow2 +// 15 pA +// 16 temp +// 17 tempOffset +// 18 must save tempK +// 19 must save +// 20 must save pA0_2, pA0_3 +// 21 must save pA0_6, pA0_7 +// 22 must save pA1_2, pA1_3 +// 23 must save pA1_6, pA1_7 +// 24 must save pB0_2, pB0_3 +// 25 must save pB0_6, pB0_7 +// 26 must save pB1_2, pB1_3 +// 27 must save pB1_6, pB1_7 +// 28 must save +// 29 frame +// 30 link +// 31 sp + +//v00 ALPHA -> pA0_0, pA0_1, pA0_2, pA0_3 +//v01 pA0_4, pA0_5, pA0_6, pA0_7 +//v02 pA1_0, pA1_1, pA1_2, pA1_3 +//v03 pA1_4, pA1_5, pA1_6, pA1_7 +//v04 pB0_0, pB0_1, pB0_2, pB0_3 +//v05 pB0_4, pB0_5, pB0_6, pB0_7 +//v06 pB1_0, pB1_1, pB1_2, pB1_3 +//v07 pB1_4, pB1_5, pB1_6, pB1_7 +//v08 must save +//v09 must save +//v10 must save ALPHA0 +//v11 must save ALPHA1 +//v12 must save +//v13 must save +//v14 must save ALPHA2 +//v15 must save ALPHA3 +//v16 must save C00, C01, C02, C03 +//v17 must save C04, C05, C06, C07 +//v18 C08, C09, C10, C11 +//v19 C12, C13, C14, C15 +//v20 C16, C17, C18, C19 +//v21 C20, C21, C22, C23 +//v22 C24, C25, C26, C27 +//v23 C28, C29, C30, C31 +//v24 C32, C33, C34, C35 +//v25 C36, C37, C38, C39 +//v26 C40, C41, C42, C43 +//v27 C44, C45, C46, C47 +//v28 C48, C49, C50, C51 +//v29 C52, C53, C54, C55 +//v30 C56, C57, C58, C59 +//v31 C60, C61, C62, C63 + +/******************************************************************************* +* Macro definitions +*******************************************************************************/ + +.macro INIT8x8 + fmov s16, wzr + fmov s17, wzr + fmov s18, s16 + fmov s19, s17 + fmov s20, wzr + fmov s21, s16 + fmov s22, s17 + fmov s23, s18 + fmov s24, wzr + fmov s25, s16 + fmov s26, s17 + fmov s27, s18 + fmov s28, wzr + fmov s29, s16 + fmov s30, s17 + fmov s31, s18 +.endm + +.macro KERNEL8x8_I + ld1 {v0.4s, v1.4s}, [pA], #32 + ld1 {v4.4s, v5.4s}, [pB], #32 + ldr d2, [pA], #8 + ldr d6, [pB], #8 + ldr d3, [pA, #8] + ldr d7, [pB, #8] + + ldr x22, [pA], #16 + fmul v16.4s, v0.4s, v4.s[0] + ldr x26, [pB], #16 + fmul v17.4s, v1.4s, v4.s[0] + ldr x23, [pA], #8 + fmul v18.4s, v0.4s, v4.s[1] + ldr x27, [pB], #8 + fmul v19.4s, v1.4s, v4.s[1] + fmul v20.4s, v0.4s, v4.s[2] + fmul v21.4s, v1.4s, v4.s[2] + fmul v22.4s, v0.4s, v4.s[3] + fmul v23.4s, v1.4s, v4.s[3] + fmul v24.4s, v0.4s, v5.s[0] + fmul v25.4s, v1.4s, v5.s[0] + fmul v26.4s, v0.4s, v5.s[1] + fmul v27.4s, v1.4s, v5.s[1] + fmul v28.4s, v0.4s, v5.s[2] + fmul v29.4s, v1.4s, v5.s[2] + fmul v30.4s, v0.4s, v5.s[3] + fmul v31.4s, v1.4s, v5.s[3] +.endm + +.macro KERNEL8x8_M1 + ldr d2, [pA], #8 + fmov 
v0.d[1], x20 + ldr d6, [pB], #8 + fmov v4.d[1], x24 + ldr d3, [pA, #8] + fmov v1.d[1], x21 + ldr d7, [pB, #8] + fmov v5.d[1], x25 + fmla v16.4s, v0.4s, v4.s[0] + ldr x22, [pA], #16 + fmla v17.4s, v1.4s, v4.s[0] + ldr x26, [pB], #16 + fmla v18.4s, v0.4s, v4.s[1] + ldr x23, [pA], #8 + fmla v19.4s, v1.4s, v4.s[1] + ldr x27, [pB], #8 + fmla v20.4s, v0.4s, v4.s[2] + fmla v21.4s, v1.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] + fmla v23.4s, v1.4s, v4.s[3] + fmla v24.4s, v0.4s, v5.s[0] + fmla v25.4s, v1.4s, v5.s[0] + fmla v26.4s, v0.4s, v5.s[1] + fmla v27.4s, v1.4s, v5.s[1] + fmla v28.4s, v0.4s, v5.s[2] + fmla v29.4s, v1.4s, v5.s[2] + fmla v30.4s, v0.4s, v5.s[3] + fmla v31.4s, v1.4s, v5.s[3] +.endm + +.macro KERNEL8x8_M2 + ldr d0, [pA], #8 + fmov v2.d[1], x22 + ldr d4, [pB], #8 + fmov v6.d[1], x26 + ldr d1, [pA, #8] + fmov v3.d[1], x23 + ldr d5, [pB, #8] + fmov v7.d[1], x27 + fmla v16.4s, v2.4s, v6.s[0] + ldr x20, [pA], #16 + fmla v17.4s, v3.4s, v6.s[0] + ldr x24, [pB], #16 + fmla v18.4s, v2.4s, v6.s[1] + ldr x21, [pA], #8 + fmla v19.4s, v3.4s, v6.s[1] + ldr x25, [pB], #8 + fmla v20.4s, v2.4s, v6.s[2] + fmla v21.4s, v3.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] + fmla v23.4s, v3.4s, v6.s[3] + fmla v24.4s, v2.4s, v7.s[0] + fmla v25.4s, v3.4s, v7.s[0] + fmla v26.4s, v2.4s, v7.s[1] + fmla v27.4s, v3.4s, v7.s[1] + fmla v28.4s, v2.4s, v7.s[2] + fmla v29.4s, v3.4s, v7.s[2] + fmla v30.4s, v2.4s, v7.s[3] + fmla v31.4s, v3.4s, v7.s[3] +.endm + +.macro KERNEL8x8_E + fmov v2.d[1], x22 + fmov v6.d[1], x26 + fmov v3.d[1], x23 + fmov v7.d[1], x27 + fmla v16.4s, v2.4s, v6.s[0] + fmla v17.4s, v3.4s, v6.s[0] + fmla v18.4s, v2.4s, v6.s[1] + fmla v19.4s, v3.4s, v6.s[1] + fmla v20.4s, v2.4s, v6.s[2] + fmla v21.4s, v3.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] + fmla v23.4s, v3.4s, v6.s[3] + fmla v24.4s, v2.4s, v7.s[0] + fmla v25.4s, v3.4s, v7.s[0] + fmla v26.4s, v2.4s, v7.s[1] + fmla v27.4s, v3.4s, v7.s[1] + fmla v28.4s, v2.4s, v7.s[2] + fmla v29.4s, v3.4s, v7.s[2] + fmla v30.4s, v2.4s, v7.s[3] + fmla v31.4s, v3.4s, v7.s[3] +.endm + +.macro KERNEL8x8_SUB + ld1 {v4.4s}, [pB] + add pB, pB, #16 + ld1 {v5.4s}, [pB] + add pB, pB, #16 + ld1 {v0.4s}, [pA] + add pA, pA, #16 + ld1 {v1.4s}, [pA] + add pA, pA, #16 + + fmla v16.4s, v0.4s, v4.s[0] + fmla v17.4s, v1.4s, v4.s[0] + fmla v18.4s, v0.4s, v4.s[1] + fmla v19.4s, v1.4s, v4.s[1] + fmla v20.4s, v0.4s, v4.s[2] + fmla v21.4s, v1.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] + fmla v23.4s, v1.4s, v4.s[3] + fmla v24.4s, v0.4s, v5.s[0] + fmla v25.4s, v1.4s, v5.s[0] + fmla v26.4s, v0.4s, v5.s[1] + fmla v27.4s, v1.4s, v5.s[1] + fmla v28.4s, v0.4s, v5.s[2] + fmla v29.4s, v1.4s, v5.s[2] + fmla v30.4s, v0.4s, v5.s[3] + fmla v31.4s, v1.4s, v5.s[3] +.endm + +.macro SAVE8x8 + add pCRow1, pCRow0, LDC + + fmul v0.4s, v16.4s, alphaV0 + fmul v1.4s, v17.4s, alphaV1 + st1 {v0.4s, v1.4s}, [pCRow0] + + add pCRow2, pCRow1, LDC + + fmul v2.4s, v18.4s, alphaV2 + fmul v3.4s, v19.4s, alphaV3 + st1 {v2.4s, v3.4s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + fmul v4.4s, v20.4s, alphaV0 + fmul v5.4s, v21.4s, alphaV1 + st1 {v4.4s, v5.4s}, [pCRow2] + + add pCRow2, pCRow1, LDC + + fmul v6.4s, v22.4s, alphaV2 + fmul v7.4s, v23.4s, alphaV3 + st1 {v6.4s, v7.4s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + fmul v0.4s, v24.4s, alphaV0 + fmul v1.4s, v25.4s, alphaV1 + st1 {v0.4s, v1.4s}, [pCRow2] + + add pCRow2, pCRow1, LDC + + fmul v2.4s, v26.4s, alphaV2 + fmul v3.4s, v27.4s, alphaV3 + st1 {v2.4s, v3.4s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + fmul v4.4s, v28.4s, alphaV0 + fmul v5.4s, v29.4s, alphaV1 + st1 {v4.4s, v5.4s}, 
[pCRow2] + + fmul v6.4s, v30.4s, alphaV2 + fmul v7.4s, v31.4s, alphaV3 + st1 {v6.4s, v7.4s}, [pCRow1] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + + +.macro INIT4x8 + fmov s16, wzr + fmov s18, wzr + fmov s20, wzr + fmov s22, s16 + fmov s24, wzr + fmov s26, s16 + fmov s28, s18 + fmov s30, s20 +.endm + +.macro KERNEL4x8_I + ld1 {v0.4s}, [pA], #16 + ld1 {v4.4s, v5.4s}, [pB], #32 + + ldr d2, [pA], #8 + ldr d6, [pB], #8 + ldr d7, [pB, #8] + ldr x21, [pA], #8 + fmul v16.4s, v0.4s, v4.s[0] + ldr x26, [pB], #16 + fmul v18.4s, v0.4s, v4.s[1] + ldr x27, [pB], #8 + fmul v20.4s, v0.4s, v4.s[2] + fmul v22.4s, v0.4s, v4.s[3] + fmul v24.4s, v0.4s, v5.s[0] + fmul v26.4s, v0.4s, v5.s[1] + fmul v28.4s, v0.4s, v5.s[2] + fmul v30.4s, v0.4s, v5.s[3] +.endm + +.macro KERNEL4x8_M1 + ldr d2, [pA], #8 + fmov v0.d[1], x20 + ldr d6, [pB], #8 + fmov v4.d[1], x24 + ldr d7, [pB, #8] + fmov v5.d[1], x25 + fmla v16.4s, v0.4s, v4.s[0] + ldr x21, [pA], #8 + fmla v18.4s, v0.4s, v4.s[1] + ldr x26, [pB], #16 + fmla v20.4s, v0.4s, v4.s[2] + ldr x27, [pB], #8 + fmla v22.4s, v0.4s, v4.s[3] + fmla v24.4s, v0.4s, v5.s[0] + fmla v26.4s, v0.4s, v5.s[1] + fmla v28.4s, v0.4s, v5.s[2] + fmla v30.4s, v0.4s, v5.s[3] +.endm + +.macro KERNEL4x8_M2 + ldr d0, [pA], #8 + fmov v2.d[1], x21 + ldr d4, [pB], #8 + fmov v6.d[1], x26 + ldr d5, [pB, #8] + fmov v7.d[1], x27 + fmla v16.4s, v2.4s, v6.s[0] + ldr x20, [pA], #8 + fmla v18.4s, v2.4s, v6.s[1] + ldr x24, [pB], #16 + fmla v20.4s, v2.4s, v6.s[2] + ldr x25, [pB], #8 + fmla v22.4s, v2.4s, v6.s[3] + fmla v24.4s, v2.4s, v7.s[0] + fmla v26.4s, v2.4s, v7.s[1] + fmla v28.4s, v2.4s, v7.s[2] + fmla v30.4s, v2.4s, v7.s[3] +.endm + +.macro KERNEL4x8_E + fmov v2.d[1], x21 + fmov v6.d[1], x26 + fmov v7.d[1], x27 + fmla v16.4s, v2.4s, v6.s[0] + fmla v18.4s, v2.4s, v6.s[1] + fmla v20.4s, v2.4s, v6.s[2] + fmla v22.4s, v2.4s, v6.s[3] + fmla v24.4s, v2.4s, v7.s[0] + fmla v26.4s, v2.4s, v7.s[1] + fmla v28.4s, v2.4s, v7.s[2] + fmla v30.4s, v2.4s, v7.s[3] +.endm + +.macro KERNEL4x8_SUB + ld1 {v4.4s}, [pB] + add pB, pB, #16 + ld1 {v5.4s}, [pB] + add pB, pB, #16 + ld1 {v0.4s}, [pA] + add pA, pA, #16 + + fmla v16.4s, v0.4s, v4.s[0] + fmla v18.4s, v0.4s, v4.s[1] + fmla v20.4s, v0.4s, v4.s[2] + fmla v22.4s, v0.4s, v4.s[3] + fmla v24.4s, v0.4s, v5.s[0] + fmla v26.4s, v0.4s, v5.s[1] + fmla v28.4s, v0.4s, v5.s[2] + fmla v30.4s, v0.4s, v5.s[3] +.endm + +.macro SAVE4x8 + add pCRow1, pCRow0, LDC + + + fmul v0.4s, v16.4s, alphaV0 + st1 {v0.4s}, [pCRow0] + + add pCRow2, pCRow1, LDC + + + fmul v2.4s, v18.4s, alphaV2 + st1 {v2.4s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul v4.4s, v20.4s, alphaV0 + st1 {v4.4s}, [pCRow2] + + add pCRow2, pCRow1, LDC + + + fmul v6.4s, v22.4s, alphaV2 + st1 {v6.4s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul v0.4s, v24.4s, alphaV0 + st1 {v0.4s}, [pCRow2] + + add pCRow2, pCRow1, LDC + + + fmul v2.4s, v26.4s, alphaV2 + st1 {v2.4s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul v4.4s, v28.4s, alphaV0 + st1 {v4.4s}, [pCRow2] + + + fmul v6.4s, v30.4s, alphaV2 + st1 {v6.4s}, [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT2x8 + fmov s16, wzr + fmov s18, wzr + fmov s20, wzr + fmov s22, s16 + fmov s24, wzr + fmov s26, s16 + fmov s28, s18 + fmov s30, s20 +.endm + +.macro KERNEL2x8_SUB + ld1 {v4.4s}, [pB] + add pB, pB, #16 + ld1 {v5.4s}, [pB] + add pB, pB, #16 + ld1 {v0.2s}, [pA] + add pA, pA, #8 + + fmla v16.2s, v0.2s, v4.s[0] + 
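+	// Each fmla below is one column of a rank-1 update: the two A elements
+	// in v0 are multiplied by one broadcast B element, accumulating
+	// c[i][j] += a[i] * b[j] for i in 0..1 and j in 0..7.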
fmla v18.2s, v0.2s, v4.s[1] + fmla v20.2s, v0.2s, v4.s[2] + fmla v22.2s, v0.2s, v4.s[3] + fmla v24.2s, v0.2s, v5.s[0] + fmla v26.2s, v0.2s, v5.s[1] + fmla v28.2s, v0.2s, v5.s[2] + fmla v30.2s, v0.2s, v5.s[3] +.endm + +.macro SAVE2x8 + add pCRow1, pCRow0, LDC + + + fmul v0.2s, v16.2s, alphaV0 + st1 {v0.2s}, [pCRow0] + + add pCRow2, pCRow1, LDC + + + fmul v2.2s, v18.2s, alphaV2 + st1 {v2.2s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul v4.2s, v20.2s, alphaV0 + st1 {v4.2s}, [pCRow2] + + add pCRow2, pCRow1, LDC + + + fmul v6.2s, v22.2s, alphaV2 + st1 {v6.2s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul v0.2s, v24.2s, alphaV0 + st1 {v0.2s}, [pCRow2] + + add pCRow2, pCRow1, LDC + + + fmul v2.2s, v26.2s, alphaV2 + st1 {v2.2s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul v4.2s, v28.2s, alphaV0 + st1 {v4.2s}, [pCRow2] + + + fmul v6.2s, v30.2s, alphaV2 + st1 {v6.2s}, [pCRow1] + + add pCRow0, pCRow0, #8 +.endm + +/******************************************************************************/ + +.macro INIT1x8 + fmov s16, wzr + fmov s18, wzr + fmov s20, wzr + fmov s22, s16 + fmov s24, wzr + fmov s26, s16 + fmov s28, s18 + fmov s30, s20 +.endm + +.macro KERNEL1x8_SUB + ld1 {v4.4s}, [pB] + add pB, pB, #16 + ld1 {v5.4s}, [pB] + add pB, pB, #16 + ldr s0, [pA] + add pA, pA, #4 + + fmla s16, s0, v4.s[0] + fmla s18, s0, v4.s[1] + fmla s20, s0, v4.s[2] + fmla s22, s0, v4.s[3] + fmla s24, s0, v5.s[0] + fmla s26, s0, v5.s[1] + fmla s28, s0, v5.s[2] + fmla s30, s0, v5.s[3] +.endm + +.macro SAVE1x8 + add pCRow1, pCRow0, LDC + + + fmul s0, s16, alphaV0 + str s0, [pCRow0] + + add pCRow2, pCRow1, LDC + + + fmul s2, s18, alphaV2 + str s2, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul s4, s20, alphaV0 + str s4, [pCRow2] + + add pCRow2, pCRow1, LDC + + + fmul s6, s22, alphaV2 + str s6, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul s0, s24, alphaV0 + str s0, [pCRow2] + + add pCRow2, pCRow1, LDC + + + fmul s2, s26, alphaV2 + str s2, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul s4, s28, alphaV0 + str s4, [pCRow2] + + + fmul s6, s30, alphaV2 + str s6, [pCRow1] + + add pCRow0, pCRow0, #4 +.endm + +/******************************************************************************/ + +.macro INIT8x4 + fmov s16, wzr + fmov s17, wzr + fmov s20, wzr + fmov s21, s16 + fmov s24, wzr + fmov s25, s16 + fmov s28, wzr + fmov s29, s16 +.endm + +.macro KERNEL8x4_I + ld1 {v8.4s}, [pB], #16 + ld1 {v0.4s, v1.4s}, [pA], #32 + + ldr d9, [pB], #8 + ldr d2, [pA], #8 + ldr d3, [pA, #8] + fmul v16.4s, v0.4s, v8.s[0] + ldr x25, [pB], #8 + fmul v17.4s, v1.4s, v8.s[0] + ldr x22, [pA], #16 + fmul v20.4s, v0.4s, v8.s[1] + ldr x23, [pA], #8 + fmul v21.4s, v1.4s, v8.s[1] + fmul v24.4s, v0.4s, v8.s[2] + fmul v25.4s, v1.4s, v8.s[2] + fmul v28.4s, v0.4s, v8.s[3] + fmul v29.4s, v1.4s, v8.s[3] +.endm + +.macro KERNEL8x4_M1 + ldr d9, [pB], #8 + fmov v8.d[1], x24 + ldr d2, [pA], #8 + fmov v0.d[1], x20 + ldr d3, [pA, #8] + fmov v1.d[1], x21 + fmla v16.4s, v0.4s, v8.s[0] + ldr x25, [pB], #8 + fmla v17.4s, v1.4s, v8.s[0] + ldr x22, [pA], #16 + fmla v20.4s, v0.4s, v8.s[1] + ldr x23, [pA], #8 + fmla v21.4s, v1.4s, v8.s[1] + fmla v24.4s, v0.4s, v8.s[2] + fmla v25.4s, v1.4s, v8.s[2] + fmla v28.4s, v0.4s, v8.s[3] + fmla v29.4s, v1.4s, v8.s[3] +.endm + +.macro KERNEL8x4_M2 + ldr d8, [pB], #8 + fmov v9.d[1], x25 + ldr d0, [pA], #8 + fmov v2.d[1], x22 + ldr d1, [pA, #8] + fmov v3.d[1], x23 + fmla v16.4s, v2.4s, v9.s[0] + ldr x24, [pB], #8 + fmla v17.4s, v3.4s, v9.s[0] + ldr x20, [pA], #16 + fmla v20.4s, v2.4s, v9.s[1] + ldr x21, [pA], #8 + fmla v21.4s, 
v3.4s, v9.s[1] + fmla v24.4s, v2.4s, v9.s[2] + fmla v25.4s, v3.4s, v9.s[2] + fmla v28.4s, v2.4s, v9.s[3] + fmla v29.4s, v3.4s, v9.s[3] +.endm + +.macro KERNEL8x4_E + fmov v9.d[1], x25 + fmov v2.d[1], x22 + fmov v3.d[1], x23 + fmla v16.4s, v2.4s, v9.s[0] + fmla v17.4s, v3.4s, v9.s[0] + fmla v20.4s, v2.4s, v9.s[1] + fmla v21.4s, v3.4s, v9.s[1] + fmla v24.4s, v2.4s, v9.s[2] + fmla v25.4s, v3.4s, v9.s[2] + fmla v28.4s, v2.4s, v9.s[3] + fmla v29.4s, v3.4s, v9.s[3] +.endm + +.macro KERNEL8x4_SUB + ld1 {v8.4s}, [pB], #16 + ld1 {v0.4s, v1.4s}, [pA], #32 + fmla v16.4s, v0.4s, v8.s[0] + fmla v17.4s, v1.4s, v8.s[0] + fmla v20.4s, v0.4s, v8.s[1] + fmla v21.4s, v1.4s, v8.s[1] + fmla v24.4s, v0.4s, v8.s[2] + fmla v25.4s, v1.4s, v8.s[2] + fmla v28.4s, v0.4s, v8.s[3] + fmla v29.4s, v1.4s, v8.s[3] +.endm + +.macro SAVE8x4 + add pCRow1, pCRow0, LDC + + + fmul v0.4s, v16.4s, alphaV0 + fmul v1.4s, v17.4s, alphaV1 + st1 {v0.4s, v1.4s}, [pCRow0] + + add pCRow2, pCRow1, LDC + + + fmul v4.4s, v20.4s, alphaV0 + fmul v5.4s, v21.4s, alphaV1 + st1 {v4.4s, v5.4s}, [pCRow1] + + add pCRow1, pCRow2, LDC + + + fmul v0.4s, v24.4s, alphaV0 + fmul v1.4s, v25.4s, alphaV1 + st1 {v0.4s, v1.4s}, [pCRow2] + + + fmul v4.4s, v28.4s, alphaV0 + fmul v5.4s, v29.4s, alphaV1 + st1 {v4.4s, v5.4s}, [pCRow1] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + + +.macro INIT4x4 + fmov s16, wzr + fmov s17, s16 + fmov s20, s17 + fmov s21, s16 + fmov s24, s17 + fmov s25, s16 + fmov s28, s17 + fmov s29, s16 +.endm + +.macro KERNEL4x4_I + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + ld1 {v0.2s, v1.2s}, [pA] + add pA, pA, #16 + + fmul v16.2s, v0.2s, v8.s[0] + fmul v29.2s, v1.2s, v9.s[1] + + fmul v20.2s, v0.2s, v8.s[1] + fmul v25.2s, v1.2s, v9.s[0] + + fmul v24.2s, v0.2s, v9.s[0] + fmul v21.2s, v1.2s, v8.s[1] + + fmul v28.2s, v0.2s, v9.s[1] + fmul v17.2s, v1.2s, v8.s[0] + + ld1 {v12.2s, v13.2s}, [pB] + add pB, pB, #16 + ld1 {v4.2s, v5.2s}, [pA] + add pA, pA, #16 +.endm + +.macro KERNEL4x4_M1 + fmla v16.2s, v0.2s, v8.s[0] + fmla v29.2s, v1.2s, v9.s[1] + + ld1 {v12.2s, v13.2s}, [pB] // For next round + add pB, pB, #16 + + fmla v20.2s, v0.2s, v8.s[1] + fmla v25.2s, v1.2s, v9.s[0] + + ld1 {v4.2s, v5.2s}, [pA] // For next round + add pA, pA, #16 + + fmla v24.2s, v0.2s, v9.s[0] + fmla v21.2s, v1.2s, v8.s[1] + + prfm PLDL1KEEP, [pB, #512] + + fmla v28.2s, v0.2s, v9.s[1] + fmla v17.2s, v1.2s, v8.s[0] +.endm + +.macro KERNEL4x4_M2 + fmla v16.2s, v4.2s, v12.s[0] + fmla v29.2s, v5.2s, v13.s[1] + + ld1 {v8.2s, v9.2s}, [pB] // For next round + add pB, pB, #16 + + fmla v20.2s, v4.2s, v12.s[1] + fmla v25.2s, v5.2s, v13.s[0] + + ld1 {v0.2s, v1.2s}, [pA] // For next round + add pA, pA, #16 + + fmla v24.2s, v4.2s, v13.s[0] + fmla v21.2s, v5.2s, v12.s[1] + + prfm PLDL1KEEP, [pA, #512] + + fmla v28.2s, v4.2s, v13.s[1] + fmla v17.2s, v5.2s, v12.s[0] +.endm + +.macro KERNEL4x4_E + fmla v16.2s, v4.2s, v12.s[0] + fmla v29.2s, v5.2s, v13.s[1] + + fmla v20.2s, v4.2s, v12.s[1] + fmla v25.2s, v5.2s, v13.s[0] + + fmla v24.2s, v4.2s, v13.s[0] + fmla v21.2s, v5.2s, v12.s[1] + + fmla v28.2s, v4.2s, v13.s[1] + fmla v17.2s, v5.2s, v12.s[0] +.endm + +.macro KERNEL4x4_SUB + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + ld1 {v0.2s, v1.2s}, [pA] + add pA, pA, #16 + + fmla v16.2s, v0.2s, v8.s[0] + fmla v29.2s, v1.2s, v9.s[1] + + fmla v20.2s, v0.2s, v8.s[1] + fmla v25.2s, v1.2s, v9.s[0] + + fmla v24.2s, v0.2s, v9.s[0] + fmla v21.2s, v1.2s, v8.s[1] + + fmla v28.2s, v0.2s, v9.s[1] + fmla v17.2s, v1.2s, v8.s[0] +.endm + 
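An editorial aside before the SAVE4x4 macro (not part of the patch): each KERNEL4x4_* step above is one rank-1 update of a 4x4 tile of C, held across the v16/v17/v20/v21/v24/v25/v28/v29 register pairs, and KERNEL4x4_M1/M2 perform the identical update on two alternating register sets (v0/v1/v8/v9 versus v4/v5/v12/v13) so the loads and prefetches for the next K step overlap the fmlas of the current one. A scalar C model of one KERNEL4x4_SUB step, with illustrative names:

    /* One rank-1 update of a 4x4 accumulator tile, as computed by the
     * eight fmla lanes of KERNEL4x4_SUB.  a[] is one packed column of A
     * (4 rows), b[] one packed row of B (4 columns). */
    static void kernel4x4_sub_model(const float a[4], const float b[4],
                                    float acc[4][4])
    {
        for (int j = 0; j < 4; j++)        /* b[j]: the v8.s[j] or v9.s[j-2] lane */
            for (int i = 0; i < 4; i++)    /* a[i]: an element of v0 or v1        */
                acc[i][j] += a[i] * b[j];  /* one fmla lane                       */
    }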
+.macro SAVE4x4 + + fmul v8.2s, v16.2s, alphaV0 + fmul v9.2s, v17.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow0] + + add pCRow1, pCRow0, LDC + + fmul v12.2s, v20.2s, alphaV2 + fmul v13.2s, v21.2s, alphaV3 + st1 {v12.2s, v13.2s}, [pCRow1] + + add pCRow2, pCRow1, LDC + + fmul v8.2s, v24.2s, alphaV0 + fmul v9.2s, v25.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow2] + + add pCRow1, pCRow2, LDC + + fmul v12.2s, v28.2s, alphaV2 + fmul v13.2s, v29.2s, alphaV3 + st1 {v12.2s, v13.2s}, [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT2x4 + fmov s16, wzr + fmov s20, s16 + fmov s24, s20 + fmov s28, s16 +.endm + +.macro KERNEL2x4_SUB + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + ld1 {v0.2s}, [pA] + add pA, pA, #8 + + fmla v16.2s, v0.2s, v8.s[0] + fmla v20.2s, v0.2s, v8.s[1] + fmla v24.2s, v0.2s, v9.s[0] + fmla v28.2s, v0.2s, v9.s[1] +.endm + +.macro SAVE2x4 + + fmul v8.2s, v16.2s, alphaV0 + st1 {v8.2s}, [pCRow0] + + add pCRow1, pCRow0, LDC + + fmul v12.2s, v20.2s, alphaV1 + st1 {v12.2s}, [pCRow1] + + add pCRow2, pCRow1, LDC + + fmul v8.2s, v24.2s, alphaV2 + st1 {v8.2s}, [pCRow2] + + add pCRow1, pCRow2, LDC + + fmul v12.2s, v28.2s, alphaV3 + st1 {v12.2s}, [pCRow1] + + add pCRow0, pCRow0, #8 +.endm + +/******************************************************************************/ + +.macro INIT1x4 + fmov s16, wzr + fmov s20, s16 +.endm + +.macro KERNEL1x4_SUB + ldr s0, [pA] + add pA, pA, #4 + + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + + fmla v16.2s, v8.2s, v0.s[0] + fmla v20.2s, v9.2s, v0.s[0] +.endm + +.macro SAVE1x4 + add pCRow1, pCRow0, LDC + + + fmul v8.2s, v16.2s, alphaV0 + st1 {v8.s}[0], [pCRow0] + st1 {v8.s}[1], [pCRow1] + + add pCRow2, pCRow1, LDC + add pCRow1, pCRow2, LDC + + + fmul v12.2s, v20.2s, alphaV1 + st1 {v12.s}[0], [pCRow2] + st1 {v12.s}[1], [pCRow1] + + add pCRow0, pCRow0, #4 +.endm + +/******************************************************************************/ + +.macro INIT8x2 + fmov s16, wzr + fmov s17, s16 + fmov s20, s17 + fmov s21, s16 +.endm + +.macro KERNEL8x2_SUB + ld1 {v8.2s}, [pB] + add pB, pB, #8 + ld1 {v0.4s}, [pA] + add pA, pA, #16 + ld1 {v1.4s}, [pA] + add pA, pA, #16 + + fmla v16.4s, v0.4s, v8.s[0] + fmla v17.4s, v1.4s, v8.s[0] + + fmla v20.4s, v0.4s, v8.s[1] + fmla v21.4s, v1.4s, v8.s[1] +.endm + +.macro SAVE8x2 + add pCRow1, pCRow0, LDC + + + fmul v0.4s, v16.4s, alphaV0 + fmul v1.4s, v17.4s, alphaV1 + st1 {v0.4s, v1.4s}, [pCRow0] + + add pCRow2, pCRow1, LDC + + + fmul v4.4s, v20.4s, alphaV0 + fmul v5.4s, v21.4s, alphaV1 + st1 {v4.4s, v5.4s}, [pCRow1] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + +.macro INIT4x2 + fmov s16, wzr + fmov s17, s16 + fmov s20, s17 + fmov s21, s16 +.endm + +.macro KERNEL4x2_SUB + ld1 {v8.2s}, [pB] + add pB, pB, #8 + ld1 {v0.2s, v1.2s}, [pA] + add pA, pA, #16 + + fmla v16.2s, v0.2s, v8.s[0] + fmla v17.2s, v1.2s, v8.s[0] + fmla v20.2s, v0.2s, v8.s[1] + fmla v21.2s, v1.2s, v8.s[1] +.endm + +.macro SAVE4x2 + + fmul v8.2s, v16.2s, alphaV0 + fmul v9.2s, v17.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow0] + + add pCRow1, pCRow0, LDC + + fmul v12.2s, v20.2s, alphaV2 + fmul v13.2s, v21.2s, alphaV3 + st1 {v12.2s, v13.2s}, [pCRow1] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT2x2 + fmov s16, wzr + fmov s20, s16 +.endm + +.macro KERNEL2x2_SUB + ld1 {v8.2s}, [pB] + add pB, pB, #8 + + ld1 {v0.2s}, [pA] + add 
pA, pA, #8 + + fmla v16.2s, v0.2s, v8.s[0] + fmla v20.2s, v0.2s, v8.s[1] +.endm + +.macro SAVE2x2 + + fmul v8.2s, v16.2s, alphaV0 + st1 {v8.2s}, [pCRow0] + + add pCRow1 , pCRow0, LDC + + fmul v12.2s, v20.2s, alphaV1 + st1 {v12.2s}, [pCRow1] + + add pCRow0, pCRow0, #8 +.endm + +/******************************************************************************/ + +.macro INIT1x2 + fmov s16, wzr +.endm + +.macro KERNEL1x2_SUB + ld1 {v8.2s} , [pB] + add pB , pB, #8 + + ldr s0 , [pA] + add pA, pA, #4 + + fmla v16.2s, v8.2s, v0.s[0] +.endm + +.macro SAVE1x2 + add pCRow1 , pCRow0, LDC + + + fmul v8.2s, v16.2s, alphaV0 + st1 {v8.s}[0], [pCRow0] + st1 {v8.s}[1], [pCRow1] + + add pCRow0, pCRow0, #4 +.endm + +/******************************************************************************/ + +.macro INIT8x1 + fmov s16, wzr + fmov s17, wzr +.endm + +.macro KERNEL8x1_SUB + ldr s8, [pB] + add pB , pB, #4 + + ld1 {v0.4s}, [pA] + add pA, pA, #16 + ld1 {v1.4s}, [pA] + add pA, pA, #16 + + fmla v16.4s, v0.4s, v8.s[0] + fmla v17.4s, v1.4s, v8.s[0] +.endm + +.macro SAVE8x1 + + fmul v0.4s, v16.4s, alphaV0 + fmul v1.4s, v17.4s, alphaV1 + st1 {v0.4s, v1.4s}, [pCRow0] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ + +.macro INIT4x1 + fmov s16, wzr + fmov s17, s16 +.endm + +.macro KERNEL4x1_SUB + ldr s8, [pB] + add pB , pB, #4 + + ld1 {v0.2s, v1.2s}, [pA] + add pA , pA, #16 + + fmla v16.2s, v0.2s, v8.s[0] + fmla v17.2s, v1.2s, v8.s[0] +.endm + +.macro SAVE4x1 + + fmul v8.2s, v16.2s, alphaV0 + fmul v9.2s, v17.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow0] + + add pCRow0, pCRow0, #16 +.endm + +/******************************************************************************/ + +.macro INIT2x1 + fmov s16, wzr +.endm + +.macro KERNEL2x1_SUB + ldr s8, [pB] + add pB , pB, #4 + + ld1 {v0.2s}, [pA] + add pA , pA, #8 + + fmla v16.2s, v0.2s, v8.s[0] +.endm + +.macro SAVE2x1 + + fmul v8.2s, v16.2s, alphaV0 + st1 {v8.2s}, [pCRow0] + + add pCRow0, pCRow0, #8 +.endm + +/******************************************************************************/ + +.macro INIT1x1 + fmov s16, wzr +.endm + +.macro KERNEL1x1_SUB + ldr s8, [pB] + add pB , pB, #4 + + ldr s0, [pA] + add pA , pA, #4 + + fmadd s16, s0, s8, s16 +.endm + +.macro SAVE1x1 + + fmul s8, s16, alpha0 + str s8, [pCRow0] + + add pCRow0, pCRow0, #4 +.endm + +/******************************************************************************* +* End of macro definitions +*******************************************************************************/ + + PROLOGUE + +.Lstrmm_kernel_begin: + + .align 5 + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, #(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] + + fmov alpha0, s0 + fmov alpha1, s0 + fmov alpha2, s0 + fmov alpha3, s0 + + lsl LDC, LDC, #2 // ldc = ldc * 4 + +#if !defined(LEFT) + neg tempOffset, offset +#endif + mov pB, origPB + + mov counterJ, origN + asr counterJ, counterJ, #3 // J = J / 8 + cmp counterJ, #0 + ble .Lstrmm_kernel_L4_BEGIN + +/******************************************************************************/ +/******************************************************************************/ + +.Lstrmm_kernel_L8_BEGIN: + mov pCRow0, pC // pCRow0 = C + add pC, pC, LDC, lsl #3 + +#if 
defined(LEFT) + mov tempOffset, offset +#endif + + mov pA, origPA // pA = start of A array + +/******************************************************************************/ + +.Lstrmm_kernel_L8_M8_BEGIN: + + mov counterI, origM + asr counterI, counterI, #3 // counterI = counterI / 8 + cmp counterI, #0 + ble .Lstrmm_kernel_L8_M4_BEGIN + +.Lstrmm_kernel_L8_M8_20: + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #5 + add pA, pA, temp + add pB, pB, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #8 +#else + add tempK, tempOffset, #8 +#endif + + asr counterL , tempK, #3 // L = K / 8 + cmp counterL , #2 // is there at least 16 to do? + blt .Lstrmm_kernel_L8_M8_32 + + KERNEL8x8_I // do one in the K + KERNEL8x8_M2 // do another in the K + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + + subs counterL, counterL, #2 + ble .Lstrmm_kernel_L8_M8_22a + .align 5 + +.Lstrmm_kernel_L8_M8_22: + + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L8_M8_22 + +.Lstrmm_kernel_L8_M8_22a: + + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_E + + b .Lstrmm_kernel_L8_M8_44 + +.Lstrmm_kernel_L8_M8_32: + + tst counterL, #1 + ble .Lstrmm_kernel_L8_M8_40 + + KERNEL8x8_I + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_M2 + KERNEL8x8_M1 + KERNEL8x8_E + + b .Lstrmm_kernel_L8_M8_44 + +.Lstrmm_kernel_L8_M8_40: + + INIT8x8 + +.Lstrmm_kernel_L8_M8_44: + + ands counterL , tempK, #7 + ble .Lstrmm_kernel_L8_M8_100 + +.Lstrmm_kernel_L8_M8_46: + + KERNEL8x8_SUB + + subs counterL, counterL, 1 + bgt .Lstrmm_kernel_L8_M8_46 + +.Lstrmm_kernel_L8_M8_100: + + SAVE8x8 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #8 +#else + sub tempK, tempK, #8 +#endif + lsl temp, tempK, #5 + add pA, pA, temp + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #8 +#endif + +.Lstrmm_kernel_L8_M8_END: + subs counterI, counterI, #1 + bne .Lstrmm_kernel_L8_M8_20 + +/******************************************************************************/ + +.Lstrmm_kernel_L8_M4_BEGIN: + + mov counterI, origM + tst counterI , #7 + ble .Lstrmm_kernel_L8_END + + tst counterI, #4 + ble .Lstrmm_kernel_L8_M2_BEGIN + +.Lstrmm_kernel_L8_M4_20: + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #4 + add pA, pA, temp + lsl temp, tempOffset, #5 + add pB, pB, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #4 +#else + add tempK, tempOffset, #8 +#endif + + asr counterL , tempK, #1 // L = K / 2 + cmp counterL , #2 // is there at least 4 to do? 
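// Editorial note (not in the original patch): counterL here holds K/2 for
// this 4x8 tile, and the branch below chooses between the software-pipelined
// path and the short path -- the same I/M1/M2/E scheme as the 8x8 loop above.
// The _I macro primes one register set with fmuls, _M1/_M2 run the identical
// update on alternating register sets so loads overlap arithmetic, and _E
// drains the pipeline without issuing further loads.  The paired
// "ldr d"/"ldr x" plus "fmov v.d[1], x" idiom splits each 128-bit load into
// two 64-bit halves, one through the FP side and one through the integer
// side, which helps keep the load units busy alongside the fmlas.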
+ blt .Lstrmm_kernel_L8_M4_32 + + KERNEL4x8_I // do one in the K + KERNEL4x8_M2 // do another in the K + + subs counterL, counterL, #2 + ble .Lstrmm_kernel_L8_M4_22a + .align 5 + +.Lstrmm_kernel_L8_M4_22: + + KERNEL4x8_M1 + KERNEL4x8_M2 + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L8_M4_22 + +.Lstrmm_kernel_L8_M4_22a: + + KERNEL4x8_M1 + KERNEL4x8_E + + b .Lstrmm_kernel_L8_M4_44 + +.Lstrmm_kernel_L8_M4_32: + + tst counterL, #1 + ble .Lstrmm_kernel_L8_M4_40 + + KERNEL4x8_I + KERNEL4x8_E + + b .Lstrmm_kernel_L8_M4_44 + +.Lstrmm_kernel_L8_M4_40: + + INIT4x8 + +.Lstrmm_kernel_L8_M4_44: + + ands counterL , tempK, #1 + ble .Lstrmm_kernel_L8_M4_100 + +.Lstrmm_kernel_L8_M4_46: + + KERNEL4x8_SUB + +.Lstrmm_kernel_L8_M4_100: + + SAVE4x8 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #4 +#else + sub tempK, tempK, #8 +#endif + lsl temp, tempK, #4 + add pA, pA, temp + lsl temp, tempK, #5 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #4 +#endif + +.Lstrmm_kernel_L8_M4_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L8_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble .Lstrmm_kernel_L8_END + + tst counterI, #2 // counterI = counterI / 2 + ble .Lstrmm_kernel_L8_M1_BEGIN + +.Lstrmm_kernel_L8_M2_20: + + INIT2x8 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #3 + add pA, pA, temp + lsl temp, tempOffset, #5 + add pB, pB, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #2 +#else + add tempK, tempOffset, #8 +#endif + + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lstrmm_kernel_L8_M2_40 + +.Lstrmm_kernel_L8_M2_22: + + KERNEL2x8_SUB + KERNEL2x8_SUB + KERNEL2x8_SUB + KERNEL2x8_SUB + + KERNEL2x8_SUB + KERNEL2x8_SUB + KERNEL2x8_SUB + KERNEL2x8_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L8_M2_22 + + +.Lstrmm_kernel_L8_M2_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L8_M2_100 + +.Lstrmm_kernel_L8_M2_42: + + KERNEL2x8_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L8_M2_42 + +.Lstrmm_kernel_L8_M2_100: + + SAVE2x8 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #2 +#else + sub tempK, tempK, #8 +#endif + lsl temp, tempK, #3 + add pA, pA, temp + lsl temp, tempK, #5 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #2 +#endif + +.Lstrmm_kernel_L8_M2_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L8_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble .Lstrmm_kernel_L8_END + +.Lstrmm_kernel_L8_M1_20: + + INIT1x8 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #2 + add pA, pA, temp + lsl temp, tempOffset, #5 + add pB, pB, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #1 +#else + add tempK, tempOffset, #8 +#endif + + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL , #0 
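// Editorial note (not in the original patch): every small-tile K loop in
// this file follows the shape decided by the branch below: tempK >> 3
// passes of eight unrolled KERNEL*_SUB invocations, then tempK & 7
// single-step iterations to finish off the remainder of K.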
+ ble .Lstrmm_kernel_L8_M1_40 + +.Lstrmm_kernel_L8_M1_22: + KERNEL1x8_SUB + KERNEL1x8_SUB + KERNEL1x8_SUB + KERNEL1x8_SUB + + KERNEL1x8_SUB + KERNEL1x8_SUB + KERNEL1x8_SUB + KERNEL1x8_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L8_M1_22 + + +.Lstrmm_kernel_L8_M1_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L8_M1_100 + +.Lstrmm_kernel_L8_M1_42: + + KERNEL1x8_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L8_M1_42 + +.Lstrmm_kernel_L8_M1_100: + + SAVE1x8 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #1 +#else + sub tempK, tempK, #8 +#endif + lsl temp, tempK, #2 + add pA, pA, temp + lsl temp, tempK, #5 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #1 +#endif + +.Lstrmm_kernel_L8_END: + lsl temp, origK, #5 // B = B + K * 4 * 8 + add origPB, origPB, temp + +#if !defined(LEFT) + add tempOffset, tempOffset, #8 +#endif + + subs counterJ, counterJ , #1 // j-- + bgt .Lstrmm_kernel_L8_BEGIN + +/******************************************************************************/ +/******************************************************************************/ + +.Lstrmm_kernel_L4_BEGIN: + + mov counterJ , origN + tst counterJ , #7 + ble .Lstrmm_kernel_L999 + + tst counterJ , #4 + ble .Lstrmm_kernel_L2_BEGIN + + mov pCRow0, pC // pCRow0 = pC + + add pC,pC,LDC, lsl #2 + +#if defined(LEFT) + mov tempOffset, offset +#endif + + mov pA, origPA // pA = A + +/******************************************************************************/ + +.Lstrmm_kernel_L4_M8_BEGIN: + + mov counterI, origM + asr counterI, counterI, #3 // counterI = counterI / 8 + cmp counterI, #0 + ble .Lstrmm_kernel_L4_M4_BEGIN + +.Lstrmm_kernel_L4_M8_20: + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #5 + add pA, pA, temp + lsl temp, tempOffset, #4 + add pB, pB, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #8 +#else + add tempK, tempOffset, #4 +#endif + + asr counterL , tempK, #1 // L = K / 2 + cmp counterL , #2 // is there at least 4 to do? 
+ blt .Lstrmm_kernel_L4_M8_32 + + KERNEL8x4_I // do one in the K + KERNEL8x4_M2 // do another in the K + + subs counterL, counterL, #2 + ble .Lstrmm_kernel_L4_M8_22a + .align 5 + +.Lstrmm_kernel_L4_M8_22: + + KERNEL8x4_M1 + KERNEL8x4_M2 + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L4_M8_22 + +.Lstrmm_kernel_L4_M8_22a: + + KERNEL8x4_M1 + KERNEL8x4_E + + b .Lstrmm_kernel_L4_M8_44 + +.Lstrmm_kernel_L4_M8_32: + + tst counterL, #1 + ble .Lstrmm_kernel_L4_M8_40 + + KERNEL8x4_I + KERNEL8x4_E + + b .Lstrmm_kernel_L4_M8_44 + +.Lstrmm_kernel_L4_M8_40: + + INIT8x4 + +.Lstrmm_kernel_L4_M8_44: + + ands counterL , tempK, #1 + ble .Lstrmm_kernel_L4_M8_100 + +.Lstrmm_kernel_L4_M8_46: + + KERNEL8x4_SUB + +.Lstrmm_kernel_L4_M8_100: + + SAVE8x4 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #8 +#else + sub tempK, tempK, #4 +#endif + lsl temp, tempK, #5 + add pA, pA, temp + lsl temp, tempK, #4 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #8 +#endif +.Lstrmm_kernel_L4_M8_END: + subs counterI, counterI, #1 + bne .Lstrmm_kernel_L4_M8_20 + +/******************************************************************************/ + +.Lstrmm_kernel_L4_M4_BEGIN: + + mov counterI, origM + tst counterI , #7 + ble .Lstrmm_kernel_L4_END + + tst counterI, #4 + ble .Lstrmm_kernel_L4_M2_BEGIN + +.Lstrmm_kernel_L4_M4_20: + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #4 + add pB, pB, temp + add pA, pA, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #4 +#else + add tempK, tempOffset, #4 +#endif + asr counterL , tempK, #1 // L = K / 2 + cmp counterL , #2 // is there at least 4 to do? 
+ blt .Lstrmm_kernel_L4_M4_32 + + KERNEL4x4_I // do one in the K + KERNEL4x4_M2 // do another in the K + + subs counterL, counterL, #2 + ble .Lstrmm_kernel_L4_M4_22a + .align 5 + +.Lstrmm_kernel_L4_M4_22: + + KERNEL4x4_M1 + KERNEL4x4_M2 + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L4_M4_22 + +.Lstrmm_kernel_L4_M4_22a: + + KERNEL4x4_M1 + KERNEL4x4_E + + b .Lstrmm_kernel_L4_M4_44 + +.Lstrmm_kernel_L4_M4_32: + + tst counterL, #1 + ble .Lstrmm_kernel_L4_M4_40 + + KERNEL4x4_I + KERNEL4x4_E + + b .Lstrmm_kernel_L4_M4_44 + +.Lstrmm_kernel_L4_M4_40: + + INIT4x4 + +.Lstrmm_kernel_L4_M4_44: + + ands counterL , tempK, #1 + ble .Lstrmm_kernel_L4_M4_100 + +.Lstrmm_kernel_L4_M4_46: + + KERNEL4x4_SUB + +.Lstrmm_kernel_L4_M4_100: + + SAVE4x4 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #4 +#else + sub tempK, tempK, #4 +#endif + lsl temp, tempK, #4 + add pA, pA, temp + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #4 +#endif +.Lstrmm_kernel_L4_M4_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L4_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble .Lstrmm_kernel_L4_END + + tst counterI, #2 // counterI = counterI / 2 + ble .Lstrmm_kernel_L4_M1_BEGIN + +.Lstrmm_kernel_L4_M2_20: + + INIT2x4 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #3 + add pA, pA, temp + lsl temp, tempOffset, #4 + add pB, pB, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #2 +#else + add tempK, tempOffset, #4 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lstrmm_kernel_L4_M2_40 + +.Lstrmm_kernel_L4_M2_22: + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L4_M2_22 + + +.Lstrmm_kernel_L4_M2_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L4_M2_100 + +.Lstrmm_kernel_L4_M2_42: + + KERNEL2x4_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L4_M2_42 + +.Lstrmm_kernel_L4_M2_100: + + SAVE2x4 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #2 +#else + sub tempK, tempK, #4 +#endif + lsl temp, tempK, #3 + add pA, pA, temp + lsl temp, tempK, #4 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #2 +#endif +.Lstrmm_kernel_L4_M2_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L4_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble .Lstrmm_kernel_L4_END + +.Lstrmm_kernel_L4_M1_20: + + INIT1x4 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #4 + add pB, pB, temp + lsl temp, tempOffset, #2 + add pA, pA, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #1 +#else + add tempK, tempOffset, #4 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lstrmm_kernel_L4_M1_40 
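A brief editorial aside (not part of the patch) on the offset bookkeeping repeated around every tile in this file: because TRMM multiplies by a triangular matrix, only part of the K range contributes to each tile, so the code derives a per-tile start offset and effective K extent from tempOffset before the loop, and afterwards re-advances pA and pB past the unused remainder. A C sketch of the tempK selection, with illustrative names (MR and NR are the tile dimensions, e.g. 1 and 4 for the 1x4 tile below):

    /* Effective K extent for one MR x NR tile, mirroring the
     * #if (defined(LEFT) && !defined(TRANSA)) || ... blocks above. */
    static long effective_k(long origK, long tempOffset,
                            int MR, int NR, int left, int transa)
    {
        if ((left && !transa) || (!left && transa))
            return origK - tempOffset;   /* sub tempK, origK, tempOffset */
        return left ? tempOffset + MR    /* add tempK, tempOffset, #MR   */
                    : tempOffset + NR;   /* add tempK, tempOffset, #NR   */
    }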
+ +.Lstrmm_kernel_L4_M1_22: + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L4_M1_22 + + +.Lstrmm_kernel_L4_M1_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L4_M1_100 + +.Lstrmm_kernel_L4_M1_42: + + KERNEL1x4_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L4_M1_42 + +.Lstrmm_kernel_L4_M1_100: + + SAVE1x4 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #1 +#else + sub tempK, tempK, #4 +#endif + lsl temp, tempK, #2 + add pA, pA, temp + lsl temp, tempK, #4 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #1 +#endif +.Lstrmm_kernel_L4_END: + add origPB, origPB, origK, lsl #4 // B = B + K * 4 * 4 +#if !defined(LEFT) + add tempOffset, tempOffset, #4 +#endif + +/******************************************************************************/ +/******************************************************************************/ + +.Lstrmm_kernel_L2_BEGIN: // less than 2 left in N direction + + mov counterJ , origN + tst counterJ , #3 + ble .Lstrmm_kernel_L999 + + tst counterJ , #2 + ble .Lstrmm_kernel_L1_BEGIN + + mov pCRow0, pC // pCRow0 = pC + + add pC,pC,LDC, lsl #1 + +#if defined(LEFT) + mov tempOffset, offset +#endif + mov pA, origPA // pA = A + +/******************************************************************************/ + +.Lstrmm_kernel_L2_M8_BEGIN: + + mov counterI, origM + asr counterI, counterI, #3 // counterI = counterI / 8 + cmp counterI,#0 + ble .Lstrmm_kernel_L2_M4_BEGIN + +.Lstrmm_kernel_L2_M8_20: + + INIT8x2 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #5 + add pA, pA, temp + lsl temp, tempOffset, #3 + add pB, pB, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #8 +#else + add tempK, tempOffset, #2 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL,#0 + ble .Lstrmm_kernel_L2_M8_40 + .align 5 + +.Lstrmm_kernel_L2_M8_22: + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L2_M8_22 + + +.Lstrmm_kernel_L2_M8_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L2_M8_100 + +.Lstrmm_kernel_L2_M8_42: + + KERNEL8x2_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L2_M8_42 + +.Lstrmm_kernel_L2_M8_100: + + SAVE8x2 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #8 +#else + sub tempK, tempK, #2 +#endif + lsl temp, tempK, #5 + add pA, pA, temp + lsl temp, tempK, #3 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #8 +#endif +.Lstrmm_kernel_L2_M8_END: + + subs counterI, counterI, #1 + bgt .Lstrmm_kernel_L2_M8_20 + +/******************************************************************************/ + +.Lstrmm_kernel_L2_M4_BEGIN: + + mov counterI, origM + tst counterI , #7 + ble .Lstrmm_kernel_L2_END + + tst counterI, #4 + ble .Lstrmm_kernel_L2_M2_BEGIN + +.Lstrmm_kernel_L2_M4_20: + + INIT4x2 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) 
&& !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #3 + add pB, pB, temp + lsl temp, tempOffset, #4 + add pA, pA, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #4 +#else + add tempK, tempOffset, #2 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL,#0 + ble .Lstrmm_kernel_L2_M4_40 + .align 5 + +.Lstrmm_kernel_L2_M4_22: + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L2_M4_22 + + +.Lstrmm_kernel_L2_M4_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L2_M4_100 + +.Lstrmm_kernel_L2_M4_42: + + KERNEL4x2_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L2_M4_42 + +.Lstrmm_kernel_L2_M4_100: + + SAVE4x2 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #4 +#else + sub tempK, tempK, #2 +#endif + lsl temp, tempK, #4 + add pA, pA, temp + lsl temp, tempK, #3 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #4 +#endif +.Lstrmm_kernel_L2_M4_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L2_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble .Lstrmm_kernel_L2_END + + tst counterI, #2 // counterI = counterI / 2 + ble .Lstrmm_kernel_L2_M1_BEGIN + +.Lstrmm_kernel_L2_M2_20: + + INIT2x2 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #3 + add pB, pB, temp + lsl temp, tempOffset, #3 + add pA, pA, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #2 +#else + add tempK, tempOffset, #2 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL,#0 + ble .Lstrmm_kernel_L2_M2_40 + +.Lstrmm_kernel_L2_M2_22: + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L2_M2_22 + + +.Lstrmm_kernel_L2_M2_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L2_M2_100 + +.Lstrmm_kernel_L2_M2_42: + + KERNEL2x2_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L2_M2_42 + +.Lstrmm_kernel_L2_M2_100: + + SAVE2x2 +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #2 +#else + sub tempK, tempK, #2 +#endif + lsl temp, tempK, #3 + add pA, pA, temp + lsl temp, tempK, #3 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #2 +#endif + +.Lstrmm_kernel_L2_M2_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L2_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble .Lstrmm_kernel_L2_END + +.Lstrmm_kernel_L2_M1_20: + + INIT1x2 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #3 + add pB, pB, temp + lsl temp, tempOffset, #2 + add pA, pA, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && 
defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #1 +#else + add tempK, tempOffset, #2 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL, #0 + ble .Lstrmm_kernel_L2_M1_40 + +.Lstrmm_kernel_L2_M1_22: + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L2_M1_22 + + +.Lstrmm_kernel_L2_M1_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L2_M1_100 + +.Lstrmm_kernel_L2_M1_42: + + KERNEL1x2_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L2_M1_42 + +.Lstrmm_kernel_L2_M1_100: + + SAVE1x2 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #1 +#else + sub tempK, tempK, #2 +#endif + lsl temp, tempK, #2 + add pA, pA, temp + lsl temp, tempK, #3 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #1 +#endif +.Lstrmm_kernel_L2_END: +#if !defined(LEFT) + add tempOffset, tempOffset, #2 +#endif + add origPB, origPB, origK, lsl #3 // B = B + K * 2 * 4 + +/******************************************************************************/ +/******************************************************************************/ + +.Lstrmm_kernel_L1_BEGIN: + + mov counterJ , origN + tst counterJ , #1 + ble .Lstrmm_kernel_L999 // done + + + mov pCRow0, pC // pCRow0 = C + add pC , pC , LDC // Update pC to point to next + +#if defined(LEFT) + mov tempOffset, offset +#endif + mov pA, origPA // pA = A + +/******************************************************************************/ + +.Lstrmm_kernel_L1_M8_BEGIN: + + mov counterI, origM + asr counterI, counterI, #3 + cmp counterI, #0 + ble .Lstrmm_kernel_L1_M4_BEGIN + +.Lstrmm_kernel_L1_M8_20: + + INIT8x1 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #5 + add pA, pA, temp + lsl temp, tempOffset, #2 + add pB, pB, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #8 +#else + add tempK, tempOffset, #1 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lstrmm_kernel_L1_M8_40 + .align 5 + +.Lstrmm_kernel_L1_M8_22: + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L1_M8_22 + + +.Lstrmm_kernel_L1_M8_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L1_M8_100 + +.Lstrmm_kernel_L1_M8_42: + + KERNEL8x1_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L1_M8_42 + +.Lstrmm_kernel_L1_M8_100: + + SAVE8x1 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #8 +#else + sub tempK, tempK, #1 +#endif + lsl temp, tempK, #5 + add pA, pA, temp + lsl temp, tempK, #2 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #8 +#endif +.Lstrmm_kernel_L1_M8_END: + + subs counterI, counterI, #1 + bgt .Lstrmm_kernel_L1_M8_20 + +/******************************************************************************/ + +.Lstrmm_kernel_L1_M4_BEGIN: + + mov counterI, origM + tst counterI , #7 + ble 
.Lstrmm_kernel_L1_END + + tst counterI, #4 + ble .Lstrmm_kernel_L1_M2_BEGIN + +.Lstrmm_kernel_L1_M4_20: + + INIT4x1 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #2 + add pB, pB, temp + lsl temp, tempOffset, #4 + add pA, pA, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #4 +#else + add tempK, tempOffset, #1 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lstrmm_kernel_L1_M4_40 + .align 5 + +.Lstrmm_kernel_L1_M4_22: + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L1_M4_22 + + +.Lstrmm_kernel_L1_M4_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L1_M4_100 + +.Lstrmm_kernel_L1_M4_42: + + KERNEL4x1_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L1_M4_42 + +.Lstrmm_kernel_L1_M4_100: + + SAVE4x1 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #4 +#else + sub tempK, tempK, #1 +#endif + lsl temp, tempK, #4 + add pA, pA, temp + lsl temp, tempK, #2 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #4 +#endif +.Lstrmm_kernel_L1_M4_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L1_M2_BEGIN: + + mov counterI, origM + tst counterI , #3 + ble .Lstrmm_kernel_L1_END + + tst counterI, #2 // counterI = counterI / 2 + ble .Lstrmm_kernel_L1_M1_BEGIN + +.Lstrmm_kernel_L1_M2_20: + + INIT2x1 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + mov pB, origPB + lsl temp, tempOffset, #2 + add pB, pB, temp + lsl temp, tempOffset, #3 + add pA, pA, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #2 +#else + add tempK, tempOffset, #1 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lstrmm_kernel_L1_M2_40 + +.Lstrmm_kernel_L1_M2_22: + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L1_M2_22 + + +.Lstrmm_kernel_L1_M2_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L1_M2_100 + +.Lstrmm_kernel_L1_M2_42: + + KERNEL2x1_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L1_M2_42 + +.Lstrmm_kernel_L1_M2_100: + + SAVE2x1 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub tempK, origK, tempOffset +#if defined(LEFT) + sub tempK, tempK, #2 +#else + sub tempK, tempK, #1 +#endif + lsl temp, tempK, #3 + add pA, pA, temp + lsl temp, tempK, #2 + add pB, pB, temp +#endif +#if defined(LEFT) + add tempOffset, tempOffset, #2 +#endif +.Lstrmm_kernel_L1_M2_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L1_M1_BEGIN: + + tst counterI, #1 // counterI = counterI % 2 + ble .Lstrmm_kernel_L1_END + +.Lstrmm_kernel_L1_M1_20: + + INIT1x1 + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + mov pB, origPB +#else + 
mov pB, origPB + lsl temp, tempOffset, #2 + add pB, pB, temp + lsl temp, tempOffset, #2 + add pA, pA, temp +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub tempK, origK, tempOffset +#elif defined(LEFT) + add tempK, tempOffset, #1 +#else + add tempK, tempOffset, #1 +#endif + asr counterL , tempK, #3 // counterL = counterL / 8 + cmp counterL , #0 + ble .Lstrmm_kernel_L1_M1_40 + +.Lstrmm_kernel_L1_M1_22: + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L1_M1_22 + + +.Lstrmm_kernel_L1_M1_40: + + ands counterL , tempK, #7 // counterL = counterL % 8 + ble .Lstrmm_kernel_L1_M1_100 + +.Lstrmm_kernel_L1_M1_42: + + KERNEL1x1_SUB + + subs counterL, counterL, #1 + bgt .Lstrmm_kernel_L1_M1_42 + +.Lstrmm_kernel_L1_M1_100: + + SAVE1x1 + +.Lstrmm_kernel_L1_END: + +/******************************************************************************/ + +.Lstrmm_kernel_L999: + mov x0, #0 // set return value + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) + ret + + EPILOGUE + diff --git a/kernel/arm64/zamax.S b/kernel/arm64/zamax.S index c2c0a5374..3b49c4fff 100644 --- a/kernel/arm64/zamax.S +++ b/kernel/arm64/zamax.S @@ -137,10 +137,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fadd v3.2d, v3.2d, v4.2d #if defined(USE_MIN) fmin v1.2d, v1.2d, v3.2d - fminp MAXF, v1.2d + fminp TMPF, v1.2d #else fmax v1.2d, v1.2d, v3.2d - fmaxp MAXF, v1.2d + fmaxp TMPF, v1.2d #endif #endif fcmp MAXF, TMPF diff --git a/kernel/arm64/znrm2.S b/kernel/arm64/znrm2.S index 1c89685ea..a530b80f0 100644 --- a/kernel/arm64/znrm2.S +++ b/kernel/arm64/znrm2.S @@ -54,138 +54,138 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
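An editorial note on the znrm2.S hunk that follows: the "_\@" suffix the old labels carried is GAS's per-expansion macro counter, which keeps labels unique when a macro is expanded more than once but is not understood by every assembler (clang's integrated assembler among them, which appears to be the motivation here). KERNEL_F1, expanded many times per build, therefore switches to numeric local labels (1:/1f, 2:/2f, ...), which may be redefined freely, while KERNEL_S1 is expanded only once and can simply drop the suffix and keep plain named labels.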
#if !defined(DOUBLE) ldr s4, [X], #4 fcmp s4, REGZERO - beq KERNEL_F1_NEXT_\@ + beq 2f /* KERNEL_F1_NEXT_\@ */ fabs s4, s4 fcmp SCALE, s4 - bge KERNEL_F1_SCALE_GE_XR_\@ + bge 1f /* KERNEL_F1_SCALE_GE_XR_\@ */ fdiv s2, SCALE, s4 fmul s2, s2, s2 fmul s3, SSQ, s2 fadd SSQ, REGONE, s3 fmov SCALE, s4 - b KERNEL_F1_NEXT_\@ -KERNEL_F1_SCALE_GE_XR_\@: + b 2f /* KERNEL_F1_NEXT_\@ */ +1: /* KERNEL_F1_SCALE_GE_XR_\@: */ fdiv s2, s4, SCALE fmla SSQ, s2, v2.s[0] -KERNEL_F1_NEXT_\@: +2: /* KERNEL_F1_NEXT_\@: */ ldr s5, [X], #4 fcmp s5, REGZERO - beq KERNEL_F1_END_\@ + beq 4f /* KERNEL_F1_END_\@ */ fabs s5, s5 fcmp SCALE, s5 - bge KERNEL_F1_SCALE_GE_XI_\@ + bge 3f /* KERNEL_F1_SCALE_GE_XI_\@ */ fdiv s2, SCALE, s5 fmul s2, s2, s2 fmul s3, SSQ, s2 fadd SSQ, REGONE, s3 fmov SCALE, s5 - b KERNEL_F1_END_\@ -KERNEL_F1_SCALE_GE_XI_\@: + b 4f /* KERNEL_F1_END_\@ */ +3: /* KERNEL_F1_SCALE_GE_XI_\@: */ fdiv s2, s5, SCALE fmla SSQ, s2, v2.s[0] #else ldr d4, [X], #8 fcmp d4, REGZERO - beq KERNEL_F1_NEXT_\@ + beq 2f /* KERNEL_F1_NEXT_\@ */ fabs d4, d4 fcmp SCALE, d4 - bge KERNEL_F1_SCALE_GE_XR_\@ + bge 1f /* KERNEL_F1_SCALE_GE_XR_\@ */ fdiv d2, SCALE, d4 fmul d2, d2, d2 fmul d3, SSQ, d2 fadd SSQ, REGONE, d3 fmov SCALE, d4 - b KERNEL_F1_NEXT_\@ -KERNEL_F1_SCALE_GE_XR_\@: + b 2f /* KERNEL_F1_NEXT_\@ */ +1: /* KERNEL_F1_SCALE_GE_XR_\@: */ fdiv d2, d4, SCALE fmla SSQ, d2, v2.d[0] -KERNEL_F1_NEXT_\@: +2: /* KERNEL_F1_NEXT_\@: */ ldr d5, [X], #8 fcmp d5, REGZERO - beq KERNEL_F1_END_\@ + beq 4f /* KERNEL_F1_END_\@ */ fabs d5, d5 fcmp SCALE, d5 - bge KERNEL_F1_SCALE_GE_XI_\@ + bge 3f /* KERNEL_F1_SCALE_GE_XI_\@ */ fdiv d2, SCALE, d5 fmul d2, d2, d2 fmul d3, SSQ, d2 fadd SSQ, REGONE, d3 fmov SCALE, d5 - b KERNEL_F1_END_\@ -KERNEL_F1_SCALE_GE_XI_\@: + b 4f /* KERNEL_F1_END_\@ */ +3: /* KERNEL_F1_SCALE_GE_XI_\@: */ fdiv d2, d5, SCALE fmla SSQ, d2, v2.d[0] #endif -KERNEL_F1_END_\@: +4: /* KERNEL_F1_END_\@: */ .endm .macro KERNEL_S1 #if !defined(DOUBLE) ldr s4, [X] fcmp s4, REGZERO - beq KERNEL_S1_NEXT_\@ + beq KERNEL_S1_NEXT fabs s4, s4 fcmp SCALE, s4 - bge KERNEL_S1_SCALE_GE_XR_\@ + bge KERNEL_S1_SCALE_GE_XR fdiv s2, SCALE, s4 fmul s2, s2, s2 fmul s3, SSQ, s2 fadd SSQ, REGONE, s3 fmov SCALE, s4 - b KERNEL_S1_NEXT_\@ -KERNEL_S1_SCALE_GE_XR_\@: + b KERNEL_S1_NEXT +KERNEL_S1_SCALE_GE_XR: fdiv s2, s4, SCALE fmla SSQ, s2, v2.s[0] -KERNEL_S1_NEXT_\@: +KERNEL_S1_NEXT: ldr s5, [X, #4] fcmp s5, REGZERO - beq KERNEL_S1_END_\@ + beq KERNEL_S1_END fabs s5, s5 fcmp SCALE, s5 - bge KERNEL_S1_SCALE_GE_XI_\@ + bge KERNEL_S1_SCALE_GE_XI fdiv s2, SCALE, s5 fmul s2, s2, s2 fmul s3, SSQ, s2 fadd SSQ, REGONE, s3 fmov SCALE, s5 - b KERNEL_S1_END_\@ -KERNEL_S1_SCALE_GE_XI_\@: + b KERNEL_S1_END +KERNEL_S1_SCALE_GE_XI: fdiv s2, s5, SCALE fmla SSQ, s2, v2.s[0] #else ldr d4, [X] fcmp d4, REGZERO - beq KERNEL_S1_NEXT_\@ + beq KERNEL_S1_NEXT fabs d4, d4 fcmp SCALE, d4 - bge KERNEL_S1_SCALE_GE_XR_\@ + bge KERNEL_S1_SCALE_GE_XR fdiv d2, SCALE, d4 fmul d2, d2, d2 fmul d3, SSQ, d2 fadd SSQ, REGONE, d3 fmov SCALE, d4 - b KERNEL_S1_NEXT_\@ -KERNEL_S1_SCALE_GE_XR_\@: + b KERNEL_S1_NEXT +KERNEL_S1_SCALE_GE_XR: fdiv d2, d4, SCALE fmla SSQ, d2, v2.d[0] -KERNEL_S1_NEXT_\@: +KERNEL_S1_NEXT: ldr d5, [X, #8] fcmp d5, REGZERO - beq KERNEL_S1_END_\@ + beq KERNEL_S1_END fabs d5, d5 fcmp SCALE, d5 - bge KERNEL_S1_SCALE_GE_XI_\@ + bge KERNEL_S1_SCALE_GE_XI fdiv d2, SCALE, d5 fmul d2, d2, d2 fmul d3, SSQ, d2 fadd SSQ, REGONE, d3 fmov SCALE, d5 - b KERNEL_S1_END_\@ -KERNEL_S1_SCALE_GE_XI_\@: + b KERNEL_S1_END +KERNEL_S1_SCALE_GE_XI: fdiv d2, d5, SCALE fmla SSQ, d2, v2.d[0] 
#endif -KERNEL_S1_END_\@: +KERNEL_S1_END: add X, X, INC_X .endm diff --git a/kernel/generic/dot.c b/kernel/generic/dot.c index bc07bc78f..5abbb735c 100644 --- a/kernel/generic/dot.c +++ b/kernel/generic/dot.c @@ -27,7 +27,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" - +#include "../simd/intrin.h" #if defined(DSDOT) double CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) #else @@ -47,27 +47,59 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) if ( (inc_x == 1) && (inc_y == 1) ) { - - int n1 = n & -4; - - while(i < n1) + int n1 = n & -4; +#if V_SIMD && !defined(DSDOT) + const int vstep = v_nlanes_f32; + const int unrollx4 = n & (-vstep * 4); + const int unrollx = n & -vstep; + v_f32 vsum0 = v_zero_f32(); + v_f32 vsum1 = v_zero_f32(); + v_f32 vsum2 = v_zero_f32(); + v_f32 vsum3 = v_zero_f32(); + while(i < unrollx4) + { + vsum0 = v_muladd_f32( + v_loadu_f32(x + i), v_loadu_f32(y + i), vsum0 + ); + vsum1 = v_muladd_f32( + v_loadu_f32(x + i + vstep), v_loadu_f32(y + i + vstep), vsum1 + ); + vsum2 = v_muladd_f32( + v_loadu_f32(x + i + vstep*2), v_loadu_f32(y + i + vstep*2), vsum2 + ); + vsum3 = v_muladd_f32( + v_loadu_f32(x + i + vstep*3), v_loadu_f32(y + i + vstep*3), vsum3 + ); + i += vstep*4; + } + vsum0 = v_add_f32( + v_add_f32(vsum0, vsum1), v_add_f32(vsum2 , vsum3) + ); + while(i < unrollx) + { + vsum0 = v_muladd_f32( + v_loadu_f32(x + i), v_loadu_f32(y + i), vsum0 + ); + i += vstep; + } + dot = v_sum_f32(vsum0); +#elif defined(DSDOT) + for (; i < n1; i += 4) { - -#if defined(DSDOT) dot += (double) y[i] * (double) x[i] + (double) y[i+1] * (double) x[i+1] + (double) y[i+2] * (double) x[i+2] + (double) y[i+3] * (double) x[i+3] ; + } #else + for (; i < n1; i += 4) + { dot += y[i] * x[i] + y[i+1] * x[i+1] + y[i+2] * x[i+2] + y[i+3] * x[i+3] ; -#endif - i+=4 ; - } - +#endif while(i < n) { diff --git a/kernel/generic/gemm_beta.c b/kernel/generic/gemm_beta.c index c4e4f7abe..ccb772cc7 100644 --- a/kernel/generic/gemm_beta.c +++ b/kernel/generic/gemm_beta.c @@ -39,104 +39,56 @@ #include "common.h" int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT beta, - FLOAT *dummy2, BLASLONG dummy3, FLOAT *dummy4, BLASLONG dummy5, + IFLOAT *dummy2, BLASLONG dummy3, IFLOAT *dummy4, BLASLONG dummy5, FLOAT *c, BLASLONG ldc){ + BLASLONG i, j; + BLASLONG chunk, remain; FLOAT *c_offset1, *c_offset; - FLOAT ctemp1, ctemp2, ctemp3, ctemp4; - FLOAT ctemp5, ctemp6, ctemp7, ctemp8; - c_offset = c; - + chunk = m >> 3; + remain = m & 7; if (beta == ZERO){ - - j = n; - do { - c_offset1 = c_offset; - c_offset += ldc; - - i = (m >> 3); - if (i > 0){ - do { - *(c_offset1 + 0) = ZERO; - *(c_offset1 + 1) = ZERO; - *(c_offset1 + 2) = ZERO; - *(c_offset1 + 3) = ZERO; - *(c_offset1 + 4) = ZERO; - *(c_offset1 + 5) = ZERO; - *(c_offset1 + 6) = ZERO; - *(c_offset1 + 7) = ZERO; - c_offset1 += 8; - i --; - } while (i > 0); - } - - i = (m & 7); - if (i > 0){ - do { - *c_offset1 = ZERO; - c_offset1 ++; - i --; - } while (i > 0); - } - j --; - } while (j > 0); - + for(j=n; j>0; j--){ + c_offset1 = c_offset; + c_offset += ldc; + for(i=chunk; i>0; i--){ + *(c_offset1 + 0) = ZERO; + *(c_offset1 + 1) = ZERO; + *(c_offset1 + 2) = ZERO; + *(c_offset1 + 3) = ZERO; + *(c_offset1 + 4) = ZERO; + *(c_offset1 + 5) = ZERO; + *(c_offset1 + 6) = ZERO; + *(c_offset1 + 7) = ZERO; + c_offset1 += 8; + } + for(i=remain; i>0; i--){ + *c_offset1 = ZERO; + c_offset1 ++; + } + } } else { - - j = n; - do { - c_offset1 = c_offset; - c_offset += ldc; - - i = 
(m >> 3); - if (i > 0){ - do { - ctemp1 = *(c_offset1 + 0); - ctemp2 = *(c_offset1 + 1); - ctemp3 = *(c_offset1 + 2); - ctemp4 = *(c_offset1 + 3); - ctemp5 = *(c_offset1 + 4); - ctemp6 = *(c_offset1 + 5); - ctemp7 = *(c_offset1 + 6); - ctemp8 = *(c_offset1 + 7); - - ctemp1 *= beta; - ctemp2 *= beta; - ctemp3 *= beta; - ctemp4 *= beta; - ctemp5 *= beta; - ctemp6 *= beta; - ctemp7 *= beta; - ctemp8 *= beta; - - *(c_offset1 + 0) = ctemp1; - *(c_offset1 + 1) = ctemp2; - *(c_offset1 + 2) = ctemp3; - *(c_offset1 + 3) = ctemp4; - *(c_offset1 + 4) = ctemp5; - *(c_offset1 + 5) = ctemp6; - *(c_offset1 + 6) = ctemp7; - *(c_offset1 + 7) = ctemp8; - c_offset1 += 8; - i --; - } while (i > 0); - } - - i = (m & 7); - if (i > 0){ - do { - ctemp1 = *c_offset1; - ctemp1 *= beta; - *c_offset1 = ctemp1; - c_offset1 ++; - i --; - } while (i > 0); - } - j --; - } while (j > 0); - + for(j=n; j>0; j--){ + c_offset1 = c_offset; + c_offset += ldc; + for(i=chunk; i>0; i--){ + *(c_offset1 + 0) *= beta; + *(c_offset1 + 1) *= beta; + *(c_offset1 + 2) *= beta; + *(c_offset1 + 3) *= beta; + *(c_offset1 + 4) *= beta; + *(c_offset1 + 5) *= beta; + *(c_offset1 + 6) *= beta; + *(c_offset1 + 7) *= beta; + c_offset1 += 8; + } + for(i=remain; i>0; i--){ + *c_offset1 *= beta; + c_offset1 ++; + } + } } return 0; }; diff --git a/kernel/generic/gemm_ncopy_16.c b/kernel/generic/gemm_ncopy_16.c index 5f91d0dbe..d3ab46472 100644 --- a/kernel/generic/gemm_ncopy_16.c +++ b/kernel/generic/gemm_ncopy_16.c @@ -39,24 +39,24 @@ #include #include "common.h" -int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ BLASLONG i, j; - FLOAT *aoffset; - FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; - FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; - FLOAT *aoffset9, *aoffset10, *aoffset11, *aoffset12; - FLOAT *aoffset13, *aoffset14, *aoffset15, *aoffset16; - - FLOAT *boffset; - FLOAT ctemp01, ctemp02, ctemp03, ctemp04; - FLOAT ctemp05, ctemp06, ctemp07, ctemp08; - FLOAT ctemp09, ctemp10, ctemp11, ctemp12; - FLOAT ctemp13, ctemp14, ctemp15, ctemp16; - FLOAT ctemp17, ctemp18, ctemp19, ctemp20; - FLOAT ctemp21, ctemp22, ctemp23, ctemp24; - FLOAT ctemp25, ctemp26, ctemp27, ctemp28; - FLOAT ctemp29, ctemp30, ctemp31, ctemp32; + IFLOAT *aoffset; + IFLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + IFLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + IFLOAT *aoffset9, *aoffset10, *aoffset11, *aoffset12; + IFLOAT *aoffset13, *aoffset14, *aoffset15, *aoffset16; + + IFLOAT *boffset; + IFLOAT ctemp01, ctemp02, ctemp03, ctemp04; + IFLOAT ctemp05, ctemp06, ctemp07, ctemp08; + IFLOAT ctemp09, ctemp10, ctemp11, ctemp12; + IFLOAT ctemp13, ctemp14, ctemp15, ctemp16; + IFLOAT ctemp17, ctemp18, ctemp19, ctemp20; + IFLOAT ctemp21, ctemp22, ctemp23, ctemp24; + IFLOAT ctemp25, ctemp26, ctemp27, ctemp28; + IFLOAT ctemp29, ctemp30, ctemp31, ctemp32; aoffset = a; boffset = b; diff --git a/kernel/generic/gemm_ncopy_2.c b/kernel/generic/gemm_ncopy_2.c index b728c713f..415860f81 100644 --- a/kernel/generic/gemm_ncopy_2.c +++ b/kernel/generic/gemm_ncopy_2.c @@ -39,10 +39,10 @@ #include #include "common.h" -int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ BLASLONG i, j; - FLOAT *a_offset, *a_offset1, *a_offset2; - FLOAT *b_offset; + IFLOAT *a_offset, *a_offset1, *a_offset2; + IFLOAT *b_offset; a_offset = a; b_offset = b; diff --git a/kernel/generic/gemm_ncopy_8.c b/kernel/generic/gemm_ncopy_8.c 
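An editorial aside on the generic-kernel hunks above (not part of the patch): the rewritten dot.c path keeps four independent SIMD accumulators so consecutive fused multiply-adds are not serialized through a single register, and the FLOAT-to-IFLOAT renames in the copy kernels let the input element type diverge from the compute type in mixed-precision (bfloat16) builds. A plain-C model of the four-accumulator idea; the v_* names in the diff are the real wrappers from ../simd/intrin.h, while everything below is illustrative:

    /* Scalar model of the 4-accumulator reduction in dot.c.  Four partial
     * sums break the loop-carried dependency on a single accumulator; they
     * are combined pairwise only at the end, as v_add_f32/v_sum_f32 do. */
    #include <stddef.h>

    static float dot4_model(const float *x, const float *y, size_t n)
    {
        float s0 = 0.f, s1 = 0.f, s2 = 0.f, s3 = 0.f;
        size_t i = 0, n4 = n & ~(size_t)3;
        for (; i < n4; i += 4) {            /* plays the unrollx4 loop */
            s0 += x[i + 0] * y[i + 0];
            s1 += x[i + 1] * y[i + 1];
            s2 += x[i + 2] * y[i + 2];
            s3 += x[i + 3] * y[i + 3];
        }
        float s = (s0 + s1) + (s2 + s3);    /* pairwise combine        */
        for (; i < n; i++)                  /* scalar tail             */
            s += x[i] * y[i];
        return s;
    }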
index a49a778e6..aaf9c8917 100644 --- a/kernel/generic/gemm_ncopy_8.c +++ b/kernel/generic/gemm_ncopy_8.c @@ -39,30 +39,30 @@ #include #include "common.h" -int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ BLASLONG i, j; - FLOAT *aoffset; - FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; - FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; - - FLOAT *boffset; - FLOAT ctemp01, ctemp02, ctemp03, ctemp04; - FLOAT ctemp05, ctemp06, ctemp07, ctemp08; - FLOAT ctemp09, ctemp10, ctemp11, ctemp12; - FLOAT ctemp13, ctemp14, ctemp15, ctemp16; - FLOAT ctemp17, ctemp18, ctemp19, ctemp20; - FLOAT ctemp21, ctemp22, ctemp23, ctemp24; - FLOAT ctemp25, ctemp26, ctemp27, ctemp28; - FLOAT ctemp29, ctemp30, ctemp31, ctemp32; - FLOAT ctemp33, ctemp34, ctemp35, ctemp36; - FLOAT ctemp37, ctemp38, ctemp39, ctemp40; - FLOAT ctemp41, ctemp42, ctemp43, ctemp44; - FLOAT ctemp45, ctemp46, ctemp47, ctemp48; - FLOAT ctemp49, ctemp50, ctemp51, ctemp52; - FLOAT ctemp53, ctemp54, ctemp55, ctemp56; - FLOAT ctemp57, ctemp58, ctemp59, ctemp60; - FLOAT ctemp61, ctemp62, ctemp63, ctemp64; + IFLOAT *aoffset; + IFLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + IFLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + + IFLOAT *boffset; + IFLOAT ctemp01, ctemp02, ctemp03, ctemp04; + IFLOAT ctemp05, ctemp06, ctemp07, ctemp08; + IFLOAT ctemp09, ctemp10, ctemp11, ctemp12; + IFLOAT ctemp13, ctemp14, ctemp15, ctemp16; + IFLOAT ctemp17, ctemp18, ctemp19, ctemp20; + IFLOAT ctemp21, ctemp22, ctemp23, ctemp24; + IFLOAT ctemp25, ctemp26, ctemp27, ctemp28; + IFLOAT ctemp29, ctemp30, ctemp31, ctemp32; + IFLOAT ctemp33, ctemp34, ctemp35, ctemp36; + IFLOAT ctemp37, ctemp38, ctemp39, ctemp40; + IFLOAT ctemp41, ctemp42, ctemp43, ctemp44; + IFLOAT ctemp45, ctemp46, ctemp47, ctemp48; + IFLOAT ctemp49, ctemp50, ctemp51, ctemp52; + IFLOAT ctemp53, ctemp54, ctemp55, ctemp56; + IFLOAT ctemp57, ctemp58, ctemp59, ctemp60; + IFLOAT ctemp61, ctemp62, ctemp63, ctemp64; aoffset = a; diff --git a/kernel/generic/gemm_tcopy_16.c b/kernel/generic/gemm_tcopy_16.c index 56268ebf2..14252599a 100644 --- a/kernel/generic/gemm_tcopy_16.c +++ b/kernel/generic/gemm_tcopy_16.c @@ -39,22 +39,22 @@ #include #include "common.h" -int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ BLASLONG i, j; - FLOAT *aoffset; - FLOAT *aoffset1, *aoffset2; - FLOAT *boffset; - - FLOAT ctemp01, ctemp02, ctemp03, ctemp04; - FLOAT ctemp05, ctemp06, ctemp07, ctemp08; - FLOAT ctemp09, ctemp10, ctemp11, ctemp12; - FLOAT ctemp13, ctemp14, ctemp15, ctemp16; - FLOAT ctemp17, ctemp18, ctemp19, ctemp20; - FLOAT ctemp21, ctemp22, ctemp23, ctemp24; - FLOAT ctemp25, ctemp26, ctemp27, ctemp28; - FLOAT ctemp29, ctemp30, ctemp31, ctemp32; + IFLOAT *aoffset; + IFLOAT *aoffset1, *aoffset2; + IFLOAT *boffset; + + IFLOAT ctemp01, ctemp02, ctemp03, ctemp04; + IFLOAT ctemp05, ctemp06, ctemp07, ctemp08; + IFLOAT ctemp09, ctemp10, ctemp11, ctemp12; + IFLOAT ctemp13, ctemp14, ctemp15, ctemp16; + IFLOAT ctemp17, ctemp18, ctemp19, ctemp20; + IFLOAT ctemp21, ctemp22, ctemp23, ctemp24; + IFLOAT ctemp25, ctemp26, ctemp27, ctemp28; + IFLOAT ctemp29, ctemp30, ctemp31, ctemp32; aoffset = a; boffset = b; diff --git a/kernel/generic/gemm_tcopy_2.c b/kernel/generic/gemm_tcopy_2.c index 5695b13c2..b4aa4de57 100644 --- a/kernel/generic/gemm_tcopy_2.c +++ b/kernel/generic/gemm_tcopy_2.c @@ -39,11 +39,11 @@ #include #include "common.h" -int CNAME(BLASLONG 
m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ BLASLONG i, j; - FLOAT *a_offset, *a_offset1, *a_offset2; - FLOAT *b_offset, *b_offset1, *b_offset2; + IFLOAT *a_offset, *a_offset1, *a_offset2; + IFLOAT *b_offset, *b_offset1, *b_offset2; a_offset = a; b_offset = b; diff --git a/kernel/generic/gemm_tcopy_8.c b/kernel/generic/gemm_tcopy_8.c index b28f3d219..3e8a839db 100644 --- a/kernel/generic/gemm_tcopy_8.c +++ b/kernel/generic/gemm_tcopy_8.c @@ -39,32 +39,32 @@ #include <stdio.h> #include "common.h" -int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ BLASLONG i, j; - FLOAT *aoffset; - FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; - FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; - - FLOAT *boffset, *boffset1, *boffset2, *boffset3, *boffset4; - - FLOAT ctemp01, ctemp02, ctemp03, ctemp04; - FLOAT ctemp05, ctemp06, ctemp07, ctemp08; - FLOAT ctemp09, ctemp10, ctemp11, ctemp12; - FLOAT ctemp13, ctemp14, ctemp15, ctemp16; - FLOAT ctemp17, ctemp18, ctemp19, ctemp20; - FLOAT ctemp21, ctemp22, ctemp23, ctemp24; - FLOAT ctemp25, ctemp26, ctemp27, ctemp28; - FLOAT ctemp29, ctemp30, ctemp31, ctemp32; - FLOAT ctemp33, ctemp34, ctemp35, ctemp36; - FLOAT ctemp37, ctemp38, ctemp39, ctemp40; - FLOAT ctemp41, ctemp42, ctemp43, ctemp44; - FLOAT ctemp45, ctemp46, ctemp47, ctemp48; - FLOAT ctemp49, ctemp50, ctemp51, ctemp52; - FLOAT ctemp53, ctemp54, ctemp55, ctemp56; - FLOAT ctemp57, ctemp58, ctemp59, ctemp60; - FLOAT ctemp61, ctemp62, ctemp63, ctemp64; + IFLOAT *aoffset; + IFLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + IFLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + + IFLOAT *boffset, *boffset1, *boffset2, *boffset3, *boffset4; + + IFLOAT ctemp01, ctemp02, ctemp03, ctemp04; + IFLOAT ctemp05, ctemp06, ctemp07, ctemp08; + IFLOAT ctemp09, ctemp10, ctemp11, ctemp12; + IFLOAT ctemp13, ctemp14, ctemp15, ctemp16; + IFLOAT ctemp17, ctemp18, ctemp19, ctemp20; + IFLOAT ctemp21, ctemp22, ctemp23, ctemp24; + IFLOAT ctemp25, ctemp26, ctemp27, ctemp28; + IFLOAT ctemp29, ctemp30, ctemp31, ctemp32; + IFLOAT ctemp33, ctemp34, ctemp35, ctemp36; + IFLOAT ctemp37, ctemp38, ctemp39, ctemp40; + IFLOAT ctemp41, ctemp42, ctemp43, ctemp44; + IFLOAT ctemp45, ctemp46, ctemp47, ctemp48; + IFLOAT ctemp49, ctemp50, ctemp51, ctemp52; + IFLOAT ctemp53, ctemp54, ctemp55, ctemp56; + IFLOAT ctemp57, ctemp58, ctemp59, ctemp60; + IFLOAT ctemp61, ctemp62, ctemp63, ctemp64; aoffset = a; boffset = b; diff --git a/kernel/generic/gemmkernel_2x2.c b/kernel/generic/gemmkernel_2x2.c index 01f1c67b5..bf1c3ae38 100644 --- a/kernel/generic/gemmkernel_2x2.c +++ b/kernel/generic/gemmkernel_2x2.c @@ -1,13 +1,32 @@ #include "common.h" -int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc +#if defined(BFLOAT16) && defined(BFLOAT16CONVERSION) +static float +bfloat16tof32 (bfloat16 f16) +{ + float result = 0; + unsigned short* q = (unsigned short*)(&result); +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + q[0] = f16; +#else + q[1] = f16; +#endif + return result; +} +#define BF16TOF32(x) (bfloat16tof32(x)) +#else +#define BF16TOF32(x) x +#endif +int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,IFLOAT* ba,IFLOAT* bb,FLOAT* C,BLASLONG ldc #ifdef TRMMKERNEL ,BLASLONG offset #endif ) { BLASLONG i,j,k; - FLOAT *C0,*C1,*ptrba,*ptrbb; - FLOAT res0,res1,res2,res3,load0,load1,load2,load3,load4,load5,load6,load7; + FLOAT *C0,*C1; + IFLOAT 
*ptrba,*ptrbb; + FLOAT res0,res1,res2,res3; + IFLOAT load0,load1,load2,load3,load4,load5,load6,load7; for (j=0; j> 1); j--;) diff --git a/kernel/mips/cscal_msa.c b/kernel/mips/cscal_msa.c index 11a1450cf..451d0c921 100644 --- a/kernel/mips/cscal_msa.c +++ b/kernel/mips/cscal_msa.c @@ -49,11 +49,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, { if ((0.0 == da_r) && (0.0 == da_i)) { - v4f32 zero_v = __msa_cast_to_vector_float(0); - zero_v = (v4f32) __msa_insert_w((v4i32) zero_v, 0, 0.0); - zero_v = (v4f32) __msa_insert_w((v4i32) zero_v, 1, 0.0); - zero_v = (v4f32) __msa_insert_w((v4i32) zero_v, 2, 0.0); - zero_v = (v4f32) __msa_insert_w((v4i32) zero_v, 3, 0.0); + v4f32 zero_v = {0.0, 0.0, 0.0, 0.0}; for (i = (n >> 5); i--;) { diff --git a/kernel/mips/dscal_msa.c b/kernel/mips/dscal_msa.c index 6ce0375ab..2e41d8bef 100644 --- a/kernel/mips/dscal_msa.c +++ b/kernel/mips/dscal_msa.c @@ -44,9 +44,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, { if (0.0 == da) { - v2f64 zero_v = __msa_cast_to_vector_double(0); - zero_v = (v2f64) __msa_insert_d((v2i64) zero_v, 0, 0.0); - zero_v = (v2f64) __msa_insert_d((v2i64) zero_v, 1, 0.0); + v2f64 zero_v = {0.0, 0.0}; for (i = (n >> 5); i--;) { diff --git a/kernel/mips/dswap_msa.c b/kernel/mips/dswap_msa.c index 7b1f02477..67e97f710 100644 --- a/kernel/mips/dswap_msa.c +++ b/kernel/mips/dswap_msa.c @@ -184,7 +184,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, } } } - else + else if ((inc_x != 0) && (inc_y != 0)) { for (i = (n >> 3); i--;) { @@ -248,6 +248,32 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, } } } - + else + { + if (inc_x == inc_y) + { + if (n & 1) + { + x0 = *srcx; + *srcx = *srcy; + *srcy = x0; + } + else + return (0); + } + else + { + BLASLONG ix = 0, iy = 0; + while (i < n) + { + x0 = srcx[ix]; + srcx[ix] = srcy[iy]; + srcy[iy] = x0; + ix += inc_x; + iy += inc_y; + i++; + } + } + } return (0); } diff --git a/kernel/mips/dtrsm_kernel_LN_8x4_msa.c b/kernel/mips/dtrsm_kernel_LN_8x4_msa.c index 9fb5141ca..e2cd3aa4b 100644 --- a/kernel/mips/dtrsm_kernel_LN_8x4_msa.c +++ b/kernel/mips/dtrsm_kernel_LN_8x4_msa.c @@ -186,8 +186,7 @@ void dsolve_8x4_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) ILVRL_D2_DP(src_c14, src_c10, res_c12, res_c13); ILVRL_D2_DP(src_c15, src_c11, res_c14, res_c15); - src_a54 = __msa_cast_to_vector_double(*(a + 54)); - src_a54 = (v2f64) __msa_splati_d((v2i64) src_a54, 0); + src_a54 = COPY_DOUBLE_TO_VECTOR(*(a + 54)); src_a62 = LD_DP(a + 62); src_a63 = (v2f64) __msa_splati_d((v2i64) src_a62, 1); src_a62 = (v2f64) __msa_splati_d((v2i64) src_a62, 0); @@ -200,8 +199,7 @@ void dsolve_8x4_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) src_a44 = LD_DP(a + 44); src_a45 = (v2f64) __msa_splati_d((v2i64) src_a44, 1); src_a44 = (v2f64) __msa_splati_d((v2i64) src_a44, 0); - src_a36 = __msa_cast_to_vector_double(*(a + 36)); - src_a36 = (v2f64) __msa_splati_d((v2i64) src_a36, 0); + src_a36 = COPY_DOUBLE_TO_VECTOR(*(a + 36)); res_c7 *= src_a63; res_c6 -= res_c7 * src_a62; @@ -271,8 +269,7 @@ void dsolve_8x4_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) src_a26 = LD_DP(a + 26); src_a27 = (v2f64) __msa_splati_d((v2i64) src_a26, 1); src_a26 = (v2f64) __msa_splati_d((v2i64) src_a26, 0); - src_a18 = __msa_cast_to_vector_double(*(a + 18)); - src_a18 = (v2f64) __msa_splati_d((v2i64) src_a18, 0); + src_a18 = COPY_DOUBLE_TO_VECTOR(*(a + 18)); res_c3 -= res_c7 * src_a59; res_c2 
-= res_c7 * src_a58; @@ -358,8 +355,7 @@ void dsolve_8x4_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) src_a8 = LD_DP(a + 8); src_a9 = (v2f64) __msa_splati_d((v2i64) src_a8, 1); src_a8 = (v2f64) __msa_splati_d((v2i64) src_a8, 0); - src_a0 = __msa_cast_to_vector_double(*(a + 0)); - src_a0 = (v2f64) __msa_splati_d((v2i64) src_a0, 0); + src_a0 = COPY_DOUBLE_TO_VECTOR(*(a + 0)); res_c1 -= res_c2 * src_a17; res_c1 *= src_a9; @@ -488,8 +484,7 @@ static void dsolve_8x2_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_a52 = LD_DP(a - 12); src_a53 = (v2f64) __msa_splati_d((v2i64) src_a52, 1); src_a52 = (v2f64) __msa_splati_d((v2i64) src_a52, 0); - src_a54 = __msa_cast_to_vector_double(*(a - 10)); - src_a54 = (v2f64) __msa_splati_d((v2i64) src_a54, 0); + src_a54 = COPY_DOUBLE_TO_VECTOR(*(a -10)); src_a40 = LD_DP(a - 24); src_a41 = (v2f64) __msa_splati_d((v2i64) src_a40, 1); @@ -526,8 +521,7 @@ static void dsolve_8x2_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_a34 = LD_DP(a - 30); src_a35 = (v2f64) __msa_splati_d((v2i64) src_a34, 1); src_a34 = (v2f64) __msa_splati_d((v2i64) src_a34, 0); - src_a36 = __msa_cast_to_vector_double(*(a - 28)); - src_a36 = (v2f64) __msa_splati_d((v2i64) src_a36, 0); + src_a36 = COPY_DOUBLE_TO_VECTOR(*(a -28)); res_c4 *= src_a36; res_c3 -= res_c4 * src_a35; @@ -544,10 +538,8 @@ static void dsolve_8x2_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_a16 = LD_DP(a - 48); src_a17 = (v2f64) __msa_splati_d((v2i64) src_a16, 1); src_a16 = (v2f64) __msa_splati_d((v2i64) src_a16, 0); - src_a18 = __msa_cast_to_vector_double(*(a - 46)); - src_a18 = (v2f64) __msa_splati_d((v2i64) src_a18, 0); - src_a0 = __msa_cast_to_vector_double(*(a - 64)); - src_a0 = (v2f64) __msa_splati_d((v2i64) src_a0, 0); + src_a18 = COPY_DOUBLE_TO_VECTOR(*(a - 46)); + src_a0 = COPY_DOUBLE_TO_VECTOR(*(a - 64)); src_a8 = LD_DP(a - 56); src_a9 = (v2f64) __msa_splati_d((v2i64) src_a8, 1); src_a8 = (v2f64) __msa_splati_d((v2i64) src_a8, 0); @@ -785,11 +777,8 @@ static void dsolve_4x4_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_a10 = (v2f64) __msa_splati_d((v2i64) src_a9, 1); src_a9 = (v2f64) __msa_splati_d((v2i64) src_a9, 0); - src_a8 = __msa_cast_to_vector_double(*(a + 8)); - src_a0 = __msa_cast_to_vector_double(*(a + 0)); - - src_a8 = (v2f64) __msa_splati_d((v2i64) src_a8, 0); - src_a0 = (v2f64) __msa_splati_d((v2i64) src_a0, 0); + src_a8 = COPY_DOUBLE_TO_VECTOR(*(a + 8)); + src_a0 = COPY_DOUBLE_TO_VECTOR(*(a + 0)); src_a4 = LD_DP(a + 4); src_a5 = (v2f64) __msa_splati_d((v2i64) src_a4, 1); @@ -890,11 +879,8 @@ static void dsolve_4x2_ln_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_a10 = (v2f64) __msa_splati_d((v2i64) src_a9, 1); src_a9 = (v2f64) __msa_splati_d((v2i64) src_a9, 0); - src_a8 = __msa_cast_to_vector_double(*(a + 8)); - src_a0 = __msa_cast_to_vector_double(*(a + 0)); - - src_a8 = (v2f64) __msa_splati_d((v2i64) src_a8, 0); - src_a0 = (v2f64) __msa_splati_d((v2i64) src_a0, 0); + src_a8 = COPY_DOUBLE_TO_VECTOR(*(a + 8)); + src_a0 = COPY_DOUBLE_TO_VECTOR(*(a + 0)); src_a4 = LD_DP(a + 4); src_a5 = (v2f64) __msa_splati_d((v2i64) src_a4, 1); diff --git a/kernel/mips/dtrsm_kernel_LT_8x4_msa.c b/kernel/mips/dtrsm_kernel_LT_8x4_msa.c index 525fc8585..74cc1278a 100644 --- a/kernel/mips/dtrsm_kernel_LT_8x4_msa.c +++ b/kernel/mips/dtrsm_kernel_LT_8x4_msa.c @@ -215,8 +215,7 @@ void dsolve_8x4_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) res_c14 -= res_c8 * src_a6; res_c15 -= res_c8 * src_a7; - src_a9 = 
__msa_cast_to_vector_double(*(a + 9)); - src_a9 = (v2f64) __msa_splati_d((v2i64) src_a9, 0); + src_a9 = COPY_DOUBLE_TO_VECTOR(*(a + 9)); src_a10 = LD_DP(a + 10); src_a11 = (v2f64) __msa_splati_d((v2i64) src_a10, 1); src_a10 = (v2f64) __msa_splati_d((v2i64) src_a10, 0); @@ -280,8 +279,7 @@ void dsolve_8x4_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) res_c14 -= res_c10 * src_a22; res_c15 -= res_c10 * src_a23; - src_a27 = __msa_cast_to_vector_double(*(a + 27)); - src_a27 = (v2f64) __msa_splati_d((v2i64) src_a27, 0); + src_a27 = COPY_DOUBLE_TO_VECTOR(*(a + 27)); src_a28 = LD_DP(a + 28); src_a29 = (v2f64) __msa_splati_d((v2i64) src_a28, 1); src_a28 = (v2f64) __msa_splati_d((v2i64) src_a28, 0); @@ -326,8 +324,7 @@ void dsolve_8x4_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) res_c14 -= res_c12 * src_a38; res_c15 -= res_c12 * src_a39; - src_a45 = __msa_cast_to_vector_double(*(a + 45)); - src_a45 = (v2f64) __msa_splati_d((v2i64) src_a45, 0); + src_a45 = COPY_DOUBLE_TO_VECTOR(*(a + 45)); src_a46 = LD_DP(a + 46); src_a47 = (v2f64) __msa_splati_d((v2i64) src_a46, 1); src_a46 = (v2f64) __msa_splati_d((v2i64) src_a46, 0); @@ -353,8 +350,7 @@ void dsolve_8x4_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) ILVRL_D2_DP(res_c5, res_c4, src_c2, src_c6); ILVRL_D2_DP(res_c13, res_c12, src_c10, src_c14); - src_a63 = __msa_cast_to_vector_double(*(a + 63)); - src_a63 = (v2f64) __msa_splati_d((v2i64) src_a63, 0); + src_a63 = COPY_DOUBLE_TO_VECTOR(*(a + 63)); src_a54 = LD_DP(a + 54); src_a55 = (v2f64) __msa_splati_d((v2i64) src_a54, 1); src_a54 = (v2f64) __msa_splati_d((v2i64) src_a54, 0); @@ -478,8 +474,7 @@ static void dsolve_8x2_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO res_c6 -= res_c0 * src_a6; res_c7 -= res_c0 * src_a7; - src_a9 = __msa_cast_to_vector_double(*(a + 9)); - src_a9 = (v2f64) __msa_splati_d((v2i64) src_a9, 0); + src_a9 = COPY_DOUBLE_TO_VECTOR(*(a + 9)); src_a10 = LD_DP(a + 10); src_a11 = (v2f64) __msa_splati_d((v2i64) src_a10, 1); src_a10 = (v2f64) __msa_splati_d((v2i64) src_a10, 0); @@ -515,8 +510,7 @@ static void dsolve_8x2_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO res_c6 -= res_c2 * src_a22; res_c7 -= res_c2 * src_a23; - src_a27 = __msa_cast_to_vector_double(*(a + 27)); - src_a27 = (v2f64) __msa_splati_d((v2i64) src_a27, 0); + src_a27 = COPY_DOUBLE_TO_VECTOR(*(a + 27)); src_a28 = LD_DP(a + 28); src_a29 = (v2f64) __msa_splati_d((v2i64) src_a28, 1); src_a28 = (v2f64) __msa_splati_d((v2i64) src_a28, 0); @@ -553,8 +547,7 @@ static void dsolve_8x2_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO res_c6 -= res_c4 * src_a38; res_c7 -= res_c4 * src_a39; - src_a45 = __msa_cast_to_vector_double(*(a + 45)); - src_a45 = (v2f64) __msa_splati_d((v2i64) src_a45, 0); + src_a45 = COPY_DOUBLE_TO_VECTOR(*(a + 45)); src_a46 = LD_DP(a + 46); src_a47 = (v2f64) __msa_splati_d((v2i64) src_a46, 1); src_a46 = (v2f64) __msa_splati_d((v2i64) src_a46, 0); @@ -563,8 +556,7 @@ static void dsolve_8x2_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO res_c6 -= res_c5 * src_a46; res_c7 -= res_c5 * src_a47; - src_a63 = __msa_cast_to_vector_double(*(a + 63)); - src_a63 = (v2f64) __msa_splati_d((v2i64) src_a63, 0); + src_a63 = COPY_DOUBLE_TO_VECTOR(*(a + 63)); src_a54 = LD_DP(a + 54); src_a55 = (v2f64) __msa_splati_d((v2i64) src_a54, 1); src_a54 = (v2f64) __msa_splati_d((v2i64) src_a54, 0); @@ -786,8 +778,7 @@ static void dsolve_4x4_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO res_c6 -= res_c4 * src_a2; res_c7 -= 
res_c4 * src_a3; - src_a5 = __msa_cast_to_vector_double(*(a + 5)); - src_a5 = (v2f64) __msa_splati_d((v2i64) src_a5, 0); + src_a5 = COPY_DOUBLE_TO_VECTOR(*(a + 5)); src_a6 = LD_DP(a + 6); src_a7 = (v2f64) __msa_splati_d((v2i64) src_a6, 1); src_a6 = (v2f64) __msa_splati_d((v2i64) src_a6, 0); @@ -803,8 +794,7 @@ static void dsolve_4x4_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_a10 = LD_DP(a + 10); src_a11 = (v2f64) __msa_splati_d((v2i64) src_a10, 1); src_a10 = (v2f64) __msa_splati_d((v2i64) src_a10, 0); - src_a15 = __msa_cast_to_vector_double(*(a + 15)); - src_a15 = (v2f64) __msa_splati_d((v2i64) src_a15, 0); + src_a15 = COPY_DOUBLE_TO_VECTOR(*(a + 15)); res_c2 *= src_a10; res_c3 -= res_c2 * src_a11; @@ -881,8 +871,7 @@ static void dsolve_4x2_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO res_c2 -= res_c0 * src_a2; res_c3 -= res_c0 * src_a3; - src_a5 = __msa_cast_to_vector_double(*(a + 5)); - src_a5 = (v2f64) __msa_splati_d((v2i64) src_a5, 0); + src_a5 = COPY_DOUBLE_TO_VECTOR(*(a + 5)); src_a6 = LD_DP(a + 6); src_a7 = (v2f64) __msa_splati_d((v2i64) src_a6, 1); src_a6 = (v2f64) __msa_splati_d((v2i64) src_a6, 0); @@ -894,8 +883,7 @@ static void dsolve_4x2_lt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_a10 = LD_DP(a + 10); src_a11 = (v2f64) __msa_splati_d((v2i64) src_a10, 1); src_a10 = (v2f64) __msa_splati_d((v2i64) src_a10, 0); - src_a15 = __msa_cast_to_vector_double(*(a + 15)); - src_a15 = (v2f64) __msa_splati_d((v2i64) src_a15, 0); + src_a15 = COPY_DOUBLE_TO_VECTOR(*(a + 15)); res_c2 *= src_a10; res_c3 -= res_c2 * src_a11; diff --git a/kernel/mips/dtrsm_kernel_RN_8x4_msa.c b/kernel/mips/dtrsm_kernel_RN_8x4_msa.c index cb361c511..03036f1c7 100644 --- a/kernel/mips/dtrsm_kernel_RN_8x4_msa.c +++ b/kernel/mips/dtrsm_kernel_RN_8x4_msa.c @@ -161,16 +161,14 @@ void dsolve_8x4_rn_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) src_b2 = LD_DP(b + 2); src_b3 = (v2f64) __msa_splati_d((v2i64) src_b2, 1); src_b2 = (v2f64) __msa_splati_d((v2i64) src_b2, 0); - src_b5 = __msa_cast_to_vector_double(*(b + 5)); - src_b5 = (v2f64) __msa_splati_d((v2i64) src_b5, 0); + src_b5 = COPY_DOUBLE_TO_VECTOR(*(b + 5)); src_b6 = LD_DP(b + 6); src_b7 = (v2f64) __msa_splati_d((v2i64) src_b6, 1); src_b6 = (v2f64) __msa_splati_d((v2i64) src_b6, 0); src_b10 = LD_DP(b + 10); src_b11 = (v2f64) __msa_splati_d((v2i64) src_b10, 1); src_b10 = (v2f64) __msa_splati_d((v2i64) src_b10, 0); - src_b15 = __msa_cast_to_vector_double(*(b + 15)); - src_b15 = (v2f64) __msa_splati_d((v2i64) src_b15, 0); + src_b15 = COPY_DOUBLE_TO_VECTOR(*(b + 15)); src_c0 *= src_b0; src_c1 *= src_b0; @@ -294,8 +292,7 @@ static void dsolve_8x2_rn_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_b0 = LD_DP(b + 0); src_b1 = (v2f64) __msa_splati_d((v2i64) src_b0, 1); src_b0 = (v2f64) __msa_splati_d((v2i64) src_b0, 0); - src_b3 = __msa_cast_to_vector_double(*(b + 3)); - src_b3 = (v2f64) __msa_splati_d((v2i64) src_b3, 0); + src_b3 = COPY_DOUBLE_TO_VECTOR(*(b + 3)); src_c0 *= src_b0; src_c1 *= src_b0; @@ -347,8 +344,7 @@ static void dsolve_8x1_rn_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG bk) } } - src_b0 = __msa_cast_to_vector_double(*b); - src_b0 = (v2f64) __msa_splati_d((v2i64) src_b0, 0); + src_b0 = COPY_DOUBLE_TO_VECTOR(*b); src_c0 *= src_b0; src_c1 *= src_b0; @@ -407,16 +403,14 @@ static void dsolve_4x4_rn_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_b2 = LD_DP(b + 2); src_b3 = (v2f64) __msa_splati_d((v2i64) src_b2, 1); src_b2 = (v2f64) __msa_splati_d((v2i64) src_b2, 0); - src_b5 = 
__msa_cast_to_vector_double(*(b + 5)); - src_b5 = (v2f64) __msa_splati_d((v2i64) src_b5, 0); + src_b5 = COPY_DOUBLE_TO_VECTOR(*(b + 5)); src_b6 = LD_DP(b + 6); src_b7 = (v2f64) __msa_splati_d((v2i64) src_b6, 1); src_b6 = (v2f64) __msa_splati_d((v2i64) src_b6, 0); src_b10 = LD_DP(b + 10); src_b11 = (v2f64) __msa_splati_d((v2i64) src_b10, 1); src_b10 = (v2f64) __msa_splati_d((v2i64) src_b10, 0); - src_b15 = __msa_cast_to_vector_double(*(b + 15)); - src_b15 = (v2f64) __msa_splati_d((v2i64) src_b15, 0); + src_b15 = COPY_DOUBLE_TO_VECTOR(*(b + 15)); src_c0 *= src_b0; src_c1 *= src_b0; @@ -490,8 +484,7 @@ static void dsolve_4x2_rn_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_b0 = LD_DP(b + 0); src_b1 = (v2f64) __msa_splati_d((v2i64) src_b0, 1); src_b0 = (v2f64) __msa_splati_d((v2i64) src_b0, 0); - src_b3 = __msa_cast_to_vector_double(*(b + 3)); - src_b3 = (v2f64) __msa_splati_d((v2i64) src_b3, 0); + src_b3 = COPY_DOUBLE_TO_VECTOR(*(b + 3)); src_c0 *= src_b0; src_c1 *= src_b0; diff --git a/kernel/mips/dtrsm_kernel_RT_8x4_msa.c b/kernel/mips/dtrsm_kernel_RT_8x4_msa.c index 581a90f71..4c55a0f37 100644 --- a/kernel/mips/dtrsm_kernel_RT_8x4_msa.c +++ b/kernel/mips/dtrsm_kernel_RT_8x4_msa.c @@ -168,11 +168,9 @@ void dsolve_8x4_rt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG bk) src_b8 = LD_DP(b + 8); src_b9 = (v2f64) __msa_splati_d((v2i64) src_b8, 1); src_b8 = (v2f64) __msa_splati_d((v2i64) src_b8, 0); - src_b10 = __msa_cast_to_vector_double(*(b + 10)); - src_b10 = (v2f64) __msa_splati_d((v2i64) src_b10, 0); + src_b10 = COPY_DOUBLE_TO_VECTOR(*(b + 10)); - src_b0 = __msa_cast_to_vector_double(*(b + 0)); - src_b0 = (v2f64) __msa_splati_d((v2i64) src_b0, 0); + src_b0 = COPY_DOUBLE_TO_VECTOR(*(b + 0)); src_b4 = LD_DP(b + 4); src_b5 = (v2f64) __msa_splati_d((v2i64) src_b4, 1); src_b4 = (v2f64) __msa_splati_d((v2i64) src_b4, 0); @@ -298,8 +296,7 @@ static void dsolve_8x2_rt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO a -= 16; b -= 4; - src_b0 = __msa_cast_to_vector_double(*(b + 0)); - src_b0 = (v2f64) __msa_splati_d((v2i64) src_b0, 0); + src_b0 = COPY_DOUBLE_TO_VECTOR(*(b + 0)); src_b2 = LD_DP(b + 2); src_b3 = (v2f64) __msa_splati_d((v2i64) src_b2, 1); src_b2 = (v2f64) __msa_splati_d((v2i64) src_b2, 0); @@ -377,8 +374,7 @@ static void dsolve_8x1_rt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG bk) a -= 8; b -= 1; - src_b0 = __msa_cast_to_vector_double(*b); - src_b0 = (v2f64) __msa_splati_d((v2i64) src_b0, 0); + src_b0 = COPY_DOUBLE_TO_VECTOR(*b); src_c0 *= src_b0; src_c1 *= src_b0; @@ -445,11 +441,9 @@ static void dsolve_4x4_rt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO src_b8 = LD_DP(b + 8); src_b9 = (v2f64) __msa_splati_d((v2i64) src_b8, 1); src_b8 = (v2f64) __msa_splati_d((v2i64) src_b8, 0); - src_b10 = __msa_cast_to_vector_double(*(b + 10)); - src_b10 = (v2f64) __msa_splati_d((v2i64) src_b10, 0); + src_b10 = COPY_DOUBLE_TO_VECTOR(*(b + 10)); - src_b0 = __msa_cast_to_vector_double(*(b + 0)); - src_b0 = (v2f64) __msa_splati_d((v2i64) src_b0, 0); + src_b0 = COPY_DOUBLE_TO_VECTOR(*(b + 0)); src_b4 = LD_DP(b + 4); src_b5 = (v2f64) __msa_splati_d((v2i64) src_b4, 1); src_b4 = (v2f64) __msa_splati_d((v2i64) src_b4, 0); @@ -527,8 +521,7 @@ static void dsolve_4x2_rt_msa(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLO a -= 8; b -= 4; - src_b0 = __msa_cast_to_vector_double(*(b + 0)); - src_b0 = (v2f64) __msa_splati_d((v2i64) src_b0, 0); + src_b0 = COPY_DOUBLE_TO_VECTOR(*(b + 0)); src_b2 = LD_DP(b + 2); src_b3 = (v2f64) __msa_splati_d((v2i64) src_b2, 1); src_b2 = 
(v2f64) __msa_splati_d((v2i64) src_b2, 0); diff --git a/kernel/mips/macros_msa.h b/kernel/mips/macros_msa.h index ee0dea0b7..b887800ed 100644 --- a/kernel/mips/macros_msa.h +++ b/kernel/mips/macros_msa.h @@ -63,16 +63,12 @@ inline static void prefetch_load_lf(unsigned char *src) #define ST_DP(...) ST_D(v2f64, __VA_ARGS__) #define COPY_FLOAT_TO_VECTOR(a) ( { \ - v4f32 out; \ - out = __msa_cast_to_vector_float(a); \ - out = (v4f32) __msa_splati_w((v4i32) out, 0); \ + v4f32 out = {a, a, a, a}; \ out; \ } ) #define COPY_DOUBLE_TO_VECTOR(a) ( { \ - v2f64 out; \ - out = __msa_cast_to_vector_double(a); \ - out = (v2f64) __msa_splati_d((v2i64) out, 0); \ + v2f64 out = {a, a}; \ out; \ } ) diff --git a/kernel/mips/srot_msa.c b/kernel/mips/srot_msa.c index 75730241a..79d921b7a 100644 --- a/kernel/mips/srot_msa.c +++ b/kernel/mips/srot_msa.c @@ -48,11 +48,7 @@ int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, { if ((0 == c) && (0 == s)) { - v4f32 zero = __msa_cast_to_vector_float(0); - zero = (v4f32) __msa_insert_w((v4i32) zero, 0, 0.0); - zero = (v4f32) __msa_insert_w((v4i32) zero, 1, 0.0); - zero = (v4f32) __msa_insert_w((v4i32) zero, 2, 0.0); - zero = (v4f32) __msa_insert_w((v4i32) zero, 3, 0.0); + v4f32 zero = {0.0, 0.0, 0.0, 0.0}; /* process 4 floats */ for (j = (n >> 2); j--;) diff --git a/kernel/mips/sscal_msa.c b/kernel/mips/sscal_msa.c index 64b62d659..66e17b844 100644 --- a/kernel/mips/sscal_msa.c +++ b/kernel/mips/sscal_msa.c @@ -44,11 +44,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, { if (0.0 == da) { - v4f32 zero_v = __msa_cast_to_vector_float(0); - zero_v = (v4f32) __msa_insert_w((v4i32) zero_v, 0, 0.0); - zero_v = (v4f32) __msa_insert_w((v4i32) zero_v, 1, 0.0); - zero_v = (v4f32) __msa_insert_w((v4i32) zero_v, 2, 0.0); - zero_v = (v4f32) __msa_insert_w((v4i32) zero_v, 3, 0.0); + v4f32 zero_v = {0.0, 0.0, 0.0, 0.0}; for (i = (n >> 6); i--;) { diff --git a/kernel/mips/sswap_msa.c b/kernel/mips/sswap_msa.c index 46fa8aa87..d412285b0 100644 --- a/kernel/mips/sswap_msa.c +++ b/kernel/mips/sswap_msa.c @@ -198,7 +198,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, } } } - else + else if ((inc_x != 0) && (inc_y != 0)) { for (i = (n >> 3); i--;) { @@ -262,6 +262,33 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, } } } + else + { + if (inc_x == inc_y) + { + if (n & 1) + { + x0 = *srcx; + *srcx = *srcy; + *srcy = x0; + } + else + return (0); + } + else + { + BLASLONG ix = 0, iy = 0; + while (i < n) + { + x0 = srcx[ix]; + srcx[ix] = srcy[iy]; + srcy[iy] = x0; + ix += inc_x; + iy += inc_y; + i++; + } + } + } return (0); } diff --git a/kernel/mips/zgemv_n_msa.c b/kernel/mips/zgemv_n_msa.c index 669c25758..97a80b4ba 100644 --- a/kernel/mips/zgemv_n_msa.c +++ b/kernel/mips/zgemv_n_msa.c @@ -56,11 +56,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if !defined(XCONJ) #define OP0 += #define OP1 -= - #define OP2 -= + #define OP2 += #else #define OP0 -= #define OP1 -= - #define OP2 += + #define OP2 -= #endif #endif diff --git a/kernel/mips/zgemv_t_msa.c b/kernel/mips/zgemv_t_msa.c index e6febb577..6492f90be 100644 --- a/kernel/mips/zgemv_t_msa.c +++ b/kernel/mips/zgemv_t_msa.c @@ -34,14 +34,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
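
The bfloat16tof32 helper added to kernel/generic/gemmkernel_2x2.c earlier in this patch relies on bfloat16 being the top half of an IEEE-754 single: widening is just placing the 16 bits into the high half of a zeroed 32-bit float. A standalone round-trip sketch of that layout, assuming an OpenBLAS-style typedef of bfloat16 as unsigned short and a little-endian host (matching the #else branch of the helper):

    #include <stdio.h>
    #include <string.h>

    typedef unsigned short bfloat16;   /* assumption: matches the OpenBLAS typedef */

    /* Truncate a float to bfloat16 by keeping its top 16 bits
       (little-endian host assumed). */
    static bfloat16 f32tobf16(float f)
    {
        unsigned short q[2];
        memcpy(q, &f, sizeof(f));
        return q[1];
    }

    /* Widen a bfloat16 back to float by zero-filling the low 16 bits,
       like bfloat16tof32 in the patch (memcpy avoids the pointer punning). */
    static float bf16tof32(bfloat16 v)
    {
        float result;
        unsigned short q[2] = {0, 0};
        q[1] = v;
        memcpy(&result, q, sizeof(result));
        return result;
    }

    int main(void)
    {
        float x = 3.140625f;   /* exactly representable in bfloat16's 7-bit mantissa */
        printf("%f -> %f\n", x, bf16tof32(f32tobf16(x)));
        return 0;
    }
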
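The macros_msa.h change above, together with the matching zero_v rewrites in the scal/rot/swap kernels, drops the __msa_cast_to_vector_* / __msa_splati_* / __msa_insert_* sequences in favor of plain initializers. That works because the MSA vector types are GCC vector extensions, so a brace initializer fills all lanes directly. A minimal sketch, not part of the patch, using locally defined vector typedefs:

    /* Sketch: brace-initializing a GCC vector-extension type splats or
       zeroes the register without any intrinsic calls. */
    #include <stdio.h>

    typedef float  v4f32 __attribute__ ((vector_size (16)));
    typedef double v2f64 __attribute__ ((vector_size (16)));

    int main(void)
    {
        double a = 3.5;
        v2f64 splat  = {a, a};                 /* what COPY_DOUBLE_TO_VECTOR(a) now expands to */
        v4f32 zero_v = {0.0, 0.0, 0.0, 0.0};   /* replaces the cast + insert_w sequence */
        printf("%g %g | %g %g %g %g\n",
               splat[0], splat[1], zero_v[0], zero_v[1], zero_v[2], zero_v[3]);
        return 0;
    }
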
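The zgemv_n_msa.c hunk above and the zgemv_t_msa.c hunk below rework the OP0/OP1/OP2 sign macros so that all four CONJ/XCONJ combinations get their own signs instead of being folded into two cases. The signs follow from which operand of the complex product is conjugated; note that conj(a)*conj(x) is the conjugate of a*x, not a*x itself, so the old two-way split assigned wrong cross-term signs in the CONJ cases. A small C99 check of the four products (illustration only, not the kernel code):

    #include <stdio.h>
    #include <complex.h>

    int main(void)
    {
        /* a plays the role of a matrix element, x of a vector element */
        double complex a = 1.0 + 2.0*I, x = 3.0 - 1.0*I;
        printf("a*x             = %+g%+gi\n", creal(a*x),             cimag(a*x));
        printf("conj(a)*x       = %+g%+gi\n", creal(conj(a)*x),       cimag(conj(a)*x));
        printf("a*conj(x)       = %+g%+gi\n", creal(a*conj(x)),       cimag(a*conj(x)));
        printf("conj(a)*conj(x) = %+g%+gi\n", creal(conj(a)*conj(x)), cimag(conj(a)*conj(x)));
        return 0;
    }
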
#undef OP3 #undef OP4 -#if ( !defined(CONJ) && !defined(XCONJ) ) || ( defined(CONJ) && defined(XCONJ) ) - #define OP0 -= - #define OP1 += - #define OP2 += +#if !defined(CONJ) + #if !defined(XCONJ) + #define OP0 -= + #define OP1 += + #define OP2 += + #else + #define OP0 += + #define OP1 += + #define OP2 -= + #endif #else - #define OP0 += - #define OP1 += - #define OP2 -= + #if !defined(XCONJ) + #define OP0 += + #define OP1 -= + #define OP2 += + #else + #define OP0 -= + #define OP1 -= + #define OP2 -= + #endif #endif #define ZGEMV_T_8x1() \ diff --git a/kernel/mips/zscal_msa.c b/kernel/mips/zscal_msa.c index 5a8766d3c..a45c3cecd 100644 --- a/kernel/mips/zscal_msa.c +++ b/kernel/mips/zscal_msa.c @@ -49,9 +49,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, { if ((0.0 == da_r) && (0.0 == da_i)) { - v2f64 zero_v = __msa_cast_to_vector_double(0); - zero_v = (v2f64) __msa_insert_d((v2i64) zero_v, 0, 0.0); - zero_v = (v2f64) __msa_insert_d((v2i64) zero_v, 1, 0.0); + v2f64 zero_v = {0.0, 0.0}; for (i = (n >> 4); i--;) { @@ -475,9 +473,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, if ((0.0 == da_r) && (0.0 == da_i)) { - v2f64 zero_v = __msa_cast_to_vector_double(0); - zero_v = (v2f64) __msa_insert_d((v2i64) zero_v, 0, 0.0); - zero_v = (v2f64) __msa_insert_d((v2i64) zero_v, 1, 0.0); + v2f64 zero_v = {0.0, 0.0}; for (i = (n >> 4); i--;) { diff --git a/kernel/mips64/KERNEL b/kernel/mips64/KERNEL index 61da7445f..97ef3692c 100644 --- a/kernel/mips64/KERNEL +++ b/kernel/mips64/KERNEL @@ -167,3 +167,27 @@ endif CGEMM3MKERNEL = zgemm3m_kernel.S ZGEMM3MKERNEL = zgemm3m_kernel.S + +ifndef ISMINKERNEL +ISMINKERNEL = imin.S +endif + +ifndef IDMINKERNEL +IDMINKERNEL = imin.S +endif + +ifndef IQMINKERNEL +IQMINKERNEL = imin.S +endif + +ifndef ISMAXKERNEL +ISMAXKERNEL = imax.S +endif + +ifndef IDMAXKERNEL +IDMAXKERNEL = imax.S +endif + +ifndef IQMAXKERNEL +IQMAXKERNEL = imax.S +endif diff --git a/kernel/mips64/KERNEL.LOONGSON3B b/kernel/mips64/KERNEL.LOONGSON3B deleted file mode 100644 index e476c631e..000000000 --- a/kernel/mips64/KERNEL.LOONGSON3B +++ /dev/null @@ -1,64 +0,0 @@ -SAXPYKERNEL=axpy_loongson3a.S -DAXPYKERNEL=daxpy_loongson3a_simd.S - -SGEMVNKERNEL = gemv_n_loongson3a.c -SGEMVTKERNEL = gemv_t_loongson3a.c -DGEMVNKERNEL = gemv_n_loongson3a.c -DGEMVTKERNEL = gemv_t_loongson3a.c -CGEMVNKERNEL = zgemv_n_loongson3a.c -CGEMVTKERNEL = zgemv_t_loongson3a.c -ZGEMVNKERNEL = zgemv_n_loongson3a.c -ZGEMVTKERNEL = zgemv_t_loongson3a.c - -STRMMKERNEL = ../generic/trmmkernel_2x2.c -DTRMMKERNEL = ../generic/trmmkernel_2x2.c -CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c -ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c - -SGEMMKERNEL = ../generic/gemmkernel_2x2.c -SGEMMONCOPY = ../generic/gemm_ncopy_2.c -SGEMMOTCOPY = ../generic/gemm_tcopy_2.c -SGEMMONCOPYOBJ = sgemm_oncopy.o -SGEMMOTCOPYOBJ = sgemm_otcopy.o - -DGEMMKERNEL = ../generic/gemmkernel_2x2.c -DGEMMONCOPY = ../generic/gemm_ncopy_2.c -DGEMMOTCOPY = ../generic/gemm_tcopy_2.c -DGEMMONCOPYOBJ = dgemm_oncopy.o -DGEMMOTCOPYOBJ = dgemm_otcopy.o - -CGEMMKERNEL = ../generic/zgemmkernel_2x2.c -CGEMMONCOPY = ../generic/zgemm_ncopy_2.c -CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -CGEMMONCOPYOBJ = cgemm_oncopy.o -CGEMMOTCOPYOBJ = cgemm_otcopy.o - -ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c -ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c -ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -ZGEMMONCOPYOBJ = zgemm_oncopy.o -ZGEMMOTCOPYOBJ = zgemm_otcopy.o - -STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -STRSMKERNEL_LT = 
../generic/trsm_kernel_LT.c -STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - -DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - -CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - -ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - - - - diff --git a/kernel/mips64/KERNEL.LOONGSON3A b/kernel/mips64/KERNEL.LOONGSON3R3 similarity index 75% rename from kernel/mips64/KERNEL.LOONGSON3A rename to kernel/mips64/KERNEL.LOONGSON3R3 index 0298faaad..904828d57 100644 --- a/kernel/mips64/KERNEL.LOONGSON3A +++ b/kernel/mips64/KERNEL.LOONGSON3R3 @@ -16,32 +16,32 @@ SGEMMINCOPY = ../generic/gemm_ncopy_8.c SGEMMITCOPY = ../generic/gemm_tcopy_8.c SGEMMONCOPY = ../generic/gemm_ncopy_4.c SGEMMOTCOPY = ../generic/gemm_tcopy_4.c -SGEMMINCOPYOBJ = sgemm_incopy.o -SGEMMITCOPYOBJ = sgemm_itcopy.o -SGEMMONCOPYOBJ = sgemm_oncopy.o -SGEMMOTCOPYOBJ = sgemm_otcopy.o +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) DGEMMKERNEL = dgemm_kernel_loongson3a_4x4.S DGEMMONCOPY = ../generic/gemm_ncopy_4.c DGEMMOTCOPY = ../generic/gemm_tcopy_4.c -DGEMMONCOPYOBJ = dgemm_oncopy.o -DGEMMOTCOPYOBJ = dgemm_otcopy.o +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CGEMMKERNEL = cgemm_kernel_loongson3a_4x2_ps.S CGEMMINCOPY = ../generic/zgemm_ncopy_4.c CGEMMITCOPY = ../generic/zgemm_tcopy_4.c CGEMMONCOPY = ../generic/zgemm_ncopy_2.c CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -CGEMMINCOPYOBJ = cgemm_incopy.o -CGEMMITCOPYOBJ = cgemm_itcopy.o -CGEMMONCOPYOBJ = cgemm_oncopy.o -CGEMMOTCOPYOBJ = cgemm_otcopy.o +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZGEMMKERNEL = zgemm_kernel_loongson3a_2x2.S ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -ZGEMMONCOPYOBJ = zgemm_oncopy.o -ZGEMMOTCOPYOBJ = zgemm_otcopy.o +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c @@ -64,6 +64,3 @@ ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c DSDOTKERNEL = ../mips/dot.c - - - diff --git a/kernel/mips64/KERNEL.LOONGSON3R4 b/kernel/mips64/KERNEL.LOONGSON3R4 new file mode 100644 index 000000000..b81e5441d --- /dev/null +++ b/kernel/mips64/KERNEL.LOONGSON3R4 @@ -0,0 +1,192 @@ +ifdef HAVE_MSA +SAXPYKERNEL = ../mips/saxpy_msa.c +DAXPYKERNEL = ../mips/daxpy_msa.c +CAXPYKERNEL = ../mips/caxpy_msa.c +ZAXPYKERNEL = ../mips/zaxpy_msa.c +else +SAXPYKERNEL = axpy_loongson3a.S +DAXPYKERNEL = daxpy_loongson3a_simd.S +endif + +ifdef HAVE_MSA +SCOPYKERNEL = ../mips/scopy_msa.c +DCOPYKERNEL = ../mips/dcopy_msa.c +CCOPYKERNEL = ../mips/ccopy_msa.c +ZCOPYKERNEL = ../mips/zcopy_msa.c +endif + +ifdef HAVE_MSA +SDOTKERNEL = ../mips/sdot_msa.c +DDOTKERNEL = 
../mips/ddot_msa.c +CDOTKERNEL = ../mips/cdot_msa.c +ZDOTKERNEL = ../mips/zdot_msa.c +endif +DSDOTKERNEL = ../mips/dot.c + +ifdef HAVE_MSA +SROTKERNEL = ../mips/srot_msa.c +DROTKERNEL = ../mips/drot_msa.c +CROTKERNEL = ../mips/crot_msa.c +ZROTKERNEL = ../mips/zrot_msa.c +endif + +ifdef HAVE_MSA +SSCALKERNEL = ../mips/sscal_msa.c +DSCALKERNEL = ../mips/dscal_msa.c +CSCALKERNEL = ../mips/cscal_msa.c +ZSCALKERNEL = ../mips/zscal_msa.c +endif + +ifdef HAVE_MSA +SGEMVNKERNEL = ../mips/sgemv_n_msa.c +DGEMVNKERNEL = ../mips/dgemv_n_msa.c +SGEMVTKERNEL = ../mips/sgemv_t_msa.c +DGEMVTKERNEL = ../mips/dgemv_t_msa.c +CGEMVNKERNEL = ../mips/cgemv_n_msa.c +CGEMVTKERNEL = ../mips/cgemv_t_msa.c +ZGEMVNKERNEL = ../mips/zgemv_n_msa.c +ZGEMVTKERNEL = ../mips/zgemv_t_msa.c +else +SGEMVNKERNEL = gemv_n_loongson3a.c +SGEMVTKERNEL = gemv_t_loongson3a.c +DGEMVNKERNEL = gemv_n_loongson3a.c +DGEMVTKERNEL = gemv_t_loongson3a.c +CGEMVNKERNEL = zgemv_n_loongson3a.c +CGEMVTKERNEL = zgemv_t_loongson3a.c +ZGEMVNKERNEL = zgemv_n_loongson3a.c +ZGEMVTKERNEL = zgemv_t_loongson3a.c +endif + +ifdef HAVE_MSA +SASUMKERNEL = ../mips/sasum_msa.c +DASUMKERNEL = ../mips/dasum_msa.c +CASUMKERNEL = ../mips/casum_msa.c +ZASUMKERNEL = ../mips/zasum_msa.c +endif + +ifdef HAVE_MSA +SSWAPKERNEL = ../mips/sswap_msa.c +DSWAPKERNEL = ../mips/dswap_msa.c +CSWAPKERNEL = ../mips/cswap_msa.c +ZSWAPKERNEL = ../mips/zswap_msa.c +endif + +ifdef HAVE_MSA +SGEMMKERNEL = ../mips/sgemm_kernel_8x8_msa.c +SGEMMONCOPY = ../mips/sgemm_ncopy_8_msa.c +SGEMMOTCOPY = ../mips/sgemm_tcopy_8_msa.c +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) +else +SGEMMKERNEL = sgemm_kernel_8x4_ps.S +SGEMMINCOPY = ../generic/gemm_ncopy_8.c +SGEMMITCOPY = ../generic/gemm_tcopy_8.c +SGEMMONCOPY = ../generic/gemm_ncopy_4.c +SGEMMOTCOPY = ../generic/gemm_tcopy_4.c +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) +endif + +ifdef HAVE_MSA +DGEMMKERNEL = ../mips/dgemm_kernel_8x4_msa.c +DGEMMINCOPY = ../mips/dgemm_ncopy_8_msa.c +DGEMMITCOPY = ../mips/dgemm_tcopy_8_msa.c +DGEMMONCOPY = ../mips/dgemm_ncopy_4_msa.c +DGEMMOTCOPY = ../mips/dgemm_tcopy_4_msa.c +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) +else +DGEMMKERNEL = dgemm_kernel_loongson3a_4x4.S +DGEMMONCOPY = ../generic/gemm_ncopy_4.c +DGEMMOTCOPY = ../generic/gemm_tcopy_4.c +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) +endif + +ifdef HAVE_MSA +CGEMMKERNEL = ../mips/cgemm_kernel_8x4_msa.c +CGEMMINCOPY = ../mips/cgemm_ncopy_8_msa.c +CGEMMITCOPY = ../mips/cgemm_tcopy_8_msa.c +CGEMMONCOPY = ../mips/cgemm_ncopy_4_msa.c +CGEMMOTCOPY = ../mips/cgemm_tcopy_4_msa.c +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) +else +CGEMMKERNEL = cgemm_kernel_loongson3a_4x2_ps.S +CGEMMINCOPY = ../generic/zgemm_ncopy_4.c +CGEMMITCOPY = ../generic/zgemm_tcopy_4.c +CGEMMONCOPY = ../generic/zgemm_ncopy_2.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +CGEMMONCOPYOBJ = 
cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) +endif + +ifdef HAVE_MSA +ZGEMMKERNEL = ../mips/zgemm_kernel_4x4_msa.c +ZGEMMONCOPY = ../mips/zgemm_ncopy_4_msa.c +ZGEMMOTCOPY = ../mips/zgemm_tcopy_4_msa.c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) +else +ZGEMMKERNEL = zgemm_kernel_loongson3a_2x2.S +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) +endif + +ifdef HAVE_MSA +STRSMKERNEL_LN = ../mips/strsm_kernel_LN_8x8_msa.c +STRSMKERNEL_LT = ../mips/strsm_kernel_LT_8x8_msa.c +STRSMKERNEL_RN = ../mips/strsm_kernel_RN_8x8_msa.c +STRSMKERNEL_RT = ../mips/strsm_kernel_RT_8x8_msa.c +else +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +endif + +ifdef HAVE_MSA +DTRSMKERNEL_LN = ../mips/dtrsm_kernel_LN_8x4_msa.c +DTRSMKERNEL_LT = ../mips/dtrsm_kernel_LT_8x4_msa.c +DTRSMKERNEL_RN = ../mips/dtrsm_kernel_RN_8x4_msa.c +DTRSMKERNEL_RT = ../mips/dtrsm_kernel_RT_8x4_msa.c +else +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +endif + +ifdef HAVE_MSA +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +else +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +endif + +ifdef HAVE_MSA +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +else +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +endif diff --git a/kernel/mips64/KERNEL.generic b/kernel/mips64/KERNEL.generic new file mode 100644 index 000000000..17f2ef976 --- /dev/null +++ b/kernel/mips64/KERNEL.generic @@ -0,0 +1,160 @@ +SGEMM_BETA = ../generic/gemm_beta.c +DGEMM_BETA = ../generic/gemm_beta.c +CGEMM_BETA = ../generic/zgemm_beta.c +ZGEMM_BETA = ../generic/zgemm_beta.c + +STRMMKERNEL = ../generic/trmmkernel_2x2.c +DTRMMKERNEL = ../generic/trmmkernel_2x2.c +CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c +ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c + +SGEMMKERNEL = ../generic/gemmkernel_2x2.c +SGEMMONCOPY = ../generic/gemm_ncopy_2.c +SGEMMOTCOPY = ../generic/gemm_tcopy_2.c +SGEMMONCOPYOBJ = sgemm_oncopy.o +SGEMMOTCOPYOBJ = sgemm_otcopy.o + +DGEMMKERNEL = ../generic/gemmkernel_2x2.c +DGEMMONCOPY = ../generic/gemm_ncopy_2.c +DGEMMOTCOPY = ../generic/gemm_tcopy_2.c +DGEMMONCOPYOBJ = dgemm_oncopy.o +DGEMMOTCOPYOBJ = dgemm_otcopy.o + +CGEMMKERNEL = ../generic/zgemmkernel_2x2.c +CGEMMONCOPY = ../generic/zgemm_ncopy_2.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +CGEMMONCOPYOBJ = cgemm_oncopy.o +CGEMMOTCOPYOBJ = cgemm_otcopy.o + +ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMONCOPYOBJ = zgemm_oncopy.o +ZGEMMOTCOPYOBJ = zgemm_otcopy.o + +STRSMKERNEL_LN = 
../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +#Pure C for other kernels +SAMAXKERNEL = ../mips/amax.c +DAMAXKERNEL = ../mips/amax.c +CAMAXKERNEL = ../mips/zamax.c +ZAMAXKERNEL = ../mips/zamax.c + +SAMINKERNEL = ../mips/amin.c +DAMINKERNEL = ../mips/amin.c +CAMINKERNEL = ../mips/zamin.c +ZAMINKERNEL = ../mips/zamin.c + +SMAXKERNEL = ../mips/max.c +DMAXKERNEL = ../mips/max.c + +SMINKERNEL = ../mips/min.c +DMINKERNEL = ../mips/min.c + +ISAMAXKERNEL = ../mips/iamax.c +IDAMAXKERNEL = ../mips/iamax.c +ICAMAXKERNEL = ../mips/izamax.c +IZAMAXKERNEL = ../mips/izamax.c + +ISAMINKERNEL = ../mips/iamin.c +IDAMINKERNEL = ../mips/iamin.c +ICAMINKERNEL = ../mips/izamin.c +IZAMINKERNEL = ../mips/izamin.c + +ISMAXKERNEL = ../mips/imax.c +IDMAXKERNEL = ../mips/imax.c + +ISMINKERNEL = ../mips/imin.c +IDMINKERNEL = ../mips/imin.c + +SASUMKERNEL = ../mips/asum.c +DASUMKERNEL = ../mips/asum.c +CASUMKERNEL = ../mips/zasum.c +ZASUMKERNEL = ../mips/zasum.c + +SSUMKERNEL = ../mips/sum.c +DSUMKERNEL = ../mips/sum.c +CSUMKERNEL = ../mips/zsum.c +ZSUMKERNEL = ../mips/zsum.c + +SAXPYKERNEL = ../mips/axpy.c +DAXPYKERNEL = ../mips/axpy.c +CAXPYKERNEL = ../mips/zaxpy.c +ZAXPYKERNEL = ../mips/zaxpy.c + +SCOPYKERNEL = ../mips/copy.c +DCOPYKERNEL = ../mips/copy.c +CCOPYKERNEL = ../mips/zcopy.c +ZCOPYKERNEL = ../mips/zcopy.c + +SDOTKERNEL = ../mips/dot.c +DDOTKERNEL = ../mips/dot.c +CDOTKERNEL = ../mips/zdot.c +ZDOTKERNEL = ../mips/zdot.c + +SNRM2KERNEL = ../mips/nrm2.c +DNRM2KERNEL = ../mips/nrm2.c +CNRM2KERNEL = ../mips/znrm2.c +ZNRM2KERNEL = ../mips/znrm2.c + +SROTKERNEL = ../mips/rot.c +DROTKERNEL = ../mips/rot.c +CROTKERNEL = ../mips/zrot.c +ZROTKERNEL = ../mips/zrot.c + +SSCALKERNEL = ../mips/scal.c +DSCALKERNEL = ../mips/scal.c +CSCALKERNEL = ../mips/zscal.c +ZSCALKERNEL = ../mips/zscal.c + +SSWAPKERNEL = ../mips/swap.c +DSWAPKERNEL = ../mips/swap.c +CSWAPKERNEL = ../mips/zswap.c +ZSWAPKERNEL = ../mips/zswap.c + +SGEMVNKERNEL = ../mips/gemv_n.c +DGEMVNKERNEL = ../mips/gemv_n.c +CGEMVNKERNEL = ../mips/zgemv_n.c +ZGEMVNKERNEL = ../mips/zgemv_n.c + +SGEMVTKERNEL = ../mips/gemv_t.c +DGEMVTKERNEL = ../mips/gemv_t.c +CGEMVTKERNEL = ../mips/zgemv_t.c +ZGEMVTKERNEL = ../mips/zgemv_t.c + +SSYMV_U_KERNEL = ../generic/symv_k.c +SSYMV_L_KERNEL = ../generic/symv_k.c +DSYMV_U_KERNEL = ../generic/symv_k.c +DSYMV_L_KERNEL = ../generic/symv_k.c +QSYMV_U_KERNEL = ../generic/symv_k.c +QSYMV_L_KERNEL = ../generic/symv_k.c +CSYMV_U_KERNEL = ../generic/zsymv_k.c +CSYMV_L_KERNEL = ../generic/zsymv_k.c +ZSYMV_U_KERNEL = ../generic/zsymv_k.c +ZSYMV_L_KERNEL = ../generic/zsymv_k.c +XSYMV_U_KERNEL = ../generic/zsymv_k.c +XSYMV_L_KERNEL = ../generic/zsymv_k.c + +ZHEMV_U_KERNEL = ../generic/zhemv_k.c +ZHEMV_L_KERNEL = ../generic/zhemv_k.c + +CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c +ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c diff --git a/kernel/power/KERNEL 
b/kernel/power/KERNEL index c3c86b310..9070450f4 100644 --- a/kernel/power/KERNEL +++ b/kernel/power/KERNEL @@ -50,3 +50,26 @@ ifndef DSDOTKERNEL DSDOTKERNEL = ../generic/dot.c endif +ifndef ISMINKERNEL +ISMINKERNEL = imin.S +endif + +ifndef IDMINKERNEL +IDMINKERNEL = imin.S +endif + +ifndef IQMINKERNEL +IQMINKERNEL = imin.S +endif + +ifndef ISMAXKERNEL +ISMAXKERNEL = imax.S +endif + +ifndef IDMAXKERNEL +IDMAXKERNEL = imax.S +endif + +ifndef IQMAXKERNEL +IQMAXKERNEL = imax.S +endif diff --git a/kernel/power/KERNEL.POWER10 b/kernel/power/KERNEL.POWER10 new file mode 100644 index 000000000..1cf7b0b7c --- /dev/null +++ b/kernel/power/KERNEL.POWER10 @@ -0,0 +1,217 @@ +ifeq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +include $(KERNELDIR)/KERNEL.POWER8 +else + +#SGEMM_BETA = ../generic/gemm_beta.c +#DGEMM_BETA = ../generic/gemm_beta.c +#CGEMM_BETA = ../generic/zgemm_beta.c +#ZGEMM_BETA = ../generic/zgemm_beta.c + +SBGEMM_BETA = ../generic/gemm_beta.c +SBGEMMKERNEL = sbgemm_kernel_power10.c +SBGEMMINCOPY = sbgemm_ncopy_16_power10.c +SBGEMMITCOPY = sbgemm_tcopy_16_power10.c +SBGEMMONCOPY = sbgemm_ncopy_8_power10.c +SBGEMMOTCOPY = sbgemm_tcopy_8_power10.c +SBGEMMINCOPYOBJ = sbgemm_incopy$(TSUFFIX).$(SUFFIX) +SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX) +SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX) +SBGEMMOTCOPYOBJ = sbgemm_otcopy$(TSUFFIX).$(SUFFIX) + +STRMMKERNEL = sgemm_kernel_power10.c +DTRMMKERNEL = dgemm_kernel_power10.c +CTRMMKERNEL = cgemm_kernel_power10.S +ZTRMMKERNEL = zgemm_kernel_power10.S + +SGEMMKERNEL = sgemm_kernel_power10.c +SGEMMINCOPY = ../generic/gemm_ncopy_16.c +SGEMMITCOPY = sgemm_tcopy_16_power8.S +SGEMMONCOPY = ../generic/gemm_ncopy_8.c +SGEMMOTCOPY = sgemm_tcopy_8_power8.S +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = dgemm_kernel_power10.c +DGEMMINCOPY = +DGEMMITCOPY = +DGEMMONCOPY = dgemm_ncopy_8_power10.c +DGEMMOTCOPY = ../generic/gemm_tcopy_8.c +DGEMMINCOPYOBJ = +DGEMMITCOPYOBJ = +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CGEMMKERNEL = cgemm_kernel_power10.S +CGEMMINCOPY = ../generic/zgemm_ncopy_8.c +CGEMMITCOPY = ../generic/zgemm_tcopy_8.c +CGEMMONCOPY = ../generic/zgemm_ncopy_4.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_4.c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) + +ZGEMMKERNEL = zgemm_kernel_power10.S +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMINCOPY = ../generic/zgemm_ncopy_8.c +ZGEMMITCOPY = zgemm_tcopy_8_power8.S +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) + +STRSMKERNEL_LN = trsm_kernel_LN_power10.c +STRSMKERNEL_LT = trsm_kernel_LT_power10.c +STRSMKERNEL_RN = trsm_kernel_RN_power10.c +STRSMKERNEL_RT = trsm_kernel_RT_power10.c + +DTRSMKERNEL_LN = trsm_kernel_LN_power10.c +DTRSMKERNEL_LT = trsm_kernel_LT_power10.c +DTRSMKERNEL_RN = trsm_kernel_RN_power10.c +DTRSMKERNEL_RT = trsm_kernel_RT_power10.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = 
../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +#Todo: CGEMM3MKERNEL should be 4x4 blocksizes. +#CGEMM3MKERNEL = zgemm3m_kernel_8x4_sse3.S +#ZGEMM3MKERNEL = zgemm3m_kernel_4x4_sse3.S + +#Pure C for other kernels +#SAMAXKERNEL = ../arm/amax.c +#DAMAXKERNEL = ../arm/amax.c +#CAMAXKERNEL = ../arm/zamax.c +#ZAMAXKERNEL = ../arm/zamax.c +# +#SAMINKERNEL = ../arm/amin.c +#DAMINKERNEL = ../arm/amin.c +#CAMINKERNEL = ../arm/zamin.c +#ZAMINKERNEL = ../arm/zamin.c +# +#SMAXKERNEL = ../arm/max.c +#DMAXKERNEL = ../arm/max.c +# +#SMINKERNEL = ../arm/min.c +#DMINKERNEL = ../arm/min.c +# +ifneq ($(GCCVERSIONGTEQ9),1) +ISAMAXKERNEL = isamax_power9.S +else +ISAMAXKERNEL = isamax.c +endif +IDAMAXKERNEL = idamax.c +ifneq ($(GCCVERSIONGTEQ9),1) +ICAMAXKERNEL = icamax_power9.S +else +ICAMAXKERNEL = icamax.c +endif +IZAMAXKERNEL = izamax.c +# +ifneq ($(GCCVERSIONGTEQ9),1) +ISAMINKERNEL = isamin_power9.S +else +ISAMINKERNEL = isamin.c +endif +IDAMINKERNEL = idamin.c +ifneq ($(GCCVERSIONGTEQ9),1) +ICAMINKERNEL = icamin_power9.S +else +ICAMINKERNEL = icamin.c +endif +IZAMINKERNEL = izamin.c +# +#ISMAXKERNEL = ../arm/imax.c +#IDMAXKERNEL = ../arm/imax.c +# +#ISMINKERNEL = ../arm/imin.c +#IDMINKERNEL = ../arm/imin.c +# +SASUMKERNEL = sasum.c +DASUMKERNEL = dasum.c +CASUMKERNEL = casum.c +ZASUMKERNEL = zasum.c +# +SAXPYKERNEL = saxpy_power10.c +DAXPYKERNEL = daxpy_power10.c +CAXPYKERNEL = caxpy_power10.c +ZAXPYKERNEL = zaxpy_power10.c +# +SCOPYKERNEL = scopy_power10.c +DCOPYKERNEL = dcopy_power10.c +CCOPYKERNEL = ccopy_power10.c +ZCOPYKERNEL = zcopy_power10.c +# +SDOTKERNEL = sdot_power10.c +DDOTKERNEL = ddot_power10.c +DSDOTKERNEL = sdot_power10.c +CDOTKERNEL = cdot.c +ZDOTKERNEL = zdot.c +# +SNRM2KERNEL = ../arm/nrm2.c +DNRM2KERNEL = ../arm/nrm2.c +CNRM2KERNEL = ../arm/znrm2.c +ZNRM2KERNEL = ../arm/znrm2.c +# +SROTKERNEL = srot.c +DROTKERNEL = drot.c +CROTKERNEL = crot.c +ZROTKERNEL = zrot.c +# +SSCALKERNEL = sscal.c +DSCALKERNEL = dscal.c +CSCALKERNEL = zscal.c +ZSCALKERNEL = zscal.c +# +SSWAPKERNEL = sswap.c +DSWAPKERNEL = dswap.c +CSWAPKERNEL = cswap.c +ZSWAPKERNEL = zswap.c +# + +SGEMVNKERNEL = sgemv_n.c +DGEMVNKERNEL = dgemv_n_power10.c +CGEMVNKERNEL = cgemv_n.c +ZGEMVNKERNEL = zgemv_n_4.c +# +SGEMVTKERNEL = sgemv_t.c +DGEMVTKERNEL = dgemv_t_power10.c +CGEMVTKERNEL = cgemv_t.c +ZGEMVTKERNEL = zgemv_t_4.c + + +#SSYMV_U_KERNEL = ../generic/symv_k.c +#SSYMV_L_KERNEL = ../generic/symv_k.c +#DSYMV_U_KERNEL = ../generic/symv_k.c +#DSYMV_L_KERNEL = ../generic/symv_k.c +#QSYMV_U_KERNEL = ../generic/symv_k.c +#QSYMV_L_KERNEL = ../generic/symv_k.c +#CSYMV_U_KERNEL = ../generic/zsymv_k.c +#CSYMV_L_KERNEL = ../generic/zsymv_k.c +#ZSYMV_U_KERNEL = ../generic/zsymv_k.c +#ZSYMV_L_KERNEL = ../generic/zsymv_k.c +#XSYMV_U_KERNEL = ../generic/zsymv_k.c +#XSYMV_L_KERNEL = ../generic/zsymv_k.c + +#ZHEMV_U_KERNEL = ../generic/zhemv_k.c +#ZHEMV_L_KERNEL = ../generic/zhemv_k.c + +LSAME_KERNEL = ../generic/lsame.c +SCABS_KERNEL = ../generic/cabs.c +DCABS_KERNEL = ../generic/cabs.c +QCABS_KERNEL = ../generic/cabs.c + +#Dump kernel +CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c +ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +endif diff --git a/kernel/power/KERNEL.POWER8 b/kernel/power/KERNEL.POWER8 index 43f004fbb..c2f4cd204 100644 --- a/kernel/power/KERNEL.POWER8 +++ b/kernel/power/KERNEL.POWER8 @@ -1,3 +1,44 @@ +# Big-endian 32bit (AIX) is supported through the 
POWER6 GEMM kernels, no separate TRMM +ifeq ($(__BYTE_ORDER__)$(BINARY32),__ORDER_BIG_ENDIAN__1) +SGEMMKERNEL = gemm_kernel_power6.S +SGEMMINCOPY = +SGEMMITCOPY = +SGEMMONCOPY = gemm_ncopy_4.S +SGEMMOTCOPY = gemm_tcopy_4.S +SGEMMINCOPYOBJ = +SGEMMITCOPYOBJ = +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) +DGEMMKERNEL = gemm_kernel_power6.S +DGEMMINCOPY = +DGEMMITCOPY = +DGEMMONCOPY = gemm_ncopy_4.S +DGEMMOTCOPY = gemm_tcopy_4.S +DGEMMINCOPYOBJ = +DGEMMITCOPYOBJ = +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) +CGEMMKERNEL = zgemm_kernel_power6.S +CGEMMINCOPY = ../generic/zgemm_ncopy_2.c +CGEMMITCOPY = ../generic/zgemm_tcopy_2.c +CGEMMONCOPY = ../generic/zgemm_ncopy_4.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_4.c +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) +ZGEMMKERNEL = zgemm_kernel_power6.S +ZGEMMINCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMITCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMONCOPY = ../generic/zgemm_ncopy_4.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_4.c +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) + +else + #SGEMM_BETA = ../generic/gemm_beta.c #DGEMM_BETA = ../generic/gemm_beta.c #CGEMM_BETA = ../generic/zgemm_beta.c @@ -12,7 +53,7 @@ SGEMMKERNEL = sgemm_kernel_16x8_power8.S SGEMMINCOPY = ../generic/gemm_ncopy_16.c SGEMMITCOPY = sgemm_tcopy_16_power8.S SGEMMONCOPY = ../generic/gemm_ncopy_8.c -SGEMMOTCOPY = sgemm_tcopy_8_power8.S +SGEMMOTCOPY = sgemm_tcopy_8_power8.S SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) @@ -47,16 +88,24 @@ ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +ifeq ($(__BYTE_ORDER__)$(BINARY32),__ORDER_BIG_ENDIAN__1) +DTRSMKERNEL_LN = trsm_kernel_power6_LN.S +DTRSMKERNEL_LT = trsm_kernel_power6_LT.S +DTRSMKERNEL_RN = trsm_kernel_power6_LT.S +DTRSMKERNEL_RT = trsm_kernel_power6_RT.S +else DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c DTRSMKERNEL_LT = dtrsm_kernel_LT_16x4_power8.S DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +endif CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c @@ -88,15 +137,56 @@ ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c # #SMINKERNEL = ../arm/min.c #DMINKERNEL = ../arm/min.c -# + +ISMINKERNEL = imin.S +ISMAXKERNEL = imax.S + +ifneq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +ifneq ($(GCCVERSIONGTEQ9),1) +ISAMAXKERNEL = isamax_power8.S +else +ISAMAXKERNEL = isamax.c +endif +else ISAMAXKERNEL = isamax.c +endif +# IDAMAXKERNEL = idamax.c +# +ifneq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +ifneq ($(GCCVERSIONGTEQ9),1) +ICAMAXKERNEL = icamax_power8.S +else ICAMAXKERNEL = icamax.c +endif +else +ICAMAXKERNEL = icamax.c +endif +# IZAMAXKERNEL = izamax.c # +ifneq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +ifneq 
($(GCCVERSIONGTEQ9),1) +ISAMINKERNEL = isamin_power8.S +else ISAMINKERNEL = isamin.c +endif +else +ISAMINKERNEL = isamin.c +endif +# IDAMINKERNEL = idamin.c +# +ifneq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +ifneq ($(GCCVERSIONGTEQ9),1) +ICAMINKERNEL = icamin_power8.S +else +ICAMINKERNEL = icamin.c +endif +else ICAMINKERNEL = icamin.c +endif +# IZAMINKERNEL = izamin.c # #ISMAXKERNEL = ../arm/imax.c @@ -112,7 +202,21 @@ ZASUMKERNEL = zasum.c # SAXPYKERNEL = saxpy.c DAXPYKERNEL = daxpy.c +# +ifeq ($(__BYTE_ORDER__)$(BINARY32),__ORDER_BIG_ENDIAN__1) +CAXPYKERNEL = zaxpy.S +else +ifneq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +ifneq ($(GCCVERSIONGTEQ9),1) +CAXPYKERNEL = caxpy_power8.S +else CAXPYKERNEL = caxpy.c +endif +else +CAXPYKERNEL = caxpy.c +endif +endif +# ZAXPYKERNEL = zaxpy.c # SCOPYKERNEL = scopy.c @@ -182,3 +286,10 @@ QCABS_KERNEL = ../generic/cabs.c #Dump kernel CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +ifeq ($(__BYTE_ORDER__)$(ELF_VERSION),__ORDER_BIG_ENDIAN__2) +IDAMAXKERNEL = ../arm/iamax.c +IDAMINKERNEL = ../arm/iamin.c +IZAMAXKERNEL = ../arm/izamax.c +IZAMINKERNEL = ../arm/izamin.c +endif diff --git a/kernel/power/KERNEL.POWER9 b/kernel/power/KERNEL.POWER9 index 7e4619082..2bd2516de 100644 --- a/kernel/power/KERNEL.POWER9 +++ b/kernel/power/KERNEL.POWER9 @@ -1,184 +1,214 @@ -#SGEMM_BETA = ../generic/gemm_beta.c -#DGEMM_BETA = ../generic/gemm_beta.c -#CGEMM_BETA = ../generic/zgemm_beta.c -#ZGEMM_BETA = ../generic/zgemm_beta.c - -STRMMKERNEL = sgemm_kernel_power9.S -DTRMMKERNEL = dgemm_kernel_power9.S -CTRMMKERNEL = ctrmm_kernel_8x4_power8.S -ZTRMMKERNEL = ztrmm_kernel_8x2_power8.S - -SGEMMKERNEL = sgemm_kernel_power9.S -SGEMMINCOPY = ../generic/gemm_ncopy_16.c -SGEMMITCOPY = sgemm_tcopy_16_power8.S -SGEMMONCOPY = ../generic/gemm_ncopy_8.c -SGEMMOTCOPY = sgemm_tcopy_8_power8.S -SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) -SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) -SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) -SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) - -DGEMMKERNEL = dgemm_kernel_power9.S -DGEMMINCOPY = ../generic/gemm_ncopy_16.c -DGEMMITCOPY = dgemm_tcopy_16_power8.S -DGEMMONCOPY = dgemm_ncopy_4_power8.S -DGEMMOTCOPY = ../generic/gemm_tcopy_4.c -DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) -DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) -DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) -DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) - -CGEMMKERNEL = cgemm_kernel_8x4_power8.S -CGEMMINCOPY = ../generic/zgemm_ncopy_8.c -CGEMMITCOPY = cgemm_tcopy_8_power8.S -CGEMMONCOPY = ../generic/zgemm_ncopy_4.c -CGEMMOTCOPY = ../generic/zgemm_tcopy_4.c -CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) -CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) -CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) -CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) - -ZGEMMKERNEL = zgemm_kernel_8x2_power8.S -ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c -ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -ZGEMMINCOPY = ../generic/zgemm_ncopy_8.c -ZGEMMITCOPY = zgemm_tcopy_8_power8.S -ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) -ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) -ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) -ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) - -STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - -DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c 
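
A pattern that recurs through the KERNEL.POWER8 additions above (and in KERNEL.POWER9/KERNEL.POWER10 elsewhere in this patch): the index-search, axpy and dot kernels are now selected per compiler version, keeping the hand-written POWER8/POWER9 assembly for older compilers and switching to the C implementations once GCC 9 or newer is detected. Schematically, assuming GCCVERSIONGTEQ9 is set by the surrounding build system (e.g. Makefile.system):

    # Sketch of the compiler-version gate used above (not a literal excerpt).
    # Old compilers keep the hand-written assembly kernel; GCC >= 9 builds
    # the C implementation instead.
    ifneq ($(GCCVERSIONGTEQ9),1)
    ISAMAXKERNEL = isamax_power8.S
    else
    ISAMAXKERNEL = isamax.c
    endif
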
-DTRSMKERNEL_LT = dtrsm_kernel_LT_16x4_power8.S -DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - -CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - -ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - -#Todo: CGEMM3MKERNEL should be 4x4 blocksizes. -#CGEMM3MKERNEL = zgemm3m_kernel_8x4_sse3.S -#ZGEMM3MKERNEL = zgemm3m_kernel_4x4_sse3.S - -#Pure C for other kernels -#SAMAXKERNEL = ../arm/amax.c -#DAMAXKERNEL = ../arm/amax.c -#CAMAXKERNEL = ../arm/zamax.c -#ZAMAXKERNEL = ../arm/zamax.c -# -#SAMINKERNEL = ../arm/amin.c -#DAMINKERNEL = ../arm/amin.c -#CAMINKERNEL = ../arm/zamin.c -#ZAMINKERNEL = ../arm/zamin.c -# -#SMAXKERNEL = ../arm/max.c -#DMAXKERNEL = ../arm/max.c -# -#SMINKERNEL = ../arm/min.c -#DMINKERNEL = ../arm/min.c -# -ISAMAXKERNEL = isamax.c -IDAMAXKERNEL = idamax.c -ICAMAXKERNEL = icamax.c -IZAMAXKERNEL = izamax.c -# -ISAMINKERNEL = isamin.c -IDAMINKERNEL = idamin.c -ICAMINKERNEL = icamin.c -IZAMINKERNEL = izamin.c -# -#ISMAXKERNEL = ../arm/imax.c -#IDMAXKERNEL = ../arm/imax.c -# -#ISMINKERNEL = ../arm/imin.c -#IDMINKERNEL = ../arm/imin.c -# -SASUMKERNEL = sasum.c -DASUMKERNEL = dasum.c -CASUMKERNEL = casum.c -ZASUMKERNEL = zasum.c -# -SAXPYKERNEL = saxpy.c -DAXPYKERNEL = daxpy.c -CAXPYKERNEL = caxpy.c -ZAXPYKERNEL = zaxpy.c -# -SCOPYKERNEL = scopy.c -DCOPYKERNEL = dcopy.c -CCOPYKERNEL = ccopy.c -ZCOPYKERNEL = zcopy.c -# -SDOTKERNEL = sdot.c -DDOTKERNEL = ddot.c -DSDOTKERNEL = sdot.c -CDOTKERNEL = cdot.c -ZDOTKERNEL = zdot.c -# -SNRM2KERNEL = ../arm/nrm2.c -DNRM2KERNEL = ../arm/nrm2.c -CNRM2KERNEL = ../arm/znrm2.c -ZNRM2KERNEL = ../arm/znrm2.c -# -SROTKERNEL = srot.c -DROTKERNEL = drot.c -CROTKERNEL = crot.c -ZROTKERNEL = zrot.c -# -SSCALKERNEL = sscal.c -DSCALKERNEL = dscal.c -CSCALKERNEL = zscal.c -ZSCALKERNEL = zscal.c -# -SSWAPKERNEL = sswap.c -DSWAPKERNEL = dswap.c -CSWAPKERNEL = cswap.c -ZSWAPKERNEL = zswap.c -# - -SGEMVNKERNEL = sgemv_n.c -DGEMVNKERNEL = dgemv_n.c -CGEMVNKERNEL = cgemv_n.c -ZGEMVNKERNEL = zgemv_n_4.c -# -SGEMVTKERNEL = sgemv_t.c -DGEMVTKERNEL = dgemv_t.c -CGEMVTKERNEL = cgemv_t.c -ZGEMVTKERNEL = zgemv_t_4.c - - -#SSYMV_U_KERNEL = ../generic/symv_k.c -#SSYMV_L_KERNEL = ../generic/symv_k.c -#DSYMV_U_KERNEL = ../generic/symv_k.c -#DSYMV_L_KERNEL = ../generic/symv_k.c -#QSYMV_U_KERNEL = ../generic/symv_k.c -#QSYMV_L_KERNEL = ../generic/symv_k.c -#CSYMV_U_KERNEL = ../generic/zsymv_k.c -#CSYMV_L_KERNEL = ../generic/zsymv_k.c -#ZSYMV_U_KERNEL = ../generic/zsymv_k.c -#ZSYMV_L_KERNEL = ../generic/zsymv_k.c -#XSYMV_U_KERNEL = ../generic/zsymv_k.c -#XSYMV_L_KERNEL = ../generic/zsymv_k.c - -#ZHEMV_U_KERNEL = ../generic/zhemv_k.c -#ZHEMV_L_KERNEL = ../generic/zhemv_k.c - -LSAME_KERNEL = ../generic/lsame.c -SCABS_KERNEL = ../generic/cabs.c -DCABS_KERNEL = ../generic/cabs.c -QCABS_KERNEL = ../generic/cabs.c - -#Dump kernel -CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c -ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c +ifeq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +include $(KERNELDIR)/KERNEL.POWER8 +else + +#SGEMM_BETA = ../generic/gemm_beta.c +#DGEMM_BETA = ../generic/gemm_beta.c +#CGEMM_BETA = ../generic/zgemm_beta.c +#ZGEMM_BETA = ../generic/zgemm_beta.c + +STRMMKERNEL = sgemm_kernel_power9.S +DTRMMKERNEL = dgemm_kernel_power9.S +CTRMMKERNEL = 
cgemm_kernel_power9.S +ZTRMMKERNEL = zgemm_kernel_power9.S + +SGEMMKERNEL = sgemm_kernel_power9.S +SGEMMINCOPY = ../generic/gemm_ncopy_16.c +SGEMMITCOPY = sgemm_tcopy_16_power8.S +SGEMMONCOPY = ../generic/gemm_ncopy_8.c +SGEMMOTCOPY = sgemm_tcopy_8_power8.S +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = dgemm_kernel_power9.S +DGEMMINCOPY = ../generic/gemm_ncopy_16.c +DGEMMITCOPY = dgemm_tcopy_16_power8.S +DGEMMONCOPY = dgemm_ncopy_4_power8.S +DGEMMOTCOPY = ../generic/gemm_tcopy_4.c +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CGEMMKERNEL = cgemm_kernel_power9.S +CGEMMINCOPY = ../generic/zgemm_ncopy_8.c +CGEMMITCOPY = ../generic/zgemm_tcopy_8.c +CGEMMONCOPY = ../generic/zgemm_ncopy_4.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_4.c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) + +ZGEMMKERNEL = zgemm_kernel_power9.S +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMINCOPY = ../generic/zgemm_ncopy_8.c +ZGEMMITCOPY = zgemm_tcopy_8_power8.S +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) + +STRSMKERNEL_LN = trsm_kernel_LN_power10.c +STRSMKERNEL_LT = trsm_kernel_LT_power10.c +STRSMKERNEL_RN = trsm_kernel_RN_power10.c +STRSMKERNEL_RT = trsm_kernel_RT_power10.c + +DTRSMKERNEL_LN = trsm_kernel_LN_power10.c +DTRSMKERNEL_LT = dtrsm_kernel_LT_16x4_power8.S +DTRSMKERNEL_RN = trsm_kernel_RN_power10.c +DTRSMKERNEL_RT = trsm_kernel_RT_power10.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +#Todo: CGEMM3MKERNEL should be 4x4 blocksizes. 
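Each GEMM kernel assignment above is paired with packing routines: the ...INCOPY/...ITCOPY entries pack panels of A and the ...ONCOPY/...OTCOPY entries pack panels of B, so the micro-kernel streams both operands with unit stride. The sketch below shows what a generic no-transpose packer of this kind does; it is modeled loosely on ../generic/gemm_ncopy_4.c and is simplified, not the actual source.

#include <stdio.h>

typedef long BLASLONG;

/* Pack an m x n column-major block of A (leading dimension lda) into
   contiguous groups of four columns, interleaved row by row, so the GEMM
   micro-kernel can read the packed buffer with stride 1. */
static void gemm_ncopy_4(BLASLONG m, BLASLONG n, const float *a,
                         BLASLONG lda, float *b)
{
    BLASLONG j = 0;
    for (; j + 4 <= n; j += 4) {
        const float *c0 = a + (j + 0) * lda;
        const float *c1 = a + (j + 1) * lda;
        const float *c2 = a + (j + 2) * lda;
        const float *c3 = a + (j + 3) * lda;
        for (BLASLONG i = 0; i < m; i++) {
            *b++ = c0[i];
            *b++ = c1[i];
            *b++ = c2[i];
            *b++ = c3[i];
        }
    }
    for (; j < n; j++) {            /* leftover columns, packed one by one */
        const float *c0 = a + j * lda;
        for (BLASLONG i = 0; i < m; i++)
            *b++ = c0[i];
    }
}

int main(void)
{
    float a[16], b[16];
    for (int k = 0; k < 16; k++) a[k] = (float)k;   /* 4x4, column-major */
    gemm_ncopy_4(4, 4, a, 4, b);
    printf("%g %g %g %g\n", b[0], b[1], b[2], b[3]); /* 0 4 8 12 */
    return 0;
}
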
+#CGEMM3MKERNEL = zgemm3m_kernel_8x4_sse3.S +#ZGEMM3MKERNEL = zgemm3m_kernel_4x4_sse3.S + +#Pure C for other kernels +#SAMAXKERNEL = ../arm/amax.c +#DAMAXKERNEL = ../arm/amax.c +#CAMAXKERNEL = ../arm/zamax.c +#ZAMAXKERNEL = ../arm/zamax.c +# +#SAMINKERNEL = ../arm/amin.c +#DAMINKERNEL = ../arm/amin.c +#CAMINKERNEL = ../arm/zamin.c +#ZAMINKERNEL = ../arm/zamin.c +# +#SMAXKERNEL = ../arm/max.c +#DMAXKERNEL = ../arm/max.c +# +#SMINKERNEL = ../arm/min.c +#DMINKERNEL = ../arm/min.c +# +ifneq ($(GCCVERSIONGTEQ9),1) +ISAMAXKERNEL = isamax_power9.S +else +ISAMAXKERNEL = isamax.c +endif +IDAMAXKERNEL = idamax.c +ifneq ($(GCCVERSIONGTEQ9),1) +ICAMAXKERNEL = icamax_power9.S +else +ICAMAXKERNEL = icamax.c +endif +IZAMAXKERNEL = izamax.c +# +ifneq ($(GCCVERSIONGTEQ9),1) +ISAMINKERNEL = isamin_power9.S +else +ISAMINKERNEL = isamin.c +endif +IDAMINKERNEL = idamin.c +ifneq ($(GCCVERSIONGTEQ9),1) +ICAMINKERNEL = icamin_power9.S +else +ICAMINKERNEL = icamin.c +endif +IZAMINKERNEL = izamin.c +# +#ISMAXKERNEL = ../arm/imax.c +#IDMAXKERNEL = ../arm/imax.c +# +#ISMINKERNEL = ../arm/imin.c +#IDMINKERNEL = ../arm/imin.c +# +SASUMKERNEL = sasum.c +DASUMKERNEL = dasum.c +CASUMKERNEL = casum.c +ZASUMKERNEL = zasum.c +# +SAXPYKERNEL = saxpy.c +DAXPYKERNEL = daxpy.c +ifneq ($(GCCVERSIONGTEQ9),1) +CAXPYKERNEL = caxpy_power9.S +else +CAXPYKERNEL = caxpy.c +endif +ZAXPYKERNEL = zaxpy.c +# +SCOPYKERNEL = scopy.c +DCOPYKERNEL = dcopy.c +CCOPYKERNEL = ccopy.c +ZCOPYKERNEL = zcopy.c +# +SDOTKERNEL = sdot.c +DDOTKERNEL = ddot.c +DSDOTKERNEL = sdot.c +ifneq ($(GCCVERSIONGTEQ9),1) +CDOTKERNEL = cdot_power9.S +else +CDOTKERNEL = cdot.c +endif +ZDOTKERNEL = zdot.c +# +SNRM2KERNEL = ../arm/nrm2.c +DNRM2KERNEL = ../arm/nrm2.c +CNRM2KERNEL = ../arm/znrm2.c +ZNRM2KERNEL = ../arm/znrm2.c +# +SROTKERNEL = srot.c +DROTKERNEL = drot.c +CROTKERNEL = crot.c +ZROTKERNEL = zrot.c +# +SSCALKERNEL = sscal.c +DSCALKERNEL = dscal.c +CSCALKERNEL = zscal.c +ZSCALKERNEL = zscal.c +# +SSWAPKERNEL = sswap.c +DSWAPKERNEL = dswap.c +CSWAPKERNEL = cswap.c +ZSWAPKERNEL = zswap.c +# + +SGEMVNKERNEL = sgemv_n.c +DGEMVNKERNEL = dgemv_n.c +CGEMVNKERNEL = cgemv_n.c +ZGEMVNKERNEL = zgemv_n_4.c +# +SGEMVTKERNEL = sgemv_t.c +DGEMVTKERNEL = dgemv_t.c +CGEMVTKERNEL = cgemv_t.c +ZGEMVTKERNEL = zgemv_t_4.c + + +#SSYMV_U_KERNEL = ../generic/symv_k.c +#SSYMV_L_KERNEL = ../generic/symv_k.c +#DSYMV_U_KERNEL = ../generic/symv_k.c +#DSYMV_L_KERNEL = ../generic/symv_k.c +#QSYMV_U_KERNEL = ../generic/symv_k.c +#QSYMV_L_KERNEL = ../generic/symv_k.c +#CSYMV_U_KERNEL = ../generic/zsymv_k.c +#CSYMV_L_KERNEL = ../generic/zsymv_k.c +#ZSYMV_U_KERNEL = ../generic/zsymv_k.c +#ZSYMV_L_KERNEL = ../generic/zsymv_k.c +#XSYMV_U_KERNEL = ../generic/zsymv_k.c +#XSYMV_L_KERNEL = ../generic/zsymv_k.c + +#ZHEMV_U_KERNEL = ../generic/zhemv_k.c +#ZHEMV_L_KERNEL = ../generic/zhemv_k.c + +LSAME_KERNEL = ../generic/lsame.c +SCABS_KERNEL = ../generic/cabs.c +DCABS_KERNEL = ../generic/cabs.c +QCABS_KERNEL = ../generic/cabs.c + +#Dump kernel +CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c +ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +endif diff --git a/kernel/power/KERNEL.PPC440 b/kernel/power/KERNEL.PPC440 index 988a4b701..677af5f21 100644 --- a/kernel/power/KERNEL.PPC440 +++ b/kernel/power/KERNEL.PPC440 @@ -15,13 +15,23 @@ ZASUMKERNEL = zasum_ppc440.S SAXPYKERNEL = axpy_ppc440.S DAXPYKERNEL = axpy_ppc440.S +ifneq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +CAXPYKERNEL = ../arm/zaxpy.c +ZAXPYKERNEL = ../arm/zaxpy.c +else CAXPYKERNEL = zaxpy_ppc440.S ZAXPYKERNEL = zaxpy_ppc440.S +endif 
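On the byte order where the hand-written zaxpy_ppc440.S is not selected, the build falls back to the generic kernel under ../arm/, which is plain scalar C and therefore endian-neutral. The function below (zaxpy_ref is an illustrative name) is a simplified sketch of what that fallback computes; the real source also handles conjugation variants and an unrolled fast path.

#include <stdio.h>

typedef long BLASLONG;

/* y += (alpha_r + i*alpha_i) * x over n complex doubles, with arbitrary
   strides; makes no byte-order assumptions, hence safe on either
   endianness. */
static int zaxpy_ref(BLASLONG n, double alpha_r, double alpha_i,
                     const double *x, BLASLONG inc_x,
                     double *y, BLASLONG inc_y)
{
    BLASLONG ix = 0, iy = 0;
    for (BLASLONG i = 0; i < n; i++) {
        double xr = x[ix], xi = x[ix + 1];
        y[iy]     += alpha_r * xr - alpha_i * xi;   /* real part */
        y[iy + 1] += alpha_r * xi + alpha_i * xr;   /* imag part */
        ix += 2 * inc_x;
        iy += 2 * inc_y;
    }
    return 0;
}

int main(void)
{
    double x[2] = { 1.0, 2.0 }, y[2] = { 0.0, 0.0 };
    zaxpy_ref(1, 3.0, 0.0, x, 1, y, 1);
    printf("%g %g\n", y[0], y[1]);   /* 3 6 */
    return 0;
}
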
SDOTKERNEL = dot_ppc440.S DDOTKERNEL = dot_ppc440.S +ifneq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) CDOTKERNEL = zdot_ppc440.S ZDOTKERNEL = zdot_ppc440.S +else +CDOTKERNEL = ../arm/zdot.c +ZDOTKERNEL = ../arm/zdot.c +endif ISAMAXKERNEL = iamax_ppc440.S IDAMAXKERNEL = iamax_ppc440.S @@ -52,8 +62,13 @@ ZNRM2KERNEL = znrm2_ppc440.S SROTKERNEL = rot_ppc440.S DROTKERNEL = rot_ppc440.S +ifneq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) CROTKERNEL = zrot_ppc440.S ZROTKERNEL = zrot_ppc440.S +else +CROTKERNEL = ../arm/zrot.c +ZROTKERNEL = ../arm/zrot.c +endif SSCALKERNEL = scal_ppc440.S DSCALKERNEL = scal_ppc440.S @@ -116,3 +131,15 @@ ZTRSMKERNEL_LN = ztrsm_kernel_ppc440_LN.S ZTRSMKERNEL_LT = ztrsm_kernel_ppc440_LT.S ZTRSMKERNEL_RN = ztrsm_kernel_ppc440_LT.S ZTRSMKERNEL_RT = ztrsm_kernel_ppc440_RT.S + +ifeq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +SGEMVNKERNEL = ../arm/gemv_n.c +DGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c +DGEMVTKERNEL = ../arm/gemv_t.c +CGEMVNKERNEL = ../arm/zgemv_n.c +ZGEMVNKERNEL = ../arm/zgemv_n.c +CGEMVTKERNEL = ../arm/zgemv_t.c +ZGEMVTKERNEL = ../arm/zgemv_t.c +endif + diff --git a/kernel/power/KERNEL.PPC970 b/kernel/power/KERNEL.PPC970 index 7431a7788..a99fb7d96 100644 --- a/kernel/power/KERNEL.PPC970 +++ b/kernel/power/KERNEL.PPC970 @@ -1,3 +1,14 @@ +ifeq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +SGEMMKERNEL = gemm_kernel.S +SGEMMINCOPY = +SGEMMITCOPY = +SGEMMONCOPY = ../generic/gemm_ncopy_4.c +SGEMMOTCOPY = ../generic/gemm_tcopy_4.c +SGEMMINCOPYOBJ = +SGEMMITCOPYOBJ = +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) +else SGEMMKERNEL = gemm_kernel_altivec.S SGEMMINCOPY = ../generic/gemm_ncopy_16.c SGEMMITCOPY = ../generic/gemm_tcopy_16.c @@ -7,6 +18,8 @@ SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) +endif + DGEMMKERNEL = gemm_kernel.S DGEMMINCOPY = DGEMMITCOPY = @@ -16,6 +29,18 @@ DGEMMINCOPYOBJ = DGEMMITCOPYOBJ = DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +ifeq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +CGEMMKERNEL = zgemm_kernel.S +CGEMMINCOPY = +CGEMMITCOPY = +CGEMMONCOPY = ../generic/zgemm_ncopy_2.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +CGEMMINCOPYOBJ = +CGEMMITCOPYOBJ = +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) +else CGEMMKERNEL = zgemm_kernel_altivec.S CGEMMINCOPY = ../generic/zgemm_ncopy_8.c CGEMMITCOPY = ../generic/zgemm_tcopy_8.c @@ -25,6 +50,8 @@ CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) +endif + ZGEMMKERNEL = zgemm_kernel.S ZGEMMINCOPY = ZGEMMITCOPY = @@ -35,22 +62,30 @@ ZGEMMITCOPYOBJ = ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) -#STRSMKERNEL_LN = trsm_kernel_LN.S -#STRSMKERNEL_LT = trsm_kernel_LT.S -#STRSMKERNEL_RN = trsm_kernel_LT.S -#STRSMKERNEL_RT = trsm_kernel_RT.S - DTRSMKERNEL_LN = trsm_kernel_LN.S DTRSMKERNEL_LT = trsm_kernel_LT.S DTRSMKERNEL_RN = trsm_kernel_LT.S DTRSMKERNEL_RT = trsm_kernel_RT.S -#CTRSMKERNEL_LN = ztrsm_kernel_LN.S -#CTRSMKERNEL_LT = ztrsm_kernel_LT.S -#CTRSMKERNEL_RN = ztrsm_kernel_LT.S -#CTRSMKERNEL_RT = ztrsm_kernel_RT.S - ZTRSMKERNEL_LN = ztrsm_kernel_LN.S ZTRSMKERNEL_LT = 
ztrsm_kernel_LT.S ZTRSMKERNEL_RN = ztrsm_kernel_LT.S ZTRSMKERNEL_RT = ztrsm_kernel_RT.S + +ifeq ($(__BYTE_ORDER__),__ORDER_BIG_ENDIAN__) +STRSMKERNEL_LN = trsm_kernel_LN.S +STRSMKERNEL_LT = trsm_kernel_LT.S +STRSMKERNEL_RN = trsm_kernel_LT.S +STRSMKERNEL_RT = trsm_kernel_RT.S + +CTRSMKERNEL_LN = ztrsm_kernel_LN.S +CTRSMKERNEL_LT = ztrsm_kernel_LT.S +CTRSMKERNEL_RN = ztrsm_kernel_LT.S +CTRSMKERNEL_RT = ztrsm_kernel_RT.S + + +SROTKERNEL = ../arm/rot.c +DROTKERNEL = ../arm/rot.c +CROTKERNEL = ../arm/zrot.c +ZROTKERNEL = ../arm/zrot.c +endif diff --git a/kernel/power/KERNEL.PPCG4 b/kernel/power/KERNEL.PPCG4 index f615754bb..54660b54d 100644 --- a/kernel/power/KERNEL.PPCG4 +++ b/kernel/power/KERNEL.PPCG4 @@ -20,8 +20,10 @@ ZAXPYKERNEL = zaxpy_ppc440.S SDOTKERNEL = dot_ppc440.S DDOTKERNEL = dot_ppc440.S -CDOTKERNEL = zdot_ppc440.S -ZDOTKERNEL = zdot_ppc440.S +#CDOTKERNEL = zdot_ppc440.S +#ZDOTKERNEL = zdot_ppc440.S +CDOTKERNEL = ../arm/zdot.c +ZDOTKERNEL = ../arm/zdot.c ISAMAXKERNEL = iamax_ppc440.S IDAMAXKERNEL = iamax_ppc440.S @@ -52,8 +54,11 @@ ZNRM2KERNEL = znrm2_ppc440.S SROTKERNEL = rot_ppc440.S DROTKERNEL = rot_ppc440.S -CROTKERNEL = zrot_ppc440.S -ZROTKERNEL = zrot_ppc440.S +#CROTKERNEL = zrot_ppc440.S +#ZROTKERNEL = zrot_ppc440.S +CROTKERNEL = ../arm/zrot.c +ZROTKERNEL = ../arm/zrot.c + SSCALKERNEL = scal_ppc440.S DSCALKERNEL = scal_ppc440.S @@ -78,13 +83,18 @@ DGEMMINCOPYOBJ = DGEMMITCOPYOBJ = DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) -CGEMMKERNEL = zgemm_kernel_altivec_g4.S -CGEMMINCOPY = ../generic/zgemm_ncopy_8.c -CGEMMITCOPY = ../generic/zgemm_tcopy_8.c +#CGEMMKERNEL = zgemm_kernel_altivec_g4.S +#CGEMMINCOPY = ../generic/zgemm_ncopy_8.c +#CGEMMITCOPY = ../generic/zgemm_tcopy_8.c +CGEMMKERNEL = zgemm_kernel.S +CGEMMINCOPY = +CGEMMONCOPY = CGEMMONCOPY = ../generic/zgemm_ncopy_2.c CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) -CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +CGEMMINCOPYOBJ = +#cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = +#cgemm_itcopy$(TSUFFIX).$(SUFFIX) CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZGEMMKERNEL = zgemm_kernel_g4.S diff --git a/kernel/power/axpy.S b/kernel/power/axpy.S index fb9789da4..238771826 100644 --- a/kernel/power/axpy.S +++ b/kernel/power/axpy.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define N r3 #define X r6 diff --git a/kernel/power/axpy_ppc440.S b/kernel/power/axpy_ppc440.S index 81a660e4d..7733e46e7 100644 --- a/kernel/power/axpy_ppc440.S +++ b/kernel/power/axpy_ppc440.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define N r3 #define X r6 diff --git a/kernel/power/casum.c b/kernel/power/casum.c index a9ece0768..06982bfba 100644 --- a/kernel/power/casum.c +++ b/kernel/power/casum.c @@ -46,9 +46,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
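The casum.c hunk that follows wraps the POWER8 micro-kernel include in vector-capability guards, so the file still compiles when the compiler exposes no VSX/AltiVec support (neither __VEC__ nor __ALTIVEC__ defined) and the scalar loop takes over. The shape of that pattern, with illustrative names and a deliberately simplified fallback:

#include <stdio.h>

#if defined(__VEC__) || defined(__ALTIVEC__)
/* The real file includes "casum_microk_power8.c" here, which defines
   HAVE_KERNEL_16 along with a vectorized casum_kernel_16. */
#endif

#ifndef HAVE_KERNEL_16
/* Scalar fallback: sum of |re| + |im| over n complex singles. */
static float casum_kernel_16(long n, float *x)
{
    float sum = 0.0f;
    for (long i = 0; i < 2 * n; i++)
        sum += (x[i] < 0.0f) ? -x[i] : x[i];
    return sum;
}
#endif

int main(void)
{
    float x[8] = { 1, -2, 3, -4, 5, -6, 7, -8 };
    printf("%g\n", casum_kernel_16(4, x));   /* 36 */
    return 0;
}
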
#endif -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "casum_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_16 diff --git a/kernel/power/casum_microk_power8.c b/kernel/power/casum_microk_power8.c index 7d12c9885..91d53ffc3 100644 --- a/kernel/power/casum_microk_power8.c +++ b/kernel/power/casum_microk_power8.c @@ -68,10 +68,10 @@ static float casum_kernel_16 (long n, float *x) "addi %2, %2, 128 \n\t" "addic. %1, %1, -16 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvabssp 48, 40 \n\t" "xvabssp 49, 41 \n\t" @@ -108,9 +108,9 @@ static float casum_kernel_16 (long n, float *x) "xvaddsp 38, 38, %x5 \n\t" "xvaddsp 39, 39, %x6 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvabssp 48, 40 \n\t" "xvabssp 49, 41 \n\t" diff --git a/kernel/power/caxpy.c b/kernel/power/caxpy.c index 4bdf13c34..0545766b1 100644 --- a/kernel/power/caxpy.c +++ b/kernel/power/caxpy.c @@ -24,12 +24,21 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ - #include "common.h" - - #ifndef HAVE_ASM_KERNEL #include + +#define offset_0 0 +#define offset_1 16 +#define offset_2 32 +#define offset_3 48 +#define offset_4 64 +#define offset_5 80 +#define offset_6 96 +#define offset_7 112 + +static const unsigned char __attribute__((aligned(16))) swap_mask_arr[]={ 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11}; + static void caxpy_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT alpha_r, FLOAT alpha_i) { @@ -43,28 +52,29 @@ static void caxpy_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT alpha_r, FLOAT register __vector float valpha_i = {alpha_i, alpha_i,alpha_i, alpha_i}; #endif - __vector unsigned char swap_mask = { 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11}; - register __vector float *vy = (__vector float *) y; - register __vector float *vx = (__vector float *) x; + __vector unsigned char swap_mask = *((__vector unsigned char*)swap_mask_arr); + register __vector float *vptr_y = (__vector float *) y; + register __vector float *vptr_x = (__vector float *) x; BLASLONG i=0; - for (; i < n/2; i += 8) { + for(;i 0 ) + { + copy_kernel(n1, x, y); + i=n1; + ix=n1*2; + iy=n1*2; + } + + while(i < n) + { + y[iy] = x[iy] ; + y[iy+1] = x[ix+1] ; + ix+=2; + iy+=2; + i++ ; + + } + + + } + else + { + + BLASLONG inc_x2 = 2 * inc_x; + BLASLONG inc_y2 = 2 * inc_y; + + while(i < n) + { + y[iy] = x[ix] ; + y[iy+1] = x[ix+1] ; + ix += inc_x2 ; + iy += inc_y2 ; + i++ ; + + } + + } + return(0); + + +} + + diff --git a/kernel/power/cdot.c b/kernel/power/cdot.c index f86a33f22..c53fe0c02 100644 --- a/kernel/power/cdot.c +++ b/kernel/power/cdot.c @@ -23,17 +23,30 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ +#if !defined(__VEC__) || !defined(__ALTIVEC__) +#include "../arm/zdot.c" +#else #include "common.h" - +#if defined(POWER10) +#include "cdot_microk_power10.c" +#else #ifndef HAVE_KERNEL_8 #include + +#define offset_0 0 +#define offset_1 16 +#define offset_2 32 +#define offset_3 48 + + + +static const unsigned char __attribute__((aligned(16))) swap_mask_arr[]={ 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11}; static void cdot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, float *dot) { - __vector unsigned char swap_mask = { 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11}; - register __vector float *vy = (__vector float *) y; - register __vector float *vx = (__vector float *) x; - BLASLONG i = 0; + __vector unsigned char swap_mask = *((__vector unsigned char*)swap_mask_arr); + register __vector float *vptr_y = (__vector float *) y; + register __vector float *vptr_x = (__vector float *) x; register __vector float vd_0 = { 0 }; register __vector float vd_1 = { 0 }; register __vector float vd_2 = { 0 }; @@ -41,26 +54,23 @@ static void cdot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, float *dot) register __vector float vdd_0 = { 0 }; register __vector float vdd_1 = { 0 }; register __vector float vdd_2 = { 0 }; - register __vector float vdd_3 = { 0 }; - for (; i < n/2; i += 4) { - - register __vector float vyy_0 ; - register __vector float vyy_1 ; - register __vector float vyy_2 ; - register __vector float vyy_3 ; - - register __vector float vy_0 = vy[i]; - register __vector float vy_1 = vy[i + 1]; - register __vector float vy_2 = vy[i + 2]; - register __vector float vy_3 = vy[i + 3]; - register __vector float vx_0= vx[i]; - register __vector float vx_1 = vx[i + 1]; - register __vector float vx_2 = vx[i + 2]; - register __vector float vx_3 = vx[i + 3]; - vyy_0 = vec_perm(vy_0, vy_0, swap_mask); - vyy_1 = vec_perm(vy_1, vy_1, swap_mask); - vyy_2 = vec_perm(vy_2, vy_2, swap_mask); - vyy_3 = vec_perm(vy_3, vy_3, swap_mask); + register __vector float vdd_3 = { 0 }; + BLASLONG i=0; + for(;i #include @@ -32,7 +35,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define NBMAX 1024 -static const unsigned char swap_mask_arr[]={ 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11}; +static const unsigned char __attribute__((aligned(16))) swap_mask_arr[]={ 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11}; static void cgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOAT *y) { @@ -62,23 +65,24 @@ static void cgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA register __vector float vx3_r = {x[6], -x[6],x[6], -x[6]}; register __vector float vx3_i = {x[7], x[7],x[7], x[7]}; #endif - register __vector float *vy = (__vector float *) y; + register __vector float *vptr_y = (__vector float *) y; register __vector float *vptr_a0 = (__vector float *) a0; register __vector float *vptr_a1 = (__vector float *) a1; register __vector float *vptr_a2 = (__vector float *) a2; register __vector float *vptr_a3 = (__vector float *) a3; BLASLONG i = 0; - for (;i< n / 2; i+=2) { - register __vector float vy_0 = vy[i]; - register __vector float vy_1 = vy[i + 1]; - register __vector float va0 = vptr_a0[i]; - register __vector float va1 = vptr_a1[i]; - register __vector float va2 = vptr_a2[i]; - register __vector float va3 = vptr_a3[i]; - register __vector float va0_1 = vptr_a0[i + 1]; - register __vector float va1_1 = vptr_a1[i + 1]; - register __vector float va2_1 = vptr_a2[i + 1]; - register __vector float va3_1 = vptr_a3[i + 1]; + BLASLONG i2=16; + for (;i< n * 8; i+=32,i2+=32) { + register __vector float vy_0 = vec_vsx_ld(i,vptr_y); + register __vector float vy_1 = vec_vsx_ld(i2,vptr_y); + register __vector float va0 = vec_vsx_ld(i,vptr_a0); + register __vector float va1 = vec_vsx_ld(i, vptr_a1); + register __vector float va2 = vec_vsx_ld(i ,vptr_a2); + register __vector float va3 = vec_vsx_ld(i ,vptr_a3); + register __vector float va0_1 = vec_vsx_ld(i2 ,vptr_a0); + register __vector float va1_1 = vec_vsx_ld(i2 ,vptr_a1); + register __vector float va2_1 = vec_vsx_ld(i2 ,vptr_a2); + register __vector float va3_1 = vec_vsx_ld(i2 ,vptr_a3); vy_0 += va0*vx0_r + va1*vx1_r + va2*vx2_r + va3*vx3_r; vy_1 += va0_1*vx0_r + va1_1*vx1_r + va2_1*vx2_r + va3_1*vx3_r; @@ -93,8 +97,8 @@ static void cgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA vy_0 += va0*vx0_i + va1*vx1_i + va2*vx2_i + va3*vx3_i; vy_1 += va0_1*vx0_i + va1_1*vx1_i + va2_1*vx2_i + va3_1*vx3_i; - vy[i] = vy_0; - vy[i + 1] = vy_1; + vec_vsx_st(vy_0 ,i, vptr_y); + vec_vsx_st(vy_1,i2,vptr_y); } } @@ -118,17 +122,19 @@ static void cgemv_kernel_4x2(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA register __vector float vx1_r = {x[2], -x[2],x[2], -x[2]}; register __vector float vx1_i = {x[3], x[3],x[3], x[3]}; #endif - register __vector float *vy = (__vector float *) y; + register __vector float *vptr_y = (__vector float *) y; register __vector float *vptr_a0 = (__vector float *) a0; register __vector float *vptr_a1 = (__vector float *) a1; - BLASLONG i = 0; - for (;i< n / 2; i+=2) { - register __vector float vy_0 = vy[i]; - register __vector float vy_1 = vy[i + 1]; - register __vector float va0 = vptr_a0[i]; - register __vector float va1 = vptr_a1[i]; - register __vector float va0_1 = vptr_a0[i + 1]; - register __vector float va1_1 = vptr_a1[i + 1]; + BLASLONG i = 0; + BLASLONG i2 = 16; + for (;i< n * 8; i+=32, i2+=32) { + register __vector float vy_0 = vec_vsx_ld(i,vptr_y); + register __vector float vy_1 = vec_vsx_ld(i2,vptr_y); + register __vector float va0 = vec_vsx_ld(i,vptr_a0); + register __vector float va1 = vec_vsx_ld(i, vptr_a1); + register __vector float va0_1 = vec_vsx_ld(i2 
,vptr_a0); + register __vector float va1_1 = vec_vsx_ld(i2 ,vptr_a1); + register __vector float va0x = vec_perm(va0, va0,swap_mask); register __vector float va0x_1 = vec_perm(va0_1, va0_1,swap_mask); register __vector float va1x = vec_perm(va1, va1,swap_mask); @@ -136,8 +142,8 @@ static void cgemv_kernel_4x2(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA vy_0 += va0*vx0_r + va1*vx1_r + va0x*vx0_i + va1x*vx1_i; vy_1 += va0_1*vx0_r + va1_1*vx1_r + va0x_1*vx0_i + va1x_1*vx1_i; - vy[i] = vy_0; - vy[i + 1] = vy_1; + vec_vsx_st(vy_0 ,i, vptr_y); + vec_vsx_st(vy_1,i2,vptr_y); } } @@ -154,21 +160,23 @@ static void cgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y) { register __vector float vx0_r = {x[0], -x[0],x[0], -x[0]}; register __vector float vx0_i = {x[1], x[1],x[1], x[1]}; #endif - register __vector float *vy = (__vector float *) y; + register __vector float *vptr_y = (__vector float *) y; register __vector float *vptr_a0 = (__vector float *) ap; BLASLONG i = 0; - for (;i< n / 2; i+=2) { - register __vector float vy_0 = vy[i]; - register __vector float vy_1 = vy[i + 1]; - register __vector float va0 = vptr_a0[i]; - register __vector float va0_1 = vptr_a0[i + 1]; + BLASLONG i2 = 16; + for (;i< n * 8; i+=32, i2+=32) { + register __vector float vy_0 = vec_vsx_ld(i,vptr_y); + register __vector float vy_1 = vec_vsx_ld(i2,vptr_y); + register __vector float va0 = vec_vsx_ld(i,vptr_a0); + register __vector float va0_1 = vec_vsx_ld(i2 ,vptr_a0); + register __vector float va0x = vec_perm(va0, va0,swap_mask); register __vector float va0x_1 = vec_perm(va0_1, va0_1,swap_mask); vy_0 += va0*vx0_r + va0x*vx0_i; vy_1 += va0_1*vx0_r + va0x_1*vx0_i; - vy[i] = vy_0; - vy[i + 1] = vy_1; + vec_vsx_st(vy_0 ,i, vptr_y); + vec_vsx_st(vy_1,i2,vptr_y); } } @@ -176,7 +184,7 @@ static void cgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y) { static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest, FLOAT alpha_r, FLOAT alpha_i) { - BLASLONG i; + BLASLONG i=0; if (inc_dest != 2) { @@ -213,20 +221,24 @@ static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest, FLOAT register __vector float *vptr_src = (__vector float *) src; register __vector float *vptr_y = (__vector float *) dest; - for (i = 0; i < n/2; i += 2 ){ - register __vector float vy_0 = vptr_y[i]; - register __vector float vy_1 = vptr_y[i +1]; + BLASLONG i2 = 16; + for (;i< n * 8; i+=32, i2+=32) { + register __vector float vy_0 = vec_vsx_ld(i,vptr_y); + register __vector float vy_1 = vec_vsx_ld(i2,vptr_y); + - register __vector float vsrc = vptr_src[i]; - register __vector float vsrc_1 = vptr_src[i + 1]; - register __vector float vsrcx = vec_perm(vsrc, vsrc, swap_mask); - register __vector float vsrcx_1 = vec_perm(vsrc_1, vsrc_1, swap_mask); + register __vector float vsrc = vec_vsx_ld(i,vptr_src); + register __vector float vsrc_1 = vec_vsx_ld(i2,vptr_src); - vy_0 += vsrc*valpha_r + vsrcx*valpha_i; - vy_1 += vsrc_1*valpha_r + vsrcx_1*valpha_i; - vptr_y[i] = vy_0; - vptr_y[i+1 ] = vy_1; + register __vector float vsrcx = vec_perm(vsrc, vsrc, swap_mask); + register __vector float vsrcx_1 = vec_perm(vsrc_1, vsrc_1, swap_mask); + + vy_0 += vsrc*valpha_r + vsrcx*valpha_i; + vy_1 += vsrc_1*valpha_r + vsrcx_1*valpha_i; + + vec_vsx_st(vy_0 ,i, vptr_y); + vec_vsx_st(vy_1,i2,vptr_y); } @@ -237,7 +249,7 @@ static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest, FLOAT int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT 
*y, BLASLONG inc_y, FLOAT * buffer) { - BLASLONG i; + BLASLONG i=0; FLOAT *a_ptr; FLOAT *x_ptr; FLOAT *y_ptr; @@ -247,8 +259,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, BLASLONG m2; BLASLONG m3; BLASLONG n2; - - FLOAT xbuffer[8], *ybuffer; + FLOAT xbuffer[8] __attribute__((aligned(16))); + FLOAT *ybuffer; if (m < 1) return (0); if (n < 1) return (0); @@ -582,4 +594,4 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, return (0); } - +#endif diff --git a/kernel/power/cgemv_t.c b/kernel/power/cgemv_t.c index c646618cf..1bfc235db 100644 --- a/kernel/power/cgemv_t.c +++ b/kernel/power/cgemv_t.c @@ -23,16 +23,19 @@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ +*****************************************************************************/ +#if !defined(__VEC__) || !defined(__ALTIVEC__) +#include "../arm/zgemv_t.c" +#else #include "common.h" #define NBMAX 1024 #include -static const unsigned char swap_mask_arr[]={ 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11}; +static const unsigned char __attribute__((aligned(16))) swap_mask_arr[]={ 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11}; static void cgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha_r, FLOAT alpha_i) { - BLASLONG i; + FLOAT *a0, *a1, *a2, *a3; a0 = ap; a1 = ap + lda; @@ -48,26 +51,39 @@ static void cgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA register __vector float vtemp2_r = {0.0, 0.0,0.0,0.0}; register __vector float vtemp3_p = {0.0, 0.0,0.0,0.0}; register __vector float vtemp3_r = {0.0, 0.0,0.0,0.0}; - __vector float* va0 = (__vector float*) a0; - __vector float* va1 = (__vector float*) a1; - __vector float* va2 = (__vector float*) a2; - __vector float* va3 = (__vector float*) a3; + __vector float* vptr_a0 = (__vector float*) a0; + __vector float* vptr_a1 = (__vector float*) a1; + __vector float* vptr_a2 = (__vector float*) a2; + __vector float* vptr_a3 = (__vector float*) a3; __vector float* v_x = (__vector float*) x; - for (i = 0; i < n / 2; i+=2) { - register __vector float vx_0 = v_x[i]; - register __vector float vx_1 = v_x[i+1]; + BLASLONG i = 0; + BLASLONG i2 = 16; + for (;i< n * 8; i+=32, i2+=32) { + register __vector float vx_0 = vec_vsx_ld( i,v_x) ; + register __vector float vx_1 = vec_vsx_ld(i2, v_x); + register __vector float vxr_0 = vec_perm(vx_0, vx_0, swap_mask); register __vector float vxr_1 = vec_perm(vx_1, vx_1, swap_mask); - vtemp0_p += vx_0*va0[i] + vx_1*va0[i+1] ; - vtemp0_r += vxr_0*va0[i] + vxr_1*va0[i+1]; - vtemp1_p += vx_0*va1[i] + vx_1*va1[i+1]; - vtemp1_r += vxr_0*va1[i] + vxr_1*va1[i+1]; - vtemp2_p += vx_0*va2[i] + vx_1*va2[i+1]; - vtemp2_r += vxr_0*va2[i] + vxr_1*va2[i+1]; - vtemp3_p += vx_0*va3[i] + vx_1*va3[i+1]; - vtemp3_r += vxr_0*va3[i] + vxr_1*va3[i+1]; + register __vector float va0 = vec_vsx_ld(i,vptr_a0); + register __vector float va1 = vec_vsx_ld(i, vptr_a1); + register __vector float va2 = vec_vsx_ld(i ,vptr_a2); + register __vector float va3 = vec_vsx_ld(i ,vptr_a3); + register __vector float va0_1 = vec_vsx_ld(i2 ,vptr_a0); + register __vector float va1_1 = vec_vsx_ld(i2 ,vptr_a1); + register __vector float va2_1 = vec_vsx_ld(i2 ,vptr_a2); + 
register __vector float va3_1 = vec_vsx_ld(i2 ,vptr_a3); + + + vtemp0_p += vx_0*va0 + vx_1*va0_1 ; + vtemp0_r += vxr_0*va0 + vxr_1*va0_1; + vtemp1_p += vx_0*va1 + vx_1*va1_1; + vtemp1_r += vxr_0*va1 + vxr_1*va1_1; + vtemp2_p += vx_0*va2 + vx_1*va2_1; + vtemp2_r += vxr_0*va2 + vxr_1*va2_1; + vtemp3_p += vx_0*va3 + vx_1*va3_1; + vtemp3_r += vxr_0*va3 + vxr_1*va3_1; } @@ -128,7 +144,7 @@ static void cgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA static void cgemv_kernel_4x2(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha_r, FLOAT alpha_i) { - BLASLONG i; + FLOAT *a0, *a1; a0 = ap; a1 = ap + lda; @@ -138,23 +154,33 @@ static void cgemv_kernel_4x2(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA register __vector float vtemp0_r = {0.0, 0.0,0.0,0.0}; register __vector float vtemp1_p = {0.0, 0.0,0.0,0.0}; register __vector float vtemp1_r = {0.0, 0.0,0.0,0.0}; - __vector float* va0 = (__vector float*) a0; - __vector float* va1 = (__vector float*) a1; + + + __vector float* vptr_a0 = (__vector float*) a0; + __vector float* vptr_a1 = (__vector float*) a1; __vector float* v_x = (__vector float*) x; - for (i = 0; i < n / 2; i+=2) { - register __vector float vx_0 = v_x[i]; - register __vector float vx_1 = v_x[i+1]; + BLASLONG i = 0; + BLASLONG i2 = 16; + for (;i< n * 8; i+=32, i2+=32) { + register __vector float vx_0 = vec_vsx_ld( i,v_x) ; + register __vector float vx_1 = vec_vsx_ld(i2, v_x); + register __vector float vxr_0 = vec_perm(vx_0, vx_0, swap_mask); register __vector float vxr_1 = vec_perm(vx_1, vx_1, swap_mask); - vtemp0_p += vx_0*va0[i] + vx_1*va0[i+1] ; - vtemp0_r += vxr_0*va0[i] + vxr_1*va0[i+1]; - vtemp1_p += vx_0*va1[i] + vx_1*va1[i+1]; - vtemp1_r += vxr_0*va1[i] + vxr_1*va1[i+1]; + register __vector float va0 = vec_vsx_ld(i,vptr_a0); + register __vector float va1 = vec_vsx_ld(i, vptr_a1); + register __vector float va0_1 = vec_vsx_ld(i2 ,vptr_a0); + register __vector float va1_1 = vec_vsx_ld(i2 ,vptr_a1); - } + vtemp0_p += vx_0*va0 + vx_1*va0_1 ; + vtemp0_r += vxr_0*va0 + vxr_1*va0_1; + vtemp1_p += vx_0*va1 + vx_1*va1_1; + vtemp1_r += vxr_0*va1 + vxr_1*va1_1; + + } #if ( !defined(CONJ) && !defined(XCONJ) ) || ( defined(CONJ) && defined(XCONJ) ) register FLOAT temp_r0 = vtemp0_p[0] - vtemp0_p[1] + vtemp0_p[2] - vtemp0_p[3]; @@ -193,23 +219,27 @@ static void cgemv_kernel_4x2(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA static void cgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha_r, FLOAT alpha_i) { - BLASLONG i; + __vector unsigned char swap_mask = *((__vector unsigned char*)swap_mask_arr); //p for positive(real*real,image*image,real*real,image*image) r for image (real*image,image*real,real*image,image*real) register __vector float vtemp0_p = {0.0, 0.0,0.0,0.0}; register __vector float vtemp0_r = {0.0, 0.0,0.0,0.0}; - __vector float* va0 = (__vector float*) ap; + __vector float* vptr_a0 = (__vector float*) ap; __vector float* v_x = (__vector float*) x; - - for (i = 0; i < n / 2; i+=2) { - register __vector float vx_0 = v_x[i]; - register __vector float vx_1 = v_x[i+1]; + BLASLONG i = 0; + BLASLONG i2 = 16; + for (;i< n * 8; i+=32, i2+=32) { + register __vector float vx_0 = vec_vsx_ld( i,v_x) ; + register __vector float vx_1 = vec_vsx_ld(i2, v_x); + register __vector float vxr_0 = vec_perm(vx_0, vx_0, swap_mask); register __vector float vxr_1 = vec_perm(vx_1, vx_1, swap_mask); - vtemp0_p += vx_0*va0[i] + vx_1*va0[i+1] ; - vtemp0_r += vxr_0*va0[i] + vxr_1*va0[i+1]; + register __vector float va0 = 
vec_vsx_ld(i,vptr_a0); + register __vector float va0_1 = vec_vsx_ld(i2 ,vptr_a0); + vtemp0_p += vx_0*va0 + vx_1*va0_1 ; + vtemp0_r += vxr_0*va0 + vxr_1*va0_1; } #if ( !defined(CONJ) && !defined(XCONJ) ) || ( defined(CONJ) && defined(XCONJ) ) @@ -249,8 +279,8 @@ static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) { } int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) { - BLASLONG i; - BLASLONG j; + BLASLONG i=0; + BLASLONG j=0; FLOAT *a_ptr; FLOAT *x_ptr; FLOAT *y_ptr; @@ -260,8 +290,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, BLASLONG m2; BLASLONG m3; BLASLONG n2; - - FLOAT ybuffer[8], *xbuffer; + FLOAT ybuffer[8] __attribute__((aligned(16))); + FLOAT *xbuffer; if (m < 1) return (0); if (n < 1) return (0); @@ -568,4 +598,4 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, return (0); } - +#endif diff --git a/kernel/power/copy_microk_power10.c b/kernel/power/copy_microk_power10.c new file mode 100644 index 000000000..8bca1a1e7 --- /dev/null +++ b/kernel/power/copy_microk_power10.c @@ -0,0 +1,146 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define HAVE_KERNEL 1 + +static void copy_kernel (BLASLONG n, FLOAT *x, FLOAT *y) +{ + __asm__ + ( + "lxvp 32, 0(%2) \n\t" + "lxvp 34, 32(%2) \n\t" + "lxvp 36, 64(%2) \n\t" + "lxvp 38, 96(%2) \n\t" + "lxvp 40, 128(%2) \n\t" + "lxvp 42, 160(%2) \n\t" + "lxvp 44, 192(%2) \n\t" + "lxvp 46, 224(%2) \n\t" + + "lxvp 48, 256(%2) \n\t" + "lxvp 50, 288(%2) \n\t" + "lxvp 52, 320(%2) \n\t" + "lxvp 54, 352(%2) \n\t" + "lxvp 56, 384(%2) \n\t" + "lxvp 58, 416(%2) \n\t" + "lxvp 60, 448(%2) \n\t" + "lxvp 62, 480(%2) \n\t" + "addi %2, %2, 512 \n\t" +#if !defined(COMPLEX) && !defined(DOUBLE) + "addic. 
%1, %1, -128 \n\t" +#elif defined(COMPLEX) && defined(DOUBLE) + "addic. %1, %1, -32 \n\t" +#else + "addic. %1, %1, -64 \n\t" +#endif + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "stxvp 32, 0(%3) \n\t" + "stxvp 34, 32(%3) \n\t" + "stxvp 36, 64(%3) \n\t" + "stxvp 38, 96(%3) \n\t" + "lxvp 32, 0(%2) \n\t" + "lxvp 34, 32(%2) \n\t" + "lxvp 36, 64(%2) \n\t" + "lxvp 38, 96(%2) \n\t" + + "stxvp 40, 128(%3) \n\t" + "stxvp 42, 160(%3) \n\t" + "stxvp 44, 192(%3) \n\t" + "stxvp 46, 224(%3) \n\t" + "lxvp 40, 128(%2) \n\t" + "lxvp 42, 160(%2) \n\t" + "lxvp 44, 192(%2) \n\t" + "lxvp 46, 224(%2) \n\t" + + "stxvp 48, 256(%3) \n\t" + "stxvp 50, 288(%3) \n\t" + "stxvp 52, 320(%3) \n\t" + "stxvp 54, 352(%3) \n\t" + "lxvp 48, 256(%2) \n\t" + "lxvp 50, 288(%2) \n\t" + "lxvp 52, 320(%2) \n\t" + "lxvp 54, 352(%2) \n\t" + + "stxvp 56, 384(%3) \n\t" + "stxvp 58, 416(%3) \n\t" + "stxvp 60, 448(%3) \n\t" + "stxvp 62, 480(%3) \n\t" + "lxvp 56, 384(%2) \n\t" + "lxvp 58, 416(%2) \n\t" + "lxvp 60, 448(%2) \n\t" + "lxvp 62, 480(%2) \n\t" + + "addi %3, %3, 512 \n\t" + "addi %2, %2, 512 \n\t" + +#if !defined(COMPLEX) && !defined(DOUBLE) + "addic. %1, %1, -128 \n\t" +#elif defined(COMPLEX) && defined(DOUBLE) + "addic. %1, %1, -32 \n\t" +#else + "addic. %1, %1, -64 \n\t" +#endif + "bgt one%= \n" + + "two%=: \n\t" + + "stxvp 32, 0(%3) \n\t" + "stxvp 34, 32(%3) \n\t" + "stxvp 36, 64(%3) \n\t" + "stxvp 38, 96(%3) \n\t" + "stxvp 40, 128(%3) \n\t" + "stxvp 42, 160(%3) \n\t" + "stxvp 44, 192(%3) \n\t" + "stxvp 46, 224(%3) \n\t" + "stxvp 48, 256(%3) \n\t" + "stxvp 50, 288(%3) \n\t" + "stxvp 52, 320(%3) \n\t" + "stxvp 54, 352(%3) \n\t" + "stxvp 56, 384(%3) \n\t" + "stxvp 58, 416(%3) \n\t" + "stxvp 60, 448(%3) \n\t" + "stxvp 62, 480(%3) \n\t" + + "#n=%1 x=%4=%2 y=%0=%3" + : + "=m" (*y), + "+r" (n), // 1 + "+b" (x), // 2 + "+b" (y) // 3 + : + "m" (*x) + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", + "vs48","vs49","vs50","vs51","vs52","vs53","vs54","vs55", + "vs56","vs57","vs58","vs59","vs60","vs61","vs62","vs63" + ); +} diff --git a/kernel/power/crot.c b/kernel/power/crot.c index 959a9eda0..84ba5d913 100644 --- a/kernel/power/crot.c +++ b/kernel/power/crot.c @@ -27,7 +27,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) static void crot_kernel_8 (long n, float *x, float *y, float c, float s) { @@ -56,9 +57,9 @@ static void crot_kernel_8 (long n, float *x, float *y, float c, float s) "addi %[x_ptr], %[x_ptr], 64 \n\t" "addi %[y_ptr], %[y_ptr], 64 \n\t" "addic. %[temp_n], %[temp_n], -8 \n\t" - "ble 2f \n\t" - ".p2align 5 \n\t" - "1: \n\t" + "ble two%= \n\t" + ".align 5 \n\t" + "one%=: \n\t" "xvmulsp 40, 32, 36 \n\t" // c * x "xvmulsp 41, 33, 36 \n\t" "xvmulsp 42, 34, 36 \n\t" @@ -104,8 +105,8 @@ static void crot_kernel_8 (long n, float *x, float *y, float c, float s) "addi %[x_ptr], %[x_ptr], 128 \n\t" "addi %[y_ptr], %[y_ptr], 128 \n\t" "addic. 
%[temp_n], %[temp_n], -8 \n\t" - "bgt 1b \n\t" - "2: \n\t" + "bgt one%= \n\t" + "two%=: \n\t" "xvmulsp 40, 32, 36 \n\t" // c * x "xvmulsp 41, 33, 36 \n\t" "xvmulsp 42, 34, 36 \n\t" @@ -169,6 +170,7 @@ static void crot_kernel_8 (long n, float *x, float *y, float c, float s) } #endif +#endif int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s) @@ -183,7 +185,7 @@ int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT if ( (inc_x == 1) && (inc_y == 1) ) { - +#if defined(__VEC__) || defined(__ALTIVEC__) BLASLONG n1 = n & -8; if ( n1 > 0 ) { @@ -191,7 +193,7 @@ int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT i=n1; ix=2*n1; } - +#endif while(i < n) { temp[0] = c*x[ix] + s*y[ix] ; diff --git a/kernel/power/cswap.c b/kernel/power/cswap.c index 31e02fe5a..4d9b9ccd6 100644 --- a/kernel/power/cswap.c +++ b/kernel/power/cswap.c @@ -36,8 +36,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(POWER8) || defined(POWER9) #include "cswap_microk_power8.c" +#elif defined(POWER10) +#include "cswap_microk_power10.c" +#endif #endif diff --git a/kernel/power/cswap_microk_power10.c b/kernel/power/cswap_microk_power10.c new file mode 100644 index 000000000..2a44a9e30 --- /dev/null +++ b/kernel/power/cswap_microk_power10.c @@ -0,0 +1,127 @@ +/*************************************************************************** +Copyright (c) 2021, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#if defined(DOUBLE) +#define HAVE_KERNEL_16 1 +static void zswap_kernel_16 (long n, double *x, double *y) +#else +#define HAVE_KERNEL_32 1 +static void cswap_kernel_32 (long n, float *x, float *y) +#endif +{ + __asm__ + ( + ".align 5 \n" + "one%=: \n\t" + "lxvp 32, 0(%4) \n\t" + "lxvp 34, 32(%4) \n\t" + "lxvp 36, 64(%4) \n\t" + "lxvp 38, 96(%4) \n\t" + + "lxvp 40, 128(%4) \n\t" + "lxvp 42, 160(%4) \n\t" + "lxvp 44, 192(%4) \n\t" + "lxvp 46, 224(%4) \n\t" + + "lxvp 48, 0(%3) \n\t" + "lxvp 50, 32(%3) \n\t" + "lxvp 52, 64(%3) \n\t" + "lxvp 54, 96(%3) \n\t" + + "lxvp 56, 128(%3) \n\t" + "lxvp 58, 160(%3) \n\t" + "lxvp 60, 192(%3) \n\t" + "lxvp 62, 224(%3) \n\t" + + + "stxv 33, 0(%3) \n\t" + "stxv 32, 16(%3) \n\t" + "stxv 35, 32(%3) \n\t" + "stxv 34, 48(%3) \n\t" + "stxv 37, 64(%3) \n\t" + "stxv 36, 80(%3) \n\t" + "stxv 39, 96(%3) \n\t" + "stxv 38, 112(%3) \n\t" + + "addi %3, %3, 128 \n\t" + + "stxv 41, 0(%3) \n\t" + "stxv 40, 16(%3) \n\t" + "stxv 43, 32(%3) \n\t" + "stxv 42, 48(%3) \n\t" + "stxv 45, 64(%3) \n\t" + "stxv 44, 80(%3) \n\t" + "stxv 47, 96(%3) \n\t" + "stxv 46, 112(%3) \n\t" + + "addi %3, %3, 128 \n\t" + + "stxv 49, 0(%4) \n\t" + "stxv 48, 16(%4) \n\t" + "stxv 51, 32(%4) \n\t" + "stxv 50, 48(%4) \n\t" + "stxv 53, 64(%4) \n\t" + "stxv 52, 80(%4) \n\t" + "stxv 55, 96(%4) \n\t" + "stxv 54, 112(%4) \n\t" + + "addi %4, %4, 128 \n\t" + + "stxv 57, 0(%4) \n\t" + "stxv 56, 16(%4) \n\t" + "stxv 59, 32(%4) \n\t" + "stxv 58, 48(%4) \n\t" + "stxv 61, 64(%4) \n\t" + "stxv 60, 80(%4) \n\t" + "stxv 63, 96(%4) \n\t" + "stxv 62, 112(%4) \n\t" + + "addi %4, %4, 128 \n\t" + +#if defined(DOUBLE) + "addic. %2, %2, -16 \n\t" +#else + "addic. %2, %2, -32 \n\t" +#endif + "bgt one%= \n" + + "#n=%2 x=%0=%3 y=%1=%4" + : + "+m" (*x), + "+m" (*y), + "+r" (n), // 2 + "+b" (x), // 3 + "+b" (y) // 4 + : + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", + "vs48","vs49","vs50","vs51","vs52","vs53","vs54","vs55", + "vs56","vs57","vs58","vs59","vs60","vs61","vs62","vs63" + ); +} diff --git a/kernel/power/cswap_microk_power8.c b/kernel/power/cswap_microk_power8.c index 8d7d0c0b9..829800230 100644 --- a/kernel/power/cswap_microk_power8.c +++ b/kernel/power/cswap_microk_power8.c @@ -39,8 +39,8 @@ static void cswap_kernel_32 (long n, float *x, float *y) { __asm__ ( - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "lxvd2x 32, 0, %4 \n\t" "lxvd2x 33, %5, %4 \n\t" @@ -131,7 +131,7 @@ static void cswap_kernel_32 (long n, float *x, float *y) "addi %4, %4, 128 \n\t" "addic. %2, %2, -32 \n\t" - "bgt 1b \n" + "bgt one%= \n" "#n=%2 x=%0=%3 y=%1=%4 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" : diff --git a/kernel/power/ctrmm_kernel_8x4_power8.S b/kernel/power/ctrmm_kernel_8x4_power8.S index 26f49c663..35faad19e 100644 --- a/kernel/power/ctrmm_kernel_8x4_power8.S +++ b/kernel/power/ctrmm_kernel_8x4_power8.S @@ -82,12 +82,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #ifdef __64BIT__ -#define STACKSIZE 400 #define STACKSIZE 592 #define ALPHA_R_SP 304+192(SP) #define ALPHA_I_SP 312+192(SP) #else -#define STACKSIZE 256 #define STACKSIZE 452 #define ALPHA_R_SP 224+196(SP) #define ALPHA_I_SP 232+196(SP) @@ -98,7 +96,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -264,7 +262,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stfs f2, ALPHA_I_SP // stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -285,7 +283,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #ifdef TRMMKERNEL -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ctrmm_macros_8x4_power8.S b/kernel/power/ctrmm_macros_8x4_power8.S index 48a21252c..922cab57a 100644 --- a/kernel/power/ctrmm_macros_8x4_power8.S +++ b/kernel/power/ctrmm_macros_8x4_power8.S @@ -83,7 +83,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=4 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x8_1', ` +#else .macro LOAD4x8_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -113,9 +117,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_I1', ` +#else .macro KERNEL4x8_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -184,9 +196,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs63, vs3, vs15 // a3_r*b3_i, a3_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_1', ` +#else .macro KERNEL4x8_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -255,9 +275,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs3, vs15 // a3_r*b3_i, a3_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_2', ` +#else .macro KERNEL4x8_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -326,9 +354,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs7, vs23 // a7_r*b3_i, a7_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_E2', ` +#else .macro KERNEL4x8_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r @@ -368,9 +404,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs7, vs23 // a7_r*b3_i, a7_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_SUBI1', ` +#else .macro KERNEL4x8_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -439,9 +483,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs63, vs3, vs15 // a3_r*b3_i, a3_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_SUB1', ` +#else .macro KERNEL4x8_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -510,9 +562,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs3, vs15 // a3_r*b3_i, a3_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x8', ` +#else .macro SAVE4x8 +#endif mr T1, CO @@ -1597,14 +1657,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x4_1', ` +#else .macro LOAD4x4_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -1630,9 +1698,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_I1', ` +#else .macro KERNEL4x4_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -1681,9 +1757,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs1, vs15 // a1_r*b3_i, a1_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_1', ` +#else .macro KERNEL4x4_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -1732,9 +1816,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs1, vs15 // a1_r*b3_i, a1_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_2', ` +#else .macro KERNEL4x4_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -1783,9 +1875,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs5, vs23 // a5_r*b3_i, a5_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_E2', ` +#else .macro KERNEL4x4_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r @@ -1809,9 +1909,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs5, vs23 // a5_r*b3_i, a5_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_SUBI1', ` +#else .macro KERNEL4x4_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -1860,9 +1968,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs1, vs15 // a1_r*b3_i, a1_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_SUB1', ` +#else .macro KERNEL4x4_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -1911,9 +2027,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs1, vs15 // a1_r*b3_i, a1_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x4', ` +#else .macro SAVE4x4 +#endif mr T1, CO @@ -2470,14 +2594,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x2_1', ` +#else .macro LOAD4x2_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -2501,9 +2633,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_I1', ` +#else .macro KERNEL4x2_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -2542,9 +2682,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs39, vs0, vs15 // a0_r*b3_i, a0_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_1', ` +#else .macro KERNEL4x2_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -2583,9 +2731,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs0, vs15 // a0_r*b3_i, a0_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_2', ` +#else .macro KERNEL4x2_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -2624,9 +2780,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs4, vs23 // a4_r*b3_i, a4_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_E2', ` +#else .macro KERNEL4x2_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r @@ -2642,9 +2806,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs4, vs23 // a4_r*b3_i, a4_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_SUBI1', ` +#else .macro KERNEL4x2_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -2683,9 +2855,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs0, vs15 // a0_r*b3_i, a0_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_SUB1', ` +#else .macro KERNEL4x2_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -2724,9 +2904,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs0, vs15 // a0_r*b3_i, a0_i*b3_i, a1_r*b3_i, a1_i*b3_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x2', ` +#else .macro SAVE4x2 +#endif mr T1, CO @@ -3019,14 +3207,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x1_1', ` +#else .macro LOAD4x1_1 +#endif lxsspx vs0, o0, AO // load a0_r lxsspx vs1, o4, AO // load a0_i @@ -3055,9 +3251,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_I1', ` +#else .macro KERNEL4x1_I1 +#endif lxsspx vs4, o0, AO // load a0_r @@ -3109,9 +3313,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs47, vs1, vs14 // a0_i*b3_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_1', ` +#else .macro KERNEL4x1_1 +#endif lxsspx vs4, o0, AO // load a0_r @@ -3163,9 +3375,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs1, vs14 // a0_i*b3_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_2', ` +#else .macro KERNEL4x1_2 +#endif lxsspx vs0, o0, AO // load a0_r @@ -3217,9 +3437,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs47, vs5, vs22 // a4_i*b3_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_E2', ` +#else .macro KERNEL4x1_E2 +#endif xsmaddadp vs32, vs4, vs16 // a4_r*b0_r @@ -3243,9 +3471,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs5, vs22 // a4_i*b3_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_SUBI1', ` +#else .macro KERNEL4x1_SUBI1 +#endif lxsspx vs0, o0, AO // load a0_r @@ -3297,9 +3533,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs47, vs1, vs14 // a0_i*b3_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_SUB1', ` +#else .macro KERNEL4x1_SUB1 +#endif lxsspx vs0, o0, AO // load a0_r @@ -3351,9 +3595,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs1, vs14 // a0_i*b3_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x1', ` +#else .macro SAVE4x1 +#endif mr T1, CO @@ -3526,14 +3778,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x8_1', ` +#else .macro LOAD2x8_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -3556,9 +3816,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_I1', ` +#else .macro KERNEL2x8_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -3602,9 +3870,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs3, vs11 // a3_r*b1_i, a3_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_1', ` +#else .macro KERNEL2x8_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -3648,9 +3924,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs3, vs11 // a3_r*b1_i, a3_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_2', ` +#else .macro KERNEL2x8_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -3694,9 +3978,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs7, vs19 // a7_r*b1_i, a7_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_E2', ` +#else .macro KERNEL2x8_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r @@ -3718,9 +4010,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs7, vs19 // a7_r*b1_i, a7_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUBI1', ` +#else .macro KERNEL2x8_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -3764,9 +4064,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs47, vs3, vs11 // a3_r*b1_i, a3_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUB1', ` +#else .macro KERNEL2x8_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -3810,9 +4118,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs3, vs11 // a3_r*b1_i, a3_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x8', ` +#else .macro SAVE2x8 +#endif mr T1, CO @@ -4357,14 +4673,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x4_1', ` +#else .macro LOAD2x4_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -4383,9 +4707,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_I1', ` +#else .macro KERNEL2x4_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -4417,9 +4749,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs1, vs11 // a1_r*b1_i, a1_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_1', ` +#else .macro KERNEL2x4_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -4451,9 +4791,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs1, vs11 // a1_r*b1_i, a1_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_2', ` +#else .macro KERNEL2x4_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -4485,9 +4833,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs5, vs19 // a5_r*b1_i, a5_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_E2', ` +#else .macro KERNEL2x4_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r @@ -4501,9 +4857,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs5, vs19 // a5_r*b1_i, a5_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUBI1', ` +#else .macro KERNEL2x4_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -4535,9 +4899,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs1, vs11 // a1_r*b1_i, a1_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUB1', ` +#else .macro KERNEL2x4_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -4569,9 +4941,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs1, vs11 // a1_r*b1_i, a1_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x4', ` +#else .macro SAVE2x4 +#endif mr T1, CO @@ -4852,14 +5232,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
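// How the macro families in this file fit together: for every tile size
// NxM there is a LOAD macro (prime the A/B registers), an _I1 kernel
// (first k-iteration, xvmulsp, so the accumulators need no clearing),
// paired _1/_2 kernels that ping-pong between two register sets for
// software pipelining, an _E2 drain, _SUBI1/_SUB1 for remainder
// iterations, and a SAVE. A sketch of the call pattern the companion
// *_logic file drives, using the N=2, M=4 macros above (illustrative
// only, not the actual driver code):
//
//     LOAD2x4_1        // vs0-vs1 from A, vs8-vs11 from B
//     KERNEL2x4_I1     // multiply, initializing vs32-vs39
//     KERNEL2x4_1      // madd into vs32-vs39, fetch ahead into vs4-vs5
//     KERNEL2x4_2      // madd from vs4-vs5, fetch back into vs0-vs1
//     KERNEL2x4_E2     // drain the last in-flight registers, no loads
//     SAVE2x4          // scale by alpha, update C, advance CO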
addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x2_1', ` +#else .macro LOAD2x2_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -4876,9 +5264,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_I1', ` +#else .macro KERNEL2x2_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -4904,9 +5300,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs0, vs11 // a0_r*b1_i, a0_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_1', ` +#else .macro KERNEL2x2_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -4932,9 +5336,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs0, vs11 // a0_r*b1_i, a0_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_2', ` +#else .macro KERNEL2x2_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -4960,9 +5372,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs4, vs19 // a4_r*b1_i, a4_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_E2', ` +#else .macro KERNEL2x2_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r @@ -4972,9 +5392,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs4, vs19 // a4_r*b1_i, a4_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUBI1', ` +#else .macro KERNEL2x2_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -5000,9 +5428,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs0, vs11 // a0_r*b1_i, a0_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUB1', ` +#else .macro KERNEL2x2_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -5028,9 +5464,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs0, vs11 // a0_r*b1_i, a0_i*b1_i, a1_r*b1_i, a1_i*b1_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x2', ` +#else .macro SAVE2x2 +#endif mr T1, CO @@ -5179,14 +5623,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x1_1', ` +#else .macro LOAD2x1_1 +#endif lxsspx vs0, o0, AO // load a0_r lxsspx vs1, o4, AO // load a0_i @@ -5205,9 +5657,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_I1', ` +#else .macro KERNEL2x1_I1 +#endif lxsspx vs4, o0, AO // load a0_r @@ -5239,9 +5699,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmuldp vs39, vs1, vs10 // a0_i*b1_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_1', ` +#else .macro KERNEL2x1_1 +#endif lxsspx vs4, o0, AO // load a0_r @@ -5273,9 +5741,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs1, vs10 // a0_i*b1_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_2', ` +#else .macro KERNEL2x1_2 +#endif lxsspx vs0, o0, AO // load a0_r @@ -5307,9 +5783,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs5, vs18 // a4_i*b1_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_E2', ` +#else .macro KERNEL2x1_E2 +#endif xsmaddadp vs32, vs4, vs16 // a4_r*b0_r @@ -5323,9 +5807,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs5, vs18 // a4_i*b1_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUBI1', ` +#else .macro KERNEL2x1_SUBI1 +#endif lxsspx vs0, o0, AO // load a0_r @@ -5357,9 +5849,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs1, vs10 // a0_i*b1_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUB1', ` +#else .macro KERNEL2x1_SUB1 +#endif lxsspx vs0, o0, AO // load a0_r @@ -5391,9 +5891,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs1, vs10 // a0_i*b1_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x1', ` +#else .macro SAVE2x1 +#endif mr T1, CO @@ -5482,14 +5990,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x8_1', ` +#else .macro LOAD1x8_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -5514,9 +6030,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_I1', ` +#else .macro KERNEL1x8_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -5553,9 +6077,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs3, vs9 // a3_r*b0_i, a3_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_1', ` +#else .macro KERNEL1x8_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -5592,9 +6124,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs3, vs9 // a3_r*b0_i, a3_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_2', ` +#else .macro KERNEL1x8_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -5631,9 +6171,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs7, vs17 // a7_r*b0_i, a7_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_E2', ` +#else .macro KERNEL1x8_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r @@ -5646,9 +6194,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs39, vs7, vs17 // a7_r*b0_i, a7_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUBI1', ` +#else .macro KERNEL1x8_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -5685,9 +6241,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs3, vs9 // a3_r*b0_i, a3_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUB1', ` +#else .macro KERNEL1x8_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -5724,9 +6288,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs3, vs9 // a3_r*b0_i, a3_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x8', ` +#else .macro SAVE1x8 +#endif mr T1, CO @@ -6001,14 +6573,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x4_1', ` +#else .macro LOAD1x4_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -6029,9 +6609,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_I1', ` +#else .macro KERNEL1x4_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -6060,9 +6648,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs1, vs9 // a1_r*b0_i, a1_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_1', ` +#else .macro KERNEL1x4_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -6091,9 +6687,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs1, vs9 // a1_r*b0_i, a1_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_2', ` +#else .macro KERNEL1x4_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -6122,9 +6726,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs5, vs17 // a5_r*b0_i, a5_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_E2', ` +#else .macro KERNEL1x4_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r @@ -6133,9 +6745,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs5, vs17 // a5_r*b0_i, a5_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUBI1', ` +#else .macro KERNEL1x4_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -6164,9 +6784,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs1, vs9 // a1_r*b0_i, a1_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUB1', ` +#else .macro KERNEL1x4_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -6195,9 +6823,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs35, vs1, vs9 // a1_r*b0_i, a1_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x4', ` +#else .macro SAVE1x4 +#endif mr T1, CO @@ -6340,14 +6976,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x2_1', ` +#else .macro LOAD1x2_1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -6366,9 +7010,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_I1', ` +#else .macro KERNEL1x2_I1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -6393,9 +7045,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs33, vs0, vs9 // a0_r*b0_i, a0_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_1', ` +#else .macro KERNEL1x2_1 +#endif lxvw4x vs4, o0, AO // load a0, a1 @@ -6420,9 +7080,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs0, vs9 // a0_r*b0_i, a0_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_2', ` +#else .macro KERNEL1x2_2 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -6447,18 +7115,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs4, vs17 // a4_r*b0_i, a4_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_E2', ` +#else .macro KERNEL1x2_E2 +#endif xvmaddasp vs32, vs4, vs16 // a4_r*b0_r, a4_i*b0_r, a1_r*b0_r, a1_i*b0_r xvmaddasp vs33, vs4, vs17 // a4_r*b0_i, a4_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUBI1', ` +#else .macro KERNEL1x2_SUBI1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -6483,9 +7167,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs33, vs0, vs9 // a0_r*b0_i, a0_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUB1', ` +#else .macro KERNEL1x2_SUB1 +#endif lxvw4x vs0, o0, AO // load a0, a1 @@ -6510,9 +7202,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs0, vs9 // a0_r*b0_i, a0_i*b0_i, a1_r*b0_i, a1_i*b0_i +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x2', ` +#else .macro SAVE1x2 +#endif mr T1, CO @@ -6589,14 +7289,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x1_1', ` +#else .macro LOAD1x1_1 +#endif lxsspx vs0, o0, AO // load a0_r lxsspx vs1, o4, AO // load a0_i @@ -6610,9 +7318,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_I1', ` +#else .macro KERNEL1x1_I1 +#endif lxsspx vs4, o0, AO // load a0_r @@ -6634,9 +7350,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs35, vs1, vs8 // a0_i*b0_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_1', ` +#else .macro KERNEL1x1_1 +#endif lxsspx vs4, o0, AO // load a0_r @@ -6658,9 +7382,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs1, vs8 // a0_i*b0_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_2', ` +#else .macro KERNEL1x1_2 +#endif lxsspx vs0, o0, AO // load a0_r @@ -6682,9 +7414,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs5, vs16 // a4_i*b0_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_E2', ` +#else .macro KERNEL1x1_E2 +#endif xsmaddadp vs32, vs4, vs16 // a4_r*b0_r @@ -6693,9 +7433,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs5, vs16 // a4_i*b0_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUBI1', ` +#else .macro KERNEL1x1_SUBI1 +#endif lxsspx vs0, o0, AO // load a0_r @@ -6717,9 +7465,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs35, vs1, vs8 // a0_i*b0_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUB1', ` +#else .macro KERNEL1x1_SUB1 +#endif lxsspx vs0, o0, AO // load a0_r @@ -6741,9 +7497,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs1, vs8 // a0_i*b0_r +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x1', ` +#else .macro SAVE1x1 +#endif mr T1, CO @@ -6790,5 +7554,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/dasum.c b/kernel/power/dasum.c index d0e060977..999dc677a 100644 --- a/kernel/power/dasum.c +++ b/kernel/power/dasum.c @@ -46,9 +46,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "dasum_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_16 diff --git a/kernel/power/dasum_microk_power8.c b/kernel/power/dasum_microk_power8.c index 880d7d271..4652fc57c 100644 --- a/kernel/power/dasum_microk_power8.c +++ b/kernel/power/dasum_microk_power8.c @@ -68,10 +68,10 @@ static double dasum_kernel_16 (long n, double *x) "addi %2, %2, 128 \n\t" "addic. 
%1, %1, -16 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvabsdp 48, 40 \n\t" "xvabsdp 49, 41 \n\t" @@ -108,9 +108,9 @@ static double dasum_kernel_16 (long n, double *x) "xvadddp 38, 38, %x5 \n\t" "xvadddp 39, 39, %x6 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvabsdp 48, 40 \n\t" "xvabsdp 49, 41 \n\t" @@ -140,7 +140,7 @@ static double dasum_kernel_16 (long n, double *x) "xvadddp 32, 32, 36 \n\t" - "xxswapd 33, 32 \n\t" + XXSWAPD_S(33,32) "xsadddp %x0, 32, 33 \n" "#n=%1 x=%3=%2 sum=%0 o16=%8 o32=%9 o48=%10 o64=%11 o80=%12 o96=%13 o112=%14\n" diff --git a/kernel/power/daxpy.c b/kernel/power/daxpy.c index f09611ff0..2de4e0911 100644 --- a/kernel/power/daxpy.c +++ b/kernel/power/daxpy.c @@ -36,9 +36,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "daxpy_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_8 diff --git a/kernel/power/daxpy_microk_power10.c b/kernel/power/daxpy_microk_power10.c new file mode 100644 index 000000000..bc9199efd --- /dev/null +++ b/kernel/power/daxpy_microk_power10.c @@ -0,0 +1,131 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define HAVE_KERNEL_8 1 + +static void daxpy_kernel_8 (long n, double *x, double *y, double alpha) +{ + __vector double t0; + + __asm__ + ( + XXSPLTD_S(%x4,%x6,0) + + "dcbt 0, %2 \n\t" + "dcbt 0, %3 \n\t" + + "lxvp 32, 0(%2) \n\t" + "lxvp 34, 32(%2) \n\t" + "lxvp 40, 64(%2) \n\t" + "lxvp 42, 96(%2) \n\t" + + "lxvp 36, 0(%3) \n\t" + "lxvp 38, 32(%3) \n\t" + "lxvp 44, 64(%3) \n\t" + "lxvp 46, 96(%3) \n\t" + + "addi %2, %2, 128 \n\t" + + "addic. 
%1, %1, -16 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "xvmaddadp 36, 32, %x4 \n\t" + "xvmaddadp 37, 33, %x4 \n\t" + + "lxvp 32, 0(%2) \n\t" + "stxvp 36, 0(%3) \n\t" + + "xvmaddadp 38, 34, %x4 \n\t" + "xvmaddadp 39, 35, %x4 \n\t" + + "lxvp 34, 32(%2) \n\t" + "stxvp 38, 32(%3) \n\t" + + + "lxvp 36, 128(%3) \n\t" + "lxvp 38, 160(%3) \n\t" + + "xvmaddadp 44, 40, %x4 \n\t" + "xvmaddadp 45, 41, %x4 \n\t" + + "lxvp 40, 64(%2) \n\t" + "stxvp 44, 64(%3) \n\t" + + "xvmaddadp 46, 42, %x4 \n\t" + "xvmaddadp 47, 43, %x4 \n\t" + + "lxvp 42, 96(%2) \n\t" + "stxvp 46, 96(%3) \n\t" + + "addi %2, %2, 128 \n\t" + "addi %3, %3, 128 \n\t" + + "lxvp 44, 64(%3) \n\t" + "lxvp 46, 96(%3) \n\t" + + "addic. %1, %1, -16 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "xvmaddadp 36, 32, %x4 \n\t" + "xvmaddadp 37, 33, %x4 \n\t" + "xvmaddadp 38, 34, %x4 \n\t" + "xvmaddadp 39, 35, %x4 \n\t" + + "xvmaddadp 44, 40, %x4 \n\t" + "xvmaddadp 45, 41, %x4 \n\t" + "xvmaddadp 46, 42, %x4 \n\t" + "xvmaddadp 47, 43, %x4 \n\t" + + "stxvp 36, 0(%3) \n\t" + "stxvp 38, 32(%3) \n\t" + "stxvp 44, 64(%3) \n\t" + "stxvp 46, 96(%3) \n\t" + + "#n=%1 x=%5=%2 y=%0=%3 alpha=%6 t0=%x4\n" + : + "+m" (*y), + "+r" (n), // 1 + "+b" (x), // 2 + "+b" (y), // 3 + "=wa" (t0) // 4 + : + "m" (*x), + "d" (alpha) // 6 + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37", "vs38", "vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" + ); + +} + + diff --git a/kernel/power/daxpy_microk_power8.c b/kernel/power/daxpy_microk_power8.c index fb714a3f9..a92026e83 100644 --- a/kernel/power/daxpy_microk_power8.c +++ b/kernel/power/daxpy_microk_power8.c @@ -58,7 +58,7 @@ static void daxpy_kernel_8 (long n, double *x, double *y, double alpha) __asm__ ( - "xxspltd %x4, %x22, 0 \n\t" + XXSPLTD_S(%x4,%x22,0) "dcbt 0, %2 \n\t" "dcbt 0, %3 \n\t" @@ -90,10 +90,10 @@ static void daxpy_kernel_8 (long n, double *x, double *y, double alpha) "addi %3, %3, -64 \n\t" "addic. %1, %1, -16 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" ".align 5 \n" - "1: \n\t" + "one%=: \n\t" "xvmaddadp %x13, %x5, %x4 \n\t" "xvmaddadp %x14, %x6, %x4 \n\t" @@ -152,9 +152,9 @@ static void daxpy_kernel_8 (long n, double *x, double *y, double alpha) "addi %3, %3, -64 \n\t" "addic. %1, %1, -16 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmaddadp %x13, %x5, %x4 \n\t" "xvmaddadp %x14, %x6, %x4 \n\t" diff --git a/kernel/power/daxpy_power10.c b/kernel/power/daxpy_power10.c new file mode 100644 index 000000000..8640efcfd --- /dev/null +++ b/kernel/power/daxpy_power10.c @@ -0,0 +1,128 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
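An aside on the relabeling that runs through the dasum and daxpy microkernels above (and the copy/dot ones below): GNU-as local numeric labels ("1:", "bgt 1b") and ".p2align" are not accepted by the AIX assembler, so the loops switch to named labels carrying "%=", which GCC expands to a number unique to each asm instance and so keeps the labels distinct even if a kernel is inlined more than once; likewise "xxswapd" retreats behind the XXSWAPD_S macro, presumably so assemblers lacking that extended mnemonic get the underlying xxpermdi encoding. A minimal standalone sketch of the label idiom, assuming GCC extended asm on POWER (count_down is a hypothetical example, not part of this patch):

    static long count_down (long n)
    {
        __asm__
        (
            "loop%=:              \n\t"  /* "%=" makes the label unique  */
            "addic. %0, %0, -1    \n\t"  /* decrement and set cr0        */
            "bgt    loop%=        \n"    /* loop while the result is > 0 */
            : "+r" (n)
            :
            : "cr0"
        );
        return n;  /* 0 for any n > 0 */
    }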
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#if defined(__VEC__) || defined(__ALTIVEC__) +#include "daxpy_microk_power10.c" +#endif + + +#ifndef HAVE_KERNEL_8 + +static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT alpha) +{ + BLASLONG register i = 0; + + while(i < n) + { + y[i] += alpha * x[i]; + y[i+1] += alpha * x[i+1]; + y[i+2] += alpha * x[i+2]; + y[i+3] += alpha * x[i+3]; + y[i+4] += alpha * x[i+4]; + y[i+5] += alpha * x[i+5]; + y[i+6] += alpha * x[i+6]; + y[i+7] += alpha * x[i+7]; + i+=8 ; + + } + +} + +#endif + +int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) +{ + BLASLONG i=0; + BLASLONG ix=0,iy=0; + + if ( n <= 0 ) return(0); + + if ( (inc_x == 1) && (inc_y == 1) ) + { + + if ( n >= 16 ) + { + BLASLONG align = ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 3) & 0x3; + for (i = 0; i < align; i++) { + y[i] += da * x[i] ; + } + } + BLASLONG n1 = (n-i) & -16; + if ( n1 ) + daxpy_kernel_8(n1, &x[i], &y[i], da); + + i += n1; + + while(i < n) + { + + y[i] += da * x[i] ; + i++ ; + + } + return(0); + + + } + + BLASLONG n1 = n & -4; + + while(i < n1) + { + + FLOAT m1 = da * x[ix] ; + FLOAT m2 = da * x[ix+inc_x] ; + FLOAT m3 = da * x[ix+2*inc_x] ; + FLOAT m4 = da * x[ix+3*inc_x] ; + + y[iy] += m1 ; + y[iy+inc_y] += m2 ; + y[iy+2*inc_y] += m3 ; + y[iy+3*inc_y] += m4 ; + + ix += inc_x*4 ; + iy += inc_y*4 ; + i+=4 ; + + } + + while(i < n) + { + + y[iy] += da * x[ix] ; + ix += inc_x ; + iy += inc_y ; + i++ ; + + } + return(0); + +} + + diff --git a/kernel/power/dcopy.c b/kernel/power/dcopy.c index 27b39144b..24279f8a2 100644 --- a/kernel/power/dcopy.c +++ b/kernel/power/dcopy.c @@ -35,9 +35,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "dcopy_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_32 diff --git a/kernel/power/dcopy_microk_power8.c b/kernel/power/dcopy_microk_power8.c index 261dc04de..b51a21d08 100644 --- a/kernel/power/dcopy_microk_power8.c +++ b/kernel/power/dcopy_microk_power8.c @@ -62,10 +62,10 @@ static void dcopy_kernel_32 (long n, double *x, double *y) "addi %2, %2, 128 \n\t" "addic. %1, %1, -32 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "stxvd2x 32, 0, %3 \n\t" "stxvd2x 33, %5, %3 \n\t" @@ -108,9 +108,9 @@ static void dcopy_kernel_32 (long n, double *x, double *y) "addi %2, %2, 128 \n\t" "addic. 
%1, %1, -32 \n\t"

- "bgt 1b \n"
+ "bgt one%= \n"

- "2: \n\t"
+ "two%=: \n\t"

 "stxvd2x 32, 0, %3 \n\t"
 "stxvd2x 33, %5, %3 \n\t"
diff --git a/kernel/power/dcopy_power10.c b/kernel/power/dcopy_power10.c
new file mode 100644
index 000000000..6c5eb4d77
--- /dev/null
+++ b/kernel/power/dcopy_power10.c
@@ -0,0 +1,129 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+
+#if defined(__VEC__) || defined(__ALTIVEC__)
+#include "copy_microk_power10.c"
+#endif
+
+#ifndef HAVE_KERNEL
+
+static void copy_kernel(BLASLONG n, FLOAT *x, FLOAT *y)
+{
+
+ BLASLONG i=0;
+ FLOAT f0, f1, f2, f3, f4, f5, f6, f7;
+ FLOAT *x1=x;
+ FLOAT *y1=y;
+
+ while ( i<n )
+ {
+
+ f0 = x1[0];
+ f1 = x1[1];
+ f2 = x1[2];
+ f3 = x1[3];
+ f4 = x1[4];
+ f5 = x1[5];
+ f6 = x1[6];
+ f7 = x1[7];
+
+ y1[0] = f0;
+ y1[1] = f1;
+ y1[2] = f2;
+ y1[3] = f3;
+ y1[4] = f4;
+ y1[5] = f5;
+ y1[6] = f6;
+ y1[7] = f7;
+
+ x1 += 8;
+ y1 += 8;
+ i += 8;
+
+ }
+
+}
+
+#endif
+
+int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)
+{
+ BLASLONG i=0;
+ BLASLONG ix=0,iy=0;
+
+ if ( n <= 0 ) return(0);
+
+ if ( (inc_x == 1) && (inc_y == 1) )
+ {
+
+ if ( n >= 64 )
+ {
+ BLASLONG align = ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 3) & 0x3;
+ for (i = 0; i < align; i++) {
+ y[i] = x[i] ;
+ }
+ }
+ BLASLONG n1 = (n-i) & -64;
+ if ( n1 )
+ {
+ copy_kernel(n1, &x[i], &y[i]);
+ i += n1;
+ }
+
+ while(i < n)
+ {
+ y[i] = x[i] ;
+ i++ ;
+
+ }
+
+
+ }
+ else
+ {
+
+ while(i < n)
+ {
+ y[iy] = x[ix] ;
+ ix += inc_x ;
+ iy += inc_y ;
+ i++ ;
+
+ }
+
+ }
+ return(0);
+
+
+}
+
+
diff --git a/kernel/power/ddot.c b/kernel/power/ddot.c
index f985df1c5..c5493015a 100644
--- a/kernel/power/ddot.c
+++ b/kernel/power/ddot.c
@@ -36,9 +36,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "common.h"

-#if defined(POWER8) || defined(POWER9)
+#if defined(POWER8) || defined(POWER9) || defined(POWER10)
+#if defined(__VEC__) || defined(__ALTIVEC__)
 #include "ddot_microk_power8.c"
 #endif
+#endif

 #ifndef HAVE_KERNEL_8
diff --git a/kernel/power/ddot_microk_power10.c b/kernel/power/ddot_microk_power10.c
new file mode 100644
index 000000000..3a9865cc0
--- /dev/null
+++ b/kernel/power/ddot_microk_power10.c
@@ -0,0 +1,131 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
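Before the ddot kernel itself, one idiom from daxpy_power10.c and dcopy_power10.c above is worth unpacking: their unit-stride paths peel a few scalar iterations so that y reaches a 32-byte boundary before the lxvp-based vector kernel takes over. A sketch of just that computation (peel_to_32 is a hypothetical name; assumes 8-byte double elements, as in those files):

    #include <stdint.h>

    /* scalar elements to process before y is 32-byte aligned (0..3) */
    static inline long peel_to_32 (const double *y)
    {
        /* bytes to the next 32-byte boundary, converted to 8-byte
           elements; the final mask folds "already aligned" (4) to 0 */
        return ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 3) & 0x3;
    }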
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define HAVE_KERNEL_8 1 + +static double ddot_kernel_8 (long n, double *x, double *y) +{ + double dot; + + __asm__ + ( + "dcbt 0, %2 \n\t" + "dcbt 0, %3 \n\t" + + "xxlxor 32, 32, 32 \n\t" + "xxlxor 33, 33, 33 \n\t" + "xxlxor 34, 34, 34 \n\t" + "xxlxor 35, 35, 35 \n\t" + "xxlxor 36, 36, 36 \n\t" + "xxlxor 37, 37, 37 \n\t" + "xxlxor 38, 38, 38 \n\t" + "xxlxor 39, 39, 39 \n\t" + + "lxvp 40, 0(%2) \n\t" + "lxvp 42, 32(%2) \n\t" + "lxvp 44, 64(%2) \n\t" + "lxvp 46, 96(%2) \n\t" + "lxvp 48, 0(%3) \n\t" + "lxvp 50, 32(%3) \n\t" + "lxvp 52, 64(%3) \n\t" + "lxvp 54, 96(%3) \n\t" + + "addi %2, %2, 128 \n\t" + "addi %3, %3, 128 \n\t" + + "addic. %1, %1, -16 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "xvmaddadp 32, 40, 48 \n\t" + "xvmaddadp 33, 41, 49 \n\t" + "lxvp 40, 0(%2) \n\t" + "lxvp 48, 0(%3) \n\t" + "xvmaddadp 34, 42, 50 \n\t" + "xvmaddadp 35, 43, 51 \n\t" + "lxvp 42, 32(%2) \n\t" + "lxvp 50, 32(%3) \n\t" + "xvmaddadp 36, 44, 52 \n\t" + "xvmaddadp 37, 45, 53 \n\t" + "lxvp 44, 64(%2) \n\t" + "lxvp 52, 64(%3) \n\t" + "xvmaddadp 38, 46, 54 \n\t" + "xvmaddadp 39, 47, 55 \n\t" + "lxvp 46, 96(%2) \n\t" + "lxvp 54, 96(%3) \n\t" + + "addi %2, %2, 128 \n\t" + "addi %3, %3, 128 \n\t" + + "addic. 
%1, %1, -16 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "xvmaddadp 32, 40, 48 \n\t" + "xvmaddadp 33, 41, 49 \n\t" + "xvmaddadp 34, 42, 50 \n\t" + "xvmaddadp 35, 43, 51 \n\t" + "xvmaddadp 36, 44, 52 \n\t" + "xvmaddadp 37, 45, 53 \n\t" + "xvmaddadp 38, 46, 54 \n\t" + "xvmaddadp 39, 47, 55 \n\t" + + "xvadddp 32, 32, 33 \n\t" + "xvadddp 34, 34, 35 \n\t" + "xvadddp 36, 36, 37 \n\t" + "xvadddp 38, 38, 39 \n\t" + + "xvadddp 32, 32, 34 \n\t" + "xvadddp 36, 36, 38 \n\t" + + "xvadddp 32, 32, 36 \n\t" + + XXSWAPD_S(33,32) + + "xsadddp %x0, 32, 33 \n" + + "#dot=%0 n=%1 x=%4=%2 y=%5=%3\n" + : + "=d" (dot), // 0 + "+r" (n), // 1 + "+b" (x), // 2 + "+b" (y) // 3 + : + "m" (*x), + "m" (*y) + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", + "vs48","vs49","vs50","vs51","vs52","vs53","vs54","vs55" + ); + + return dot; +} diff --git a/kernel/power/ddot_microk_power8.c b/kernel/power/ddot_microk_power8.c index 4e6bc29c9..d2518ef7e 100644 --- a/kernel/power/ddot_microk_power8.c +++ b/kernel/power/ddot_microk_power8.c @@ -78,10 +78,10 @@ static double ddot_kernel_8 (long n, double *x, double *y) "addi %3, %3, 128 \n\t" "addic. %1, %1, -16 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmaddadp 32, 40, 48 \n\t" "lxvd2x 40, 0, %2 \n\t" @@ -112,9 +112,9 @@ static double ddot_kernel_8 (long n, double *x, double *y) "addi %3, %3, 128 \n\t" "addic. %1, %1, -16 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmaddadp 32, 40, 48 \n\t" "xvmaddadp 33, 41, 49 \n\t" @@ -135,7 +135,7 @@ static double ddot_kernel_8 (long n, double *x, double *y) "xvadddp 32, 32, 36 \n\t" - "xxswapd 33, 32 \n\t" + XXSWAPD_S(33,32) "xsadddp %x0, 32, 33 \n" diff --git a/kernel/power/ddot_power10.c b/kernel/power/ddot_power10.c new file mode 100644 index 000000000..302dceb68 --- /dev/null +++ b/kernel/power/ddot_power10.c @@ -0,0 +1,130 @@ +/*************************************************************************** +Copyright (c) 2013-2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + + +#if defined(__VEC__) || defined(__ALTIVEC__) +#include "ddot_microk_power10.c" +#endif + + +#ifndef HAVE_KERNEL_8 + +static FLOAT ddot_kernel_8 (BLASLONG n, FLOAT *x, FLOAT *y) +{ + BLASLONG register i = 0; + FLOAT dot = 0.0; + + while(i < n) + { + dot += y[i] * x[i] + + y[i+1] * x[i+1] + + y[i+2] * x[i+2] + + y[i+3] * x[i+3] + + y[i+4] * x[i+4] + + y[i+5] * x[i+5] + + y[i+6] * x[i+6] + + y[i+7] * x[i+7] ; + + i+=8 ; + + } + return dot; +} + +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ + BLASLONG i=0; + BLASLONG ix=0,iy=0; + + FLOAT dot = 0.0 ; + + if ( n <= 0 ) return(dot); + + if ( (inc_x == 1) && (inc_y == 1) ) + { + + BLASLONG n1 = n & -16; + + if ( n1 ) + dot = ddot_kernel_8(n1, x, y); + + i = n1; + while(i < n) + { + + dot += y[i] * x[i] ; + i++ ; + + } + return(dot); + + + } + + FLOAT temp1 = 0.0; + FLOAT temp2 = 0.0; + + BLASLONG n1 = n & -4; + + while(i < n1) + { + + FLOAT m1 = y[iy] * x[ix] ; + FLOAT m2 = y[iy+inc_y] * x[ix+inc_x] ; + + FLOAT m3 = y[iy+2*inc_y] * x[ix+2*inc_x] ; + FLOAT m4 = y[iy+3*inc_y] * x[ix+3*inc_x] ; + + ix += inc_x*4 ; + iy += inc_y*4 ; + + temp1 += m1+m3; + temp2 += m2+m4; + + i+=4 ; + + } + + while(i < n) + { + + temp1 += y[iy] * x[ix] ; + ix += inc_x ; + iy += inc_y ; + i++ ; + + } + dot = temp1 + temp2; + return(dot); + +} + + diff --git a/kernel/power/dgemm_kernel_16x4_power8.S b/kernel/power/dgemm_kernel_16x4_power8.S index 41958eab0..f8ed12ee9 100644 --- a/kernel/power/dgemm_kernel_16x4_power8.S +++ b/kernel/power/dgemm_kernel_16x4_power8.S @@ -82,12 +82,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #ifdef __64BIT__ -#define STACKSIZE 320 #define STACKSIZE 512 #define ALPHA_SP 296+192(SP) #define FZERO 304+192(SP) #else -#define STACKSIZE 240 #define STACKSIZE 440 #define ALPHA_SP 224+200(SP) #define FZERO 232+200(SP) @@ -97,7 +95,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -271,7 +269,7 @@ li r11,0 slwi LDC, LDC, BASE_SHIFT #if defined(TRMMKERNEL) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/dgemm_kernel_power10.c b/kernel/power/dgemm_kernel_power10.c new file mode 100644 index 000000000..b531799a6 --- /dev/null +++ b/kernel/power/dgemm_kernel_power10.c @@ -0,0 +1,881 @@ +/********************************************************************************* +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**********************************************************************************/
+#include "common.h"
+#include <altivec.h>
+
+typedef __vector unsigned char vec_t;
+typedef FLOAT v4sf_t __attribute__ ((vector_size (16)));
+typedef FLOAT v2sf_t __attribute__ ((vector_size (8)));
+
+#ifdef TRMMKERNEL
+#define SAVE_ACC(ACC, J) \
+ __builtin_mma_disassemble_acc ((void *)result, ACC); \
+ rowC = (v4sf_t *) &CO[0* ldc+J]; \
+ rowC[0] = result[0] * alpha; \
+ rowC = (v4sf_t *) &CO[1*ldc+J]; \
+ rowC[0] = result[1] * alpha; \
+ rowC = (v4sf_t *) &CO[2*ldc+J]; \
+ rowC[0] = result[2] * alpha; \
+ rowC = (v4sf_t *) &CO[3*ldc+J]; \
+ rowC[0] = result[3] * alpha;
+#define SAVE_ACC1(ACC, J) \
+ __builtin_mma_disassemble_acc ((void *)result, ACC); \
+ rowC = (v4sf_t *) &CO[4* ldc+J]; \
+ rowC[0] = result[0] * alpha; \
+ rowC = (v4sf_t *) &CO[5*ldc+J]; \
+ rowC[0] = result[1] * alpha; \
+ rowC = (v4sf_t *) &CO[6*ldc+J]; \
+ rowC[0] = result[2] * alpha; \
+ rowC = (v4sf_t *) &CO[7*ldc+J]; \
+ rowC[0] = result[3] * alpha;
+#define SAVE2x4_ACC(ACC, J) \
+ __builtin_mma_disassemble_acc ((void *)result, ACC); \
+ rowC = (v4sf_t *) &CO[0* ldc+J]; \
+ rowC[0] = result[0] * alpha; \
+ rowC = (v4sf_t *) &CO[1* ldc+J]; \
+ rowC[0] = result[1] * alpha;
+#else
+#define SAVE_ACC(ACC, J) \
+ __builtin_mma_disassemble_acc ((void *)result, ACC); \
+ rowC = (v4sf_t *) &CO[0* ldc+J]; \
+ rowC[0] += result[0] * alpha; \
+ rowC = (v4sf_t *) &CO[1*ldc+J]; \
+ rowC[0] += result[1] * alpha; \
+ rowC = (v4sf_t *) &CO[2*ldc+J]; \
+ rowC[0] += result[2] * alpha; \
+ rowC = (v4sf_t *) &CO[3*ldc+J]; \
+ rowC[0] += result[3] * alpha;
+#define SAVE_ACC1(ACC, J) \
+ __builtin_mma_disassemble_acc ((void *)result, ACC); \
+ rowC = (v4sf_t *) &CO[4* ldc+J]; \
+ rowC[0] += result[0] * alpha; \
+ rowC = (v4sf_t *) &CO[5*ldc+J]; \
+ rowC[0] += result[1] * alpha; \
+ rowC = (v4sf_t *) &CO[6*ldc+J]; \
+ rowC[0] += result[2] * alpha; \
+ rowC = (v4sf_t *) &CO[7*ldc+J]; \
+ rowC[0] += result[3] * alpha;
+#define SAVE2x4_ACC(ACC, J) \
+ __builtin_mma_disassemble_acc ((void *)result, ACC); \
+ rowC = (v4sf_t *) &CO[0* ldc+J]; \
+ rowC[0] += result[0] * alpha; \
+ rowC = (v4sf_t *) &CO[1* ldc+J]; \
+ rowC[0] += result[1] * alpha;
+#endif
+
+#define PREFETCH1(x, y) asm volatile ("dcbt
%0, %1" : : "r" (x), "b" (y) : "memory"); + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) +#define REFRESH_TEMP_BK(x, y) \ + temp = k - off; +#elif defined(LEFT) +#define REFRESH_TEMP_BK(x, y) \ + temp = off + x; +#else +#define REFRESH_TEMP_BK(x, y) \ + temp = off + y; +#endif +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) +#define REFRESH_POINTERS(x, y) \ + BO = B; \ + REFRESH_TEMP_BK(x, y) +#else +#define REFRESH_POINTERS(x, y) \ + AO += off * x; \ + BO = B + off * y; \ + REFRESH_TEMP_BK(x, y) +#endif + +#ifdef LEFT +#define REFRESH_OFF(x) \ + off += x; +#else +#define REFRESH_OFF(x) +#endif + +#ifdef LEFT +#define UPDATE_TEMP(x, y) \ + temp -= x; +#else +#define UPDATE_TEMP(x, y) \ + temp -= y; +#endif + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) +#define REFRESH_TMP_AFTER_SAVE(x, y) \ + temp = k - off; \ + UPDATE_TEMP(x, y) \ + AO += temp * x; \ + BO += temp * y; +#else +#define REFRESH_TMP_AFTER_SAVE(x, y) +#endif + +#define REFRESH_AFTER_SAVE(x,y) \ + REFRESH_TMP_AFTER_SAVE(x, y) \ + REFRESH_OFF(x) +/************************************************************************************* +* GEMM Kernel +*************************************************************************************/ +int +CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, FLOAT * A, FLOAT * B, + FLOAT * C, BLASLONG ldc +#ifdef TRMMKERNEL + , BLASLONG offset +#endif + ) +{ + BLASLONG i1; +#if defined(TRMMKERNEL) + BLASLONG off; +#endif +#if defined(TRMMKERNEL) && !defined(LEFT) + off = -offset; +#endif + v4sf_t valpha = { alpha, alpha }; + for (i1 = 0; i1 < (n >> 3); i1++) + { + BLASLONG j, temp; + FLOAT *CO; + FLOAT *AO; +#if defined(TRMMKERNEL) && defined(LEFT) + off = offset; +#endif + CO = C; + C += ldc << 3; + AO = A; + PREFETCH1 (A, 128); + PREFETCH1 (A, 256); + for (j = 0; j < (m >> 3); j++) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (8, 8); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3, acc4,acc5,acc6,acc7; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rb = (vec_t *) & BO[0]; + __vector_pair rowB, rowB1; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_assemble_pair (&rowB1, rb[3], rb[2]); + __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64ger (&acc1, rowB1, rowA[0]); + __builtin_mma_xvf64ger (&acc2, rowB, rowA[1]); + __builtin_mma_xvf64ger (&acc3, rowB1, rowA[1]); + __builtin_mma_xvf64ger (&acc4, rowB, rowA[2]); + __builtin_mma_xvf64ger (&acc5, rowB1, rowA[2]); + __builtin_mma_xvf64ger (&acc6, rowB, rowA[3]); + __builtin_mma_xvf64ger (&acc7, rowB1, rowA[3]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 3]; + rb = (vec_t *) & BO[l << 3]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_assemble_pair (&rowB1, rb[3], rb[2]); + __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64gerpp (&acc1, rowB1, rowA[0]); + __builtin_mma_xvf64gerpp (&acc2, rowB, rowA[1]); + __builtin_mma_xvf64gerpp (&acc3, rowB1, rowA[1]); + __builtin_mma_xvf64gerpp (&acc4, rowB, rowA[2]); + __builtin_mma_xvf64gerpp (&acc5, rowB1, rowA[2]); + __builtin_mma_xvf64gerpp (&acc6, rowB, rowA[3]); + __builtin_mma_xvf64gerpp (&acc7, rowB1, rowA[3]); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC1 (&acc1, 0); + SAVE_ACC (&acc2, 2); + SAVE_ACC1 (&acc3, 2); + SAVE_ACC (&acc4, 4); + SAVE_ACC1 (&acc5, 4); + SAVE_ACC (&acc6, 6); + SAVE_ACC1 (&acc7, 6); + CO += 8; 
+ AO += temp << 3; + BO += temp << 3; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (8, 8) +#endif + } + if (m & 4) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (4, 8); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + __vector_pair rowB, rowB1; + vec_t *rb = (vec_t *) & BO[0]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_assemble_pair (&rowB1, rb[3], rb[2]); + __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64ger (&acc1, rowB1, rowA[0]); + __builtin_mma_xvf64ger (&acc2, rowB, rowA[1]); + __builtin_mma_xvf64ger (&acc3, rowB1, rowA[1]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 2]; + rb = (vec_t *) & BO[l << 3]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_assemble_pair (&rowB1, rb[3], rb[2]); + __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64gerpp (&acc1, rowB1, rowA[0]); + __builtin_mma_xvf64gerpp (&acc2, rowB, rowA[1]); + __builtin_mma_xvf64gerpp (&acc3, rowB1, rowA[1]); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC1 (&acc1, 0); + SAVE_ACC (&acc2, 2); + SAVE_ACC1 (&acc3, 2); + CO += 4; + AO += temp << 2; + BO += temp << 3; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (4, 8) +#endif + } + if (m & 2) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (2, 8); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + __vector_pair rowB, rowB1; + vec_t *rb = (vec_t *) & BO[0]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_assemble_pair (&rowB1, rb[3], rb[2]); + __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64ger (&acc1, rowB1, rowA[0]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 1]; + rb = (vec_t *) & BO[l << 3]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_assemble_pair (&rowB1, rb[3], rb[2]); + __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64gerpp (&acc1, rowB1, rowA[0]); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC1 (&acc1, 0); + CO += 2; + AO += temp << 1; + BO += temp << 3; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (2, 8) +#endif + } + if (m & 1) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (1, 8); +#else + BO = B; + temp = k; +#endif + BLASLONG l = 0; + v4sf_t t = { 0, 0 }; + v4sf_t t1 = { 0, 0 }; + v4sf_t t2 = { 0, 0 }; + v4sf_t t3 = { 0, 0 }; + for (l = 0; l < temp; l++) + { + v4sf_t rowA = { AO[l], AO[l] }; + v4sf_t rowB = { BO[l << 3], BO[(l << 3) + 1] }; + v4sf_t rowB1 = { BO[(l << 3) + 2], BO[(l << 3) + 3] }; + v4sf_t rowB2 = { BO[(l << 3) + 4], BO[(l << 3) + 5] }; + v4sf_t rowB3 = { BO[(l << 3) + 6], BO[(l << 3) + 7] }; + t += rowA * rowB; + t1 += rowA * rowB1; + t2 += rowA * rowB2; + t3 += rowA * rowB3; + } + t = t * valpha; + t1 = t1 * valpha; + t2 = t2 * valpha; + t3 = t3 * valpha; +#if defined(TRMMKERNEL) + CO[0 * ldc] = t[0]; + CO[1 * ldc] = t[1]; + CO[2 * ldc] = t1[0]; + CO[3 * ldc] = t1[1]; + CO[4 * ldc] = t2[0]; + CO[5 * ldc] = t2[1]; + CO[6 * ldc] = t3[0]; + CO[7 * ldc] = t3[1]; +#else + CO[0 * ldc] += t[0]; + CO[1 * ldc] += t[1]; + CO[2 * ldc] += t1[0]; + CO[3 * ldc] += t1[1]; + CO[4 * ldc] += t2[0]; + CO[5 * ldc] += t2[1]; + CO[6 * ldc] += t3[0]; + CO[7 * ldc] += t3[1]; +#endif + CO += 1; + AO += temp; + BO += temp << 3; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (1, 8) +#endif + } +#if defined(TRMMKERNEL) && 
!defined(LEFT) + off += 8; // number of values in A +#endif + B += k << 3; + } + if (n & 4) + { + BLASLONG j, temp; + FLOAT *CO; + FLOAT *AO; +#if defined(TRMMKERNEL) && defined(LEFT) + off = offset; +#endif + CO = C; + C += ldc << 2; + AO = A; + PREFETCH1 (A, 128); + PREFETCH1 (A, 256); + for (j = 0; j < (m >> 3); j++) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (8, 4); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + __vector_pair rowB; + vec_t *rb = (vec_t *) & BO[0]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64ger (&acc1, rowB, rowA[1]); + __builtin_mma_xvf64ger (&acc2, rowB, rowA[2]); + __builtin_mma_xvf64ger (&acc3, rowB, rowA[3]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 3]; + rb = (vec_t *) & BO[l << 2]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64gerpp (&acc1, rowB, rowA[1]); + __builtin_mma_xvf64gerpp (&acc2, rowB, rowA[2]); + __builtin_mma_xvf64gerpp (&acc3, rowB, rowA[3]); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc2, 4); + SAVE_ACC (&acc1, 2); + SAVE_ACC (&acc3, 6); + CO += 8; + AO += temp << 3; + BO += temp << 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (8, 4) +#endif + } + if (m & 4) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (4, 4); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + __vector_pair rowB; + vec_t *rb = (vec_t *) & BO[0]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64ger (&acc1, rowB, rowA[1]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 2]; + rb = (vec_t *) & BO[l << 2]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]); + __builtin_mma_xvf64gerpp (&acc1, rowB, rowA[1]); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc1, 2); + CO += 4; + AO += temp << 2; + BO += temp << 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (4, 4) +#endif + } + if (m & 2) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (2, 4); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + __vector_pair rowB; + vec_t *rb = (vec_t *) & BO[0]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 1]; + rb = (vec_t *) & BO[l << 2]; + __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]); + __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]); + } + SAVE_ACC (&acc0, 0); + CO += 2; + AO += temp << 1; + BO += temp << 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (2, 4) +#endif + } + if (m & 1) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (1, 4); +#else + BO = B; + temp = k; +#endif + BLASLONG l = 0; + v4sf_t t = { 0, 0 }; + v4sf_t t1 = { 0, 0 }; + for (l = 0; l < temp; l++) + { + v4sf_t rowA = { AO[l], AO[l] }; + v4sf_t rowB = { BO[l << 2], BO[(l << 2) + 1] }; + v4sf_t rowB1 = { BO[(l << 2) + 2], BO[(l << 2) + 3] }; + t += rowA * rowB; + t1 += rowA * rowB1; + } + t = t * valpha; + t1 = t1 * valpha; +#if defined(TRMMKERNEL) + CO[0 * ldc] = t[0]; + CO[1 * ldc] = t[1]; + CO[2 * ldc] = t1[0]; + CO[3 * 
+	  if (m & 1)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (1, 4);
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      BLASLONG l = 0;
+	      v4sf_t t = { 0, 0 };
+	      v4sf_t t1 = { 0, 0 };
+	      for (l = 0; l < temp; l++)
+		{
+		  v4sf_t rowA = { AO[l], AO[l] };
+		  v4sf_t rowB = { BO[l << 2], BO[(l << 2) + 1] };
+		  v4sf_t rowB1 = { BO[(l << 2) + 2], BO[(l << 2) + 3] };
+		  t += rowA * rowB;
+		  t1 += rowA * rowB1;
+		}
+	      t = t * valpha;
+	      t1 = t1 * valpha;
+#if defined(TRMMKERNEL)
+	      CO[0 * ldc] = t[0];
+	      CO[1 * ldc] = t[1];
+	      CO[2 * ldc] = t1[0];
+	      CO[3 * ldc] = t1[1];
+#else
+	      CO[0 * ldc] += t[0];
+	      CO[1 * ldc] += t[1];
+	      CO[2 * ldc] += t1[0];
+	      CO[3 * ldc] += t1[1];
+#endif
+	      CO += 1;
+	      AO += temp;
+	      BO += temp << 2;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (1, 4)
+#endif
+	    }
+#if defined(TRMMKERNEL) && !defined(LEFT)
+	  off += 4;		// number of values in A
+#endif
+	  B += k << 2;
+	}
+      if (n & 2)
+	{
+	  BLASLONG j, temp;
+#if defined(TRMMKERNEL) && defined(LEFT)
+	  off = offset;
+#endif
+	  FLOAT *CO;
+	  FLOAT *AO;
+	  CO = C;
+	  C += ldc << 1;
+	  AO = A;
+	  for (j = 0; j < (m >> 3); j++)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (8, 2);
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      v4sf_t *rowC;
+	      v4sf_t result[4];
+	      __vector_quad acc0, acc1, acc2, acc3;
+	      BLASLONG l = 0;
+	      FLOAT t[4] = { 0, 0, 0, 0 };
+	      t[0] = BO[0], t[1] = BO[1];
+	      __vector_pair rowB;
+	      vec_t *rb = (vec_t *) & t[0];
+	      __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]);
+	      vec_t *rowA = (vec_t *) & AO[0];
+	      __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
+	      __builtin_mma_xvf64ger (&acc1, rowB, rowA[1]);
+	      __builtin_mma_xvf64ger (&acc2, rowB, rowA[2]);
+	      __builtin_mma_xvf64ger (&acc3, rowB, rowA[3]);
+	      for (l = 1; l < temp; l++)
+		{
+		  t[0] = BO[l << 1], t[1] = BO[(l << 1) + 1];
+		  rb = (vec_t *) & t[0];
+		  __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]);
+		  rowA = (vec_t *) & AO[l << 3];
+		  __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
+		  __builtin_mma_xvf64gerpp (&acc1, rowB, rowA[1]);
+		  __builtin_mma_xvf64gerpp (&acc2, rowB, rowA[2]);
+		  __builtin_mma_xvf64gerpp (&acc3, rowB, rowA[3]);
+		}
+	      SAVE2x4_ACC (&acc0, 0);
+	      SAVE2x4_ACC (&acc1, 2);
+	      SAVE2x4_ACC (&acc2, 4);
+	      SAVE2x4_ACC (&acc3, 6);
+	      CO += 8;
+	      AO += temp << 3;
+	      BO += temp << 1;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (8, 2)
+#endif
+	    }
+	  if (m & 4)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (4, 2);
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      v4sf_t *rowC;
+	      v4sf_t result[4];
+	      __vector_quad acc0, acc1;
+	      BLASLONG l = 0;
+	      FLOAT t[4] = { 0, 0, 0, 0 };
+	      t[0] = BO[0], t[1] = BO[1];
+	      __vector_pair rowB;
+	      vec_t *rb = (vec_t *) & t[0];
+	      __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]);
+	      vec_t *rowA = (vec_t *) & AO[0];
+	      __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
+	      __builtin_mma_xvf64ger (&acc1, rowB, rowA[1]);
+	      for (l = 1; l < temp; l++)
+		{
+		  t[0] = BO[l << 1], t[1] = BO[(l << 1) + 1];
+		  rb = (vec_t *) & t[0];
+		  __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]);
+		  rowA = (vec_t *) & AO[l << 2];
+		  __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
+		  __builtin_mma_xvf64gerpp (&acc1, rowB, rowA[1]);
+		}
+	      SAVE2x4_ACC (&acc0, 0);
+	      SAVE2x4_ACC (&acc1, 2);
+	      CO += 4;
+	      AO += temp << 2;
+	      BO += temp << 1;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (4, 2)
+#endif
+	    }
+	  if (m & 2)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (2, 2);
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      v4sf_t *rowC;
+	      v4sf_t result[4];
+	      __vector_quad acc0;
+	      BLASLONG l = 0;
+	      FLOAT t[4] = { 0, 0, 0, 0 };
+	      t[0] = BO[0], t[1] = BO[1];
+	      __vector_pair rowB;
+	      vec_t *rb = (vec_t *) & t[0];
+	      __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]);
+	      vec_t *rowA = (vec_t *) & AO[0];
+	      __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
+	      for (l = 1; l < temp; l++)
+		{
+		  t[0] = BO[l << 1], t[1] = BO[(l << 1) + 1];
+		  rb = (vec_t *) & t[0];
+		  __builtin_mma_assemble_pair (&rowB, rb[1], rb[0]);
+		  rowA = (vec_t *) & AO[l << 1];
+		  __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
+		}
+	      SAVE2x4_ACC (&acc0, 0);
+	      CO += 2;
+	      AO += temp << 1;
+	      BO += temp << 1;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (2, 2)
+#endif
+	    }
+	  if (m & 1)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (1, 2);
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      BLASLONG l = 0;
+	      v4sf_t t = { 0, 0 };
+	      for (l = 0; l < temp; l++)
+		{
+		  v4sf_t rowA = { AO[l], AO[l] };
+		  v4sf_t rowB = { BO[l << 1], BO[(l << 1) + 1] };
+		  t += rowA * rowB;
+		}
+	      t = t * valpha;
+#if defined(TRMMKERNEL)
+	      CO[0 * ldc] = t[0];
+	      CO[1 * ldc] = t[1];
+#else
+	      CO[0 * ldc] += t[0];
+	      CO[1 * ldc] += t[1];
+#endif
+	      CO += 1;
+	      AO += temp;
+	      BO += temp << 1;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (1, 2)
+#endif
+	    }
+#if defined(TRMMKERNEL) && !defined(LEFT)
+	  off += 2;		// number of values in A
+#endif
+	  B += k << 1;
+	}
+      if (n & 1)
+	{
+	  BLASLONG i, temp;
+#if defined(TRMMKERNEL) && defined(LEFT)
+	  off = offset;
+#endif
+	  FLOAT *CO;
+	  FLOAT *AO;
+	  CO = C;
+	  C += ldc;
+	  AO = A;
+	  for (i = 0; i < (m >> 3); i++)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (8, 1)
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      BLASLONG l = 0;
+	      v4sf_t t = { 0, 0 };
+	      v4sf_t t1 = { 0, 0 };
+	      v4sf_t t2 = { 0, 0 };
+	      v4sf_t t3 = { 0, 0 };
+	      for (l = 0; l < temp; l++)
+		{
+		  v4sf_t rowB = { BO[l], BO[l] };
+		  v4sf_t rowA = { AO[l << 3], AO[(l << 3) + 1] };
+		  v4sf_t rowA1 = { AO[(l << 3) + 2], AO[(l << 3) + 3] };
+		  v4sf_t rowA2 = { AO[(l << 3) + 4], AO[(l << 3) + 5] };
+		  v4sf_t rowA3 = { AO[(l << 3) + 6], AO[(l << 3) + 7] };
+		  t += rowA * rowB;
+		  t1 += rowA1 * rowB;
+		  t2 += rowA2 * rowB;
+		  t3 += rowA3 * rowB;
+		}
+	      t = t * valpha;
+	      t1 = t1 * valpha;
+	      t2 = t2 * valpha;
+	      t3 = t3 * valpha;
+#if defined(TRMMKERNEL)
+	      CO[0] = t[0];
+	      CO[1] = t[1];
+	      CO[2] = t1[0];
+	      CO[3] = t1[1];
+	      CO[4] = t2[0];
+	      CO[5] = t2[1];
+	      CO[6] = t3[0];
+	      CO[7] = t3[1];
+#else
+	      CO[0] += t[0];
+	      CO[1] += t[1];
+	      CO[2] += t1[0];
+	      CO[3] += t1[1];
+	      CO[4] += t2[0];
+	      CO[5] += t2[1];
+	      CO[6] += t3[0];
+	      CO[7] += t3[1];
+#endif
+	      AO += temp << 3;
+	      BO += temp;
+	      CO += 8;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (8, 1)
+#endif
+	    }
+	  if (m & 4)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (4, 1)
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      BLASLONG l = 0;
+	      v4sf_t t = { 0, 0 };
+	      v4sf_t t1 = { 0, 0 };
+	      for (l = 0; l < temp; l++)
+		{
+		  v4sf_t rowB = { BO[l], BO[l] };
+		  v4sf_t rowA = { AO[l << 2], AO[(l << 2) + 1] };
+		  v4sf_t rowA1 = { AO[(l << 2) + 2], AO[(l << 2) + 3] };
+		  t += rowA * rowB;
+		  t1 += rowA1 * rowB;
+		}
+	      t = t * valpha;
+	      t1 = t1 * valpha;
+#if defined(TRMMKERNEL)
+	      CO[0] = t[0];
+	      CO[1] = t[1];
+	      CO[2] = t1[0];
+	      CO[3] = t1[1];
+#else
+	      CO[0] += t[0];
+	      CO[1] += t[1];
+	      CO[2] += t1[0];
+	      CO[3] += t1[1];
+#endif
+	      AO += temp << 2;
+	      BO += temp;
+	      CO += 4;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (4, 1)
+#endif
+	    }
+	  if (m & 2)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (2, 1)
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      BLASLONG l = 0;
+	      v4sf_t t = { 0, 0 };
+	      for (l = 0; l < temp; l++)
+		{
+		  v4sf_t rowB = { BO[l], BO[l] };
+		  v4sf_t rowA = { AO[l << 1], AO[(l << 1) + 1] };
+		  t += rowA * rowB;
+		}
+	      t = t * valpha;
+#if defined(TRMMKERNEL)
+	      CO[0] = t[0];
+	      CO[1] = t[1];
+#else
+	      CO[0] += t[0];
+	      CO[1] += t[1];
+#endif
+	      AO += temp << 1;
+	      BO += temp;
+	      CO += 2;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (2, 1)
+#endif
+	    }
+	  if (m & 1)
+	    {
+	      FLOAT *BO;
+#if defined(TRMMKERNEL)
+	      REFRESH_POINTERS (1, 1)
+#else
+	      BO = B;
+	      temp = k;
+#endif
+	      BLASLONG l = 0;
+	      FLOAT t = 0;
+	      for (l = 0; l < temp; l++)
+		{
+		  t += AO[l] * BO[l];
+		}
+	      AO += temp;
+	      BO += temp;
+#if defined(TRMMKERNEL)
+	      CO[0] = t * alpha;
+#else
+	      CO[0] += t * alpha;
+#endif
+	      CO += 1;
+#if defined(TRMMKERNEL)
+	      REFRESH_AFTER_SAVE (1, 1)
+#endif
+	    }
+#if defined(TRMMKERNEL) && !defined(LEFT)
+	  off += 1;		// number of values in A
+#endif
+	  B += k;
+	}
+  return 0;
+}
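[Annotation] The new kernel above leans on the POWER10 MMA facility: a __vector_quad is a 512-bit accumulator tile, __builtin_mma_xvf64ger starts an f64 outer-product accumulation (overwriting the tile), and __builtin_mma_xvf64gerpp keeps accumulating into it. A stand-alone sketch of one tile, assuming GCC >= 10 with -mcpu=power10; the row/column mapping of the disassembled accumulator is illustrative here, since in the kernel it is fixed by the SAVE_ACC macros defined earlier in the file:

    /* sketch: one MMA accumulator computing a 4x2 double tile of C += A*B */
    #include <altivec.h>
    typedef unsigned char vec_t __attribute__ ((vector_size (16)));

    void tile_4x2 (const double *A, const double *B, double *C,
                   long k, long ldc)
    {
      __vector_quad acc;
      __vector_pair pA;
      vec_t *ra = (vec_t *) A;
      __builtin_mma_assemble_pair (&pA, ra[1], ra[0]);    /* 4 doubles of A */
      __builtin_mma_xvf64ger (&acc, pA, ((vec_t *) B)[0]); /* first k: mul  */
      for (long l = 1; l < k; l++)
        {
          ra = (vec_t *) & A[l << 2];
          __builtin_mma_assemble_pair (&pA, ra[1], ra[0]);
          __builtin_mma_xvf64gerpp (&acc, pA, ((vec_t *) & B[l << 1])[0]);
        }
      __vector double row[4];
      __builtin_mma_disassemble_acc (row, &acc);
      for (int i = 0; i < 4; i++)       /* mapping shown is an assumption;  */
        {                               /* the kernel's SAVE_ACC defines it */
          C[0 * ldc + i] += row[i][0];
          C[1 * ldc + i] += row[i][1];
        }
    }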
diff --git a/kernel/power/dgemm_kernel_power9.S b/kernel/power/dgemm_kernel_power9.S
index a1762dcf2..2fb1b27ef 100644
--- a/kernel/power/dgemm_kernel_power9.S
+++ b/kernel/power/dgemm_kernel_power9.S
@@ -135,18 +135,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	std	r14, 280(SP)
 
-	stxv	v20, 288(SP)
-	stxv	v21, 304(SP)
-	stxv	v22, 320(SP)
-	stxv	v23, 336(SP)
-	stxv	v24, 352(SP)
-	stxv	v25, 368(SP)
-	stxv	v26, 384(SP)
-	stxv	v27, 400(SP)
-	stxv	v28, 416(SP)
-	stxv	v29, 432(SP)
-	stxv	v30, 448(SP)
-	stxv	v31, 464(SP)
+	stxv	vs52, 288(SP)
+	stxv	vs53, 304(SP)
+	stxv	vs54, 320(SP)
+	stxv	vs55, 336(SP)
+	stxv	vs56, 352(SP)
+	stxv	vs57, 368(SP)
+	stxv	vs58, 384(SP)
+	stxv	vs59, 400(SP)
+	stxv	vs60, 416(SP)
+	stxv	vs61, 432(SP)
+	stxv	vs62, 448(SP)
+	stxv	vs63, 464(SP)
 
 	stfd	f1, ALPHA_SP
@@ -229,18 +229,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	ld	r15, 272(SP)
 	ld	r14, 280(SP)
 
-	lxv	v20, 288(SP)
-	lxv	v21, 304(SP)
-	lxv	v22, 320(SP)
-	lxv	v23, 336(SP)
-	lxv	v24, 352(SP)
-	lxv	v25, 368(SP)
-	lxv	v26, 384(SP)
-	lxv	v27, 400(SP)
-	lxv	v28, 416(SP)
-	lxv	v29, 432(SP)
-	lxv	v30, 448(SP)
-	lxv	v31, 464(SP)
+	lxv	vs52, 288(SP)
+	lxv	vs53, 304(SP)
+	lxv	vs54, 320(SP)
+	lxv	vs55, 336(SP)
+	lxv	vs56, 352(SP)
+	lxv	vs57, 368(SP)
+	lxv	vs58, 384(SP)
+	lxv	vs59, 400(SP)
+	lxv	vs60, 416(SP)
+	lxv	vs61, 432(SP)
+	lxv	vs62, 448(SP)
+	lxv	vs63, 464(SP)
 
 	addi	SP, SP, STACKSIZE
 	blr
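[Annotation] The register renames in these two hunks are not functional changes: lxv/stxv take a VSX register operand, and the AltiVec registers v0-v31 overlay the upper half of the VSX register file, so v20-v31 are the same physical registers as vs52-vs63. Spelling them as VSRs presumably keeps assemblers that only accept VSR names for these mnemonics (notably the AIX assembler) happy. The mapping, as a one-line sketch:

    /* AltiVec VR n and VSX VSR n+32 name the same 128-bit register */
    #define VR_TO_VSR(n)  ((n) + 32)    /* v20 -> vs52, ..., v31 -> vs63 */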
diff --git a/kernel/power/dgemm_macros_16x4_power8.S b/kernel/power/dgemm_macros_16x4_power8.S
index 5be517f7c..782425fbd 100644
--- a/kernel/power/dgemm_macros_16x4_power8.S
+++ b/kernel/power/dgemm_macros_16x4_power8.S
@@ -37,7 +37,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * Macros for N=4, M=16				*
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD4x16_1', `
+#else
 .macro LOAD4x16_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -58,10 +62,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 128
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x16_I1', `
+#else
 .macro KERNEL4x16_I1
+#endif
 
 	xvmuldp	vs32, vs0, vs24
 	xvmuldp	vs33, vs1, vs24
@@ -125,11 +137,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 128
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x16_1', `
+#else
 .macro KERNEL4x16_1
+#endif
 
 	xvmaddadp	vs32, vs0, vs24
 	xvmaddadp	vs33, vs1, vs24
@@ -194,9 +214,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 128
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x16_2', `
+#else
 .macro KERNEL4x16_2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
 	xvmaddadp	vs33, vs9, vs28
@@ -260,9 +288,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 128
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x16_L1', `
+#else
 .macro KERNEL4x16_L1
+#endif
 
 	xvmaddadp	vs32, vs0, vs24
 	xvmaddadp	vs33, vs1, vs24
@@ -326,9 +362,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 128
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x16_L2', `
+#else
 .macro KERNEL4x16_L2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
 	xvmaddadp	vs33, vs9, vs28
@@ -392,10 +436,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs63, vs15, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x16_E2', `
+#else
 .macro KERNEL4x16_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -434,9 +486,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs62, vs14, vs31
 	xvmaddadp	vs63, vs15, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x16_SUBI1', `
+#else
 .macro KERNEL4x16_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -495,9 +555,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs62, vs6, vs27
 	xvmuldp	vs63, vs7, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x16_SUB1', `
+#else
 .macro KERNEL4x16_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -555,9 +623,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs62, vs6, vs27
 	xvmaddadp	vs63, vs7, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE4x16', `
+#else
 .macro SAVE4x16
+#endif
 
 	add	T2, CO, LDC
@@ -680,13 +756,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	stxvd2x	vs39, o112, T4
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
 * Macros for N=4, M=8                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD4x8_1', `
+#else
 .macro LOAD4x8_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -703,9 +787,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 64
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x8_I1', `
+#else
 .macro KERNEL4x8_I1
+#endif
 
 	xvmuldp	vs32, vs0, vs24
 	xvmuldp	vs33, vs1, vs24
@@ -744,9 +836,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 64
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x8_1', `
+#else
 .macro KERNEL4x8_1
+#endif
 
 	xvmaddadp	vs32, vs0, vs24
 	xvmaddadp	vs33, vs1, vs24
@@ -784,9 +884,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 64
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x8_2', `
+#else
 .macro KERNEL4x8_2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
 	xvmaddadp	vs33, vs9, vs28
@@ -824,9 +932,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 64
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x8_E2', `
+#else
 .macro KERNEL4x8_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -849,9 +965,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs58, vs10, vs31
 	xvmaddadp	vs59, vs11, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x8_SUBI1', `
+#else
 .macro KERNEL4x8_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -887,9 +1011,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs58, vs2, vs27
 	xvmuldp	vs59, vs3, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x8_SUB1', `
+#else
 .macro KERNEL4x8_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -925,9 +1057,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs58, vs2, vs27
 	xvmaddadp	vs59, vs3, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE4x8', `
+#else
 .macro SAVE4x8
+#endif
 
 	mr	T1, CO
@@ -1035,13 +1175,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 64
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=4, M=4                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD4x4_1', `
+#else
 .macro LOAD4x4_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -1054,9 +1202,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 32
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x4_I1', `
+#else
 .macro KERNEL4x4_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -1082,9 +1238,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs56, vs0, vs27
 	xvmuldp	vs57, vs1, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x4_1', `
+#else
 .macro KERNEL4x4_1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -1110,9 +1274,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs56, vs0, vs27
 	xvmaddadp	vs57, vs1, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x4_2', `
+#else
 .macro KERNEL4x4_2
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -1138,9 +1310,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs56, vs8, vs31
 	xvmaddadp	vs57, vs9, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x4_E2', `
+#else
 .macro KERNEL4x4_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -1155,9 +1335,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs56, vs8, vs31
 	xvmaddadp	vs57, vs9, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x4_SUBI1', `
+#else
 .macro KERNEL4x4_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -1183,9 +1371,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs56, vs0, vs27
 	xvmuldp	vs57, vs1, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x4_SUB1', `
+#else
 .macro KERNEL4x4_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -1211,9 +1407,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs56, vs0, vs27
 	xvmaddadp	vs57, vs1, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE4x4', `
+#else
 .macro SAVE4x4
+#endif
 
 	mr	T1, CO
@@ -1289,13 +1493,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=4, M=2                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD4x2_1', `
+#else
 .macro LOAD4x2_1
+#endif
 
 	lxvd2x	vs0, 0, AO
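[Annotation] A reading aid for the macro families being wrapped here (the diff only adds the _AIX guards; the bodies are untouched): the _I1/_SUBI1 variants start an accumulation with xvmuldp (overwrite), while _1/_2/_SUB1 continue it with xvmaddadp (multiply-add), and the _1/_2 pair alternate between two register sets (vs0-15 split in half) so each step computes on one set while loading the next A/B slice into the other; _E2 drains the last set. A toy model in C, all names and the even-k assumption illustrative:

    /* toy model of the _SUBI1 / _1 / _2 / _E2 macro roles */
    #include <stdio.h>

    int main (void)
    {
      double a[6] = { 1, 2, 3, 4, 5, 6 }, b = 2.0;
      double acc, set0, set1;
      long l, k = 6;               /* k assumed even, as in the unrolled path */

      set0 = a[0];                 /* LOAD..._1  : prime set 0                */
      set1 = a[1];
      acc = set0 * b;              /* ..._I1/_SUBI1: xvmuldp, overwrite       */
      for (l = 2; l + 1 < k; l += 2)
        {
          set0 = a[l];     acc += set1 * b;   /* ..._2: compute 1, load 0    */
          set1 = a[l + 1]; acc += set0 * b;   /* ..._1: compute 0, load 1    */
        }
      acc += set1 * b;             /* ..._E2: drain, xvmaddadp                */
      printf ("%g\n", acc);        /* 2*(1+...+6) = 42                        */
      return 0;
    }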
@@ -1307,9 +1519,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 16
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x2_I1', `
+#else
 .macro KERNEL4x2_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
@@ -1330,9 +1550,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs56, vs0, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x2_1', `
+#else
 .macro KERNEL4x2_1
+#endif
 
 	lxvd2x	vs8, 0, AO
@@ -1353,9 +1581,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs56, vs0, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x2_2', `
+#else
 .macro KERNEL4x2_2
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -1376,9 +1612,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs56, vs8, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x2_E2', `
+#else
 .macro KERNEL4x2_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -1389,9 +1633,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs56, vs8, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x2_SUBI1', `
+#else
 .macro KERNEL4x2_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -1412,9 +1664,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs56, vs0, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x2_SUB1', `
+#else
 .macro KERNEL4x2_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -1435,9 +1695,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs56, vs0, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE4x2', `
+#else
 .macro SAVE4x2
+#endif
 
 	mr	T1, CO
@@ -1497,13 +1765,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 16
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=4, M=1                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD4x1_1', `
+#else
 .macro LOAD4x1_1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -1515,9 +1791,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 8
 	addi	BO, BO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x1_I1', `
+#else
 .macro KERNEL4x1_I1
+#endif
 
 	lxsdx	vs8, 0, AO
@@ -1538,9 +1822,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmuldp	vs56, vs0, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x1_1', `
+#else
 .macro KERNEL4x1_1
+#endif
 
 	lxsdx	vs8, 0, AO
@@ -1561,9 +1853,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs56, vs0, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x1_2', `
+#else
 .macro KERNEL4x1_2
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -1584,9 +1884,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs56, vs8, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x1_E2', `
+#else
 .macro KERNEL4x1_E2
+#endif
 
 	xsmaddadp	vs32, vs8, vs28
@@ -1597,9 +1905,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs56, vs8, vs31
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x1_SUBI1', `
+#else
 .macro KERNEL4x1_SUBI1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -1620,9 +1936,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmuldp	vs56, vs0, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL4x1_SUB1', `
+#else
 .macro KERNEL4x1_SUB1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -1643,9 +1967,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs56, vs0, vs27
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE4x1', `
+#else
 .macro SAVE4x1
+#endif
 
 	mr	T1, CO
@@ -1705,13 +2037,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 8
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=2, M=16                                               *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD2x16_1', `
+#else
 .macro LOAD2x16_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -1731,9 +2071,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 64
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x16_I1', `
+#else
 .macro KERNEL2x16_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -1772,9 +2120,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs46, vs6, vs25
 	xvmuldp	vs47, vs7, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x16_1', `
+#else
 .macro KERNEL2x16_1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -1813,9 +2169,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs46, vs6, vs25
 	xvmaddadp	vs47, vs7, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x16_2', `
+#else
 .macro KERNEL2x16_2
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -1854,9 +2218,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs46, vs14, vs29
 	xvmaddadp	vs47, vs15, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x16_E2', `
+#else
 .macro KERNEL2x16_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -1877,9 +2249,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs46, vs14, vs29
 	xvmaddadp	vs47, vs15, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x16_SUBI1', `
+#else
 .macro KERNEL2x16_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -1918,9 +2298,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs46, vs6, vs25
 	xvmuldp	vs47, vs7, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x16_SUB1', `
+#else
 .macro KERNEL2x16_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -1959,9 +2347,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs46, vs6, vs25
 	xvmaddadp	vs47, vs7, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE2x16', `
+#else
 .macro SAVE2x16
+#endif
 
 	mr	T1, CO
 	addi	T2, T1, 64
@@ -2055,13 +2451,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 128
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=4, M=8                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD2x8_1', `
+#else
 .macro LOAD2x8_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2074,9 +2478,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 64
 	addi	BO, BO, 16
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x8_I1', `
+#else
 .macro KERNEL2x8_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -2100,9 +2512,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs42, vs2, vs25
 	xvmuldp	vs43, vs3, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x8_1', `
+#else
 .macro KERNEL2x8_1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -2126,9 +2546,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs42, vs2, vs25
 	xvmaddadp	vs43, vs3, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x8_2', `
+#else
 .macro KERNEL2x8_2
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2152,9 +2580,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs42, vs10, vs29
 	xvmaddadp	vs43, vs11, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x8_E2', `
+#else
 .macro KERNEL2x8_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -2167,9 +2603,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs42, vs10, vs29
 	xvmaddadp	vs43, vs11, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x8_SUBI1', `
+#else
 .macro KERNEL2x8_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2193,9 +2637,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs42, vs2, vs25
 	xvmuldp	vs43, vs3, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x8_SUB1', `
+#else
 .macro KERNEL2x8_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2219,9 +2671,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs42, vs2, vs25
 	xvmaddadp	vs43, vs3, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE2x8', `
+#else
 .macro SAVE2x8
+#endif
 
 	mr	T1, CO
@@ -2277,13 +2737,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 64
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=2, M=4                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD2x4_1', `
+#else
 .macro LOAD2x4_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2294,9 +2762,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 32
 	addi	BO, BO, 16
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x4_I1', `
+#else
 .macro KERNEL2x4_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -2314,9 +2790,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs40, vs0, vs25
 	xvmuldp	vs41, vs1, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x4_1', `
+#else
 .macro KERNEL2x4_1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -2334,9 +2818,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs40, vs0, vs25
 	xvmaddadp	vs41, vs1, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x4_2', `
+#else
 .macro KERNEL2x4_2
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2354,9 +2846,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs40, vs8, vs29
 	xvmaddadp	vs41, vs9, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x4_E2', `
+#else
 .macro KERNEL2x4_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -2365,9 +2865,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs40, vs8, vs29
 	xvmaddadp	vs41, vs9, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x4_SUBI1', `
+#else
 .macro KERNEL2x4_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2385,9 +2893,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs40, vs0, vs25
 	xvmuldp	vs41, vs1, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x4_SUB1', `
+#else
 .macro KERNEL2x4_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2405,9 +2921,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs40, vs0, vs25
 	xvmaddadp	vs41, vs1, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE2x4', `
+#else
 .macro SAVE2x4
+#endif
 
 	mr	T1, CO
@@ -2447,13 +2971,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=2, M=2                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD2x2_1', `
+#else
 .macro LOAD2x2_1
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -2463,9 +2995,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 16
 	addi	BO, BO, 16
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x2_I1', `
+#else
 .macro KERNEL2x2_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
@@ -2480,9 +3020,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs40, vs0, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x2_1', `
+#else
 .macro KERNEL2x2_1
+#endif
 
 	lxvd2x	vs8, 0, AO
@@ -2497,9 +3045,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs40, vs0, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x2_2', `
+#else
 .macro KERNEL2x2_2
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -2514,18 +3070,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs40, vs8, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x2_E2', `
+#else
 .macro KERNEL2x2_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
 	xvmaddadp	vs40, vs8, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x2_SUBI1', `
+#else
 .macro KERNEL2x2_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -2540,9 +3112,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs40, vs0, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x2_SUB1', `
+#else
 .macro KERNEL2x2_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -2557,9 +3137,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs40, vs0, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE2x2', `
+#else
 .macro SAVE2x2
+#endif
 
 	mr	T1, CO
@@ -2591,13 +3179,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 16
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
/*********************************************************************
* Macros for N=2, M=1                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD2x1_1', `
+#else
 .macro LOAD2x1_1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -2607,9 +3203,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 8
 	addi	BO, BO, 16
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x1_I1', `
+#else
 .macro KERNEL2x1_I1
+#endif
 
 	lxsdx	vs8, 0, AO
@@ -2624,9 +3228,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmuldp	vs40, vs0, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x1_1', `
+#else
 .macro KERNEL2x1_1
+#endif
 
 	lxsdx	vs8, 0, AO
@@ -2641,9 +3253,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs40, vs0, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x1_2', `
+#else
 .macro KERNEL2x1_2
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -2658,18 +3278,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs40, vs8, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x1_E2', `
+#else
 .macro KERNEL2x1_E2
+#endif
 
 	xsmaddadp	vs32, vs8, vs28
 	xsmaddadp	vs40, vs8, vs29
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x1_SUBI1', `
+#else
 .macro KERNEL2x1_SUBI1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -2684,9 +3320,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmuldp	vs40, vs0, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL2x1_SUB1', `
+#else
 .macro KERNEL2x1_SUB1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -2701,9 +3345,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs40, vs0, vs25
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE2x1', `
+#else
 .macro SAVE2x1
+#endif
 
 	mr	T1, CO
@@ -2735,13 +3387,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 8
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=1, M=16                                               *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD1x16_1', `
+#else
 .macro LOAD1x16_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2760,9 +3420,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 64
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x16_I1', `
+#else
 .macro KERNEL1x16_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -2791,9 +3459,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs38, vs6, vs24
 	xvmuldp	vs39, vs7, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x16_1', `
+#else
 .macro KERNEL1x16_1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -2822,9 +3498,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs38, vs6, vs24
 	xvmaddadp	vs39, vs7, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x16_2', `
+#else
 .macro KERNEL1x16_2
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2853,9 +3537,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs38, vs14, vs28
 	xvmaddadp	vs39, vs15, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x16_E2', `
+#else
 .macro KERNEL1x16_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -2867,9 +3559,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs38, vs14, vs28
 	xvmaddadp	vs39, vs15, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x16_SUBI1', `
+#else
 .macro KERNEL1x16_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2898,9 +3598,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs38, vs6, vs24
 	xvmuldp	vs39, vs7, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x16_SUB1', `
+#else
 .macro KERNEL1x16_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2929,9 +3637,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs38, vs6, vs24
 	xvmaddadp	vs39, vs7, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE1x16', `
+#else
 .macro SAVE1x16
+#endif
 
 	mr	T1, CO
 	addi	T2, T1, 64
@@ -2980,13 +3696,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 128
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=4, M=8                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD1x8_1', `
+#else
 .macro LOAD1x8_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -2998,9 +3722,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 64
 	addi	BO, BO, 8
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x8_I1', `
+#else
 .macro KERNEL1x8_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -3018,9 +3750,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs34, vs2, vs24
 	xvmuldp	vs35, vs3, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x8_1', `
+#else
 .macro KERNEL1x8_1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -3038,9 +3778,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs34, vs2, vs24
 	xvmaddadp	vs35, vs3, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x8_2', `
+#else
 .macro KERNEL1x8_2
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -3058,9 +3806,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs34, vs10, vs28
 	xvmaddadp	vs35, vs11, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x8_E2', `
+#else
 .macro KERNEL1x8_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
@@ -3068,9 +3824,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs34, vs10, vs28
 	xvmaddadp	vs35, vs11, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x8_SUBI1', `
+#else
 .macro KERNEL1x8_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -3088,9 +3852,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs34, vs2, vs24
 	xvmuldp	vs35, vs3, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x8_SUB1', `
+#else
 .macro KERNEL1x8_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -3108,9 +3880,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs34, vs2, vs24
 	xvmaddadp	vs35, vs3, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE1x8', `
+#else
 .macro SAVE1x8
+#endif
 
 	mr	T1, CO
@@ -3140,13 +3920,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 64
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=1, M=4                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD1x4_1', `
+#else
 .macro LOAD1x4_1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -3156,9 +3944,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 32
 	addi	BO, BO, 8
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x4_I1', `
+#else
 .macro KERNEL1x4_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -3172,9 +3968,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs32, vs0, vs24
 	xvmuldp	vs33, vs1, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x4_1', `
+#else
 .macro KERNEL1x4_1
+#endif
 
 	lxvd2x	vs8, 0, AO
 	lxvd2x	vs9, o16, AO
@@ -3188,9 +3992,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs32, vs0, vs24
 	xvmaddadp	vs33, vs1, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x4_2', `
+#else
 .macro KERNEL1x4_2
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -3204,17 +4016,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs32, vs8, vs28
 	xvmaddadp	vs33, vs9, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x4_E2', `
+#else
 .macro KERNEL1x4_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
 	xvmaddadp	vs33, vs9, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x4_SUBI1', `
+#else
 .macro KERNEL1x4_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -3228,9 +4056,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs32, vs0, vs24
 	xvmuldp	vs33, vs1, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x4_SUB1', `
+#else
 .macro KERNEL1x4_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
 	lxvd2x	vs1, o16, AO
@@ -3244,9 +4080,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs32, vs0, vs24
 	xvmaddadp	vs33, vs1, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE1x4', `
+#else
 .macro SAVE1x4
+#endif
 
 	mr	T1, CO
@@ -3268,13 +4112,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 32
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=1, M=2                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD1x2_1', `
+#else
 .macro LOAD1x2_1
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -3283,9 +4135,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 16
 	addi	BO, BO, 8
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x2_I1', `
+#else
 .macro KERNEL1x2_I1
+#endif
 
 	lxvd2x	vs8, 0, AO
@@ -3297,9 +4157,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs32, vs0, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x2_1', `
+#else
 .macro KERNEL1x2_1
+#endif
 
 	lxvd2x	vs8, 0, AO
@@ -3311,9 +4179,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs32, vs0, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x2_2', `
+#else
 .macro KERNEL1x2_2
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -3325,16 +4201,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs32, vs8, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x2_E2', `
+#else
 .macro KERNEL1x2_E2
+#endif
 
 	xvmaddadp	vs32, vs8, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x2_SUBI1', `
+#else
 .macro KERNEL1x2_SUBI1
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -3346,9 +4238,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmuldp	vs32, vs0, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x2_SUB1', `
+#else
 .macro KERNEL1x2_SUB1
+#endif
 
 	lxvd2x	vs0, 0, AO
@@ -3360,9 +4260,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xvmaddadp	vs32, vs0, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE1x2', `
+#else
 .macro SAVE1x2
+#endif
 
 	mr	T1, CO
@@ -3380,13 +4288,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 16
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
 /*********************************************************************
* Macros for N=1, M=1                                                *
 *********************************************************************/
 
+#if defined(_AIX)
+define(`LOAD1x1_1', `
+#else
 .macro LOAD1x1_1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -3395,9 +4311,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	AO, AO, 8
 	addi	BO, BO, 8
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x1_I1', `
+#else
 .macro KERNEL1x1_I1
+#endif
 
 	lxsdx	vs8, 0, AO
@@ -3409,9 +4333,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmuldp	vs32, vs0, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x1_1', `
+#else
 .macro KERNEL1x1_1
+#endif
 
 	lxsdx	vs8, 0, AO
@@ -3423,9 +4355,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs32, vs0, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x1_2', `
+#else
 .macro KERNEL1x1_2
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -3437,16 +4377,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs32, vs8, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x1_E2', `
+#else
 .macro KERNEL1x1_E2
+#endif
 
 	xsmaddadp	vs32, vs8, vs28
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x1_SUBI1', `
+#else
 .macro KERNEL1x1_SUBI1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -3458,9 +4414,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmuldp	vs32, vs0, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`KERNEL1x1_SUB1', `
+#else
 .macro KERNEL1x1_SUB1
+#endif
 
 	lxsdx	vs0, 0, AO
@@ -3472,9 +4436,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	xsmaddadp	vs32, vs0, vs24
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
 
+#if defined(_AIX)
+define(`SAVE1x1', `
+#else
 .macro SAVE1x1
+#endif
 
 	mr	T1, CO
@@ -3492,5 +4464,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	addi	CO, CO, 8
 
+#if defined(_AIX)
+')
+#else
 .endm
+#endif
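[Annotation] Every hunk in this file applies the same mechanical change: the AIX assembler does not understand GNU-style .macro/.endm, so on _AIX each macro is expressed as an m4 define instead, evidently so the body can be expanded by m4 at build time (presumably wired up in the platform makefiles). The generic shape of each wrap, quoted from the pattern above with a placeholder name:

    #if defined(_AIX)
    define(`MACRO_NAME', `
    #else
    .macro MACRO_NAME
    #endif
            /* ...unchanged macro body... */
    #if defined(_AIX)
    ')
    #else
    .endm
    #endif

Both paths see the identical body; only the delimiters differ, which is why the diff never touches an instruction.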
diff --git a/kernel/power/dgemm_ncopy_8_power10.c b/kernel/power/dgemm_ncopy_8_power10.c
new file mode 100644
index 000000000..9836c2e7f
--- /dev/null
+++ b/kernel/power/dgemm_ncopy_8_power10.c
@@ -0,0 +1,326 @@
+/*********************************************************************/
+/* Copyright 2009, 2010 The University of Texas at Austin.           */
+/* All rights reserved.                                              */
+/*                                                                   */
+/* Redistribution and use in source and binary forms, with or        */
+/* without modification, are permitted provided that the following   */
+/* conditions are met:                                               */
+/*                                                                   */
+/*   1. Redistributions of source code must retain the above         */
+/*      copyright notice, this list of conditions and the following  */
+/*      disclaimer.                                                  */
+/*                                                                   */
+/*   2. Redistributions in binary form must reproduce the above      */
+/*      copyright notice, this list of conditions and the following  */
+/*      disclaimer in the documentation and/or other materials       */
+/*      provided with the distribution.                              */
+/*                                                                   */
+/*    THIS  SOFTWARE IS PROVIDED  BY THE  UNIVERSITY OF  TEXAS AT    */
+/*    AUSTIN  ``AS IS''  AND ANY  EXPRESS OR  IMPLIED WARRANTIES,    */
+/*    INCLUDING, BUT  NOT LIMITED  TO, THE IMPLIED  WARRANTIES OF    */
+/*    MERCHANTABILITY  AND FITNESS FOR  A PARTICULAR  PURPOSE ARE    */
+/*    DISCLAIMED.  IN  NO EVENT SHALL THE UNIVERSITY  OF TEXAS AT    */
+/*    AUSTIN OR CONTRIBUTORS BE  LIABLE FOR ANY DIRECT, INDIRECT,    */
+/*    INCIDENTAL,  SPECIAL, EXEMPLARY,  OR  CONSEQUENTIAL DAMAGES    */
+/*    (INCLUDING, BUT  NOT LIMITED TO,  PROCUREMENT OF SUBSTITUTE    */
+/*    GOODS  OR  SERVICES; LOSS  OF  USE,  DATA,  OR PROFITS;  OR    */
+/*    BUSINESS INTERRUPTION) HOWEVER CAUSED  AND ON ANY THEORY OF    */
+/*    LIABILITY, WHETHER  IN CONTRACT, STRICT  LIABILITY, OR TORT    */
+/*    (INCLUDING NEGLIGENCE OR OTHERWISE)  ARISING IN ANY WAY OUT    */
+/*    OF  THE  USE OF  THIS  SOFTWARE,  EVEN  IF ADVISED  OF  THE    */
+/*    POSSIBILITY OF SUCH DAMAGE.                                    */
+/*                                                                   */
+/* The views and conclusions contained in the software and           */
+/* documentation are those of the authors and should not be          */
+/* interpreted as representing official policies, either expressed   */
+/* or implied, of The University of Texas at Austin.                 */
+/*********************************************************************/
+
+#include <stdio.h>
+#include "common.h"
+#include <altivec.h>
+#define PREFETCHA(x, y) asm volatile ("dcbt %0, %1" : : "r" (x), "b" (y) : "memory");
+
+int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
+  BLASLONG i, j;
+
+  IFLOAT *aoffset;
+  IFLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4;
+  IFLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8;
+
+  IFLOAT *boffset;
+  IFLOAT ctemp01, ctemp02, ctemp03, ctemp04;
+  IFLOAT ctemp09, ctemp17, ctemp33;
+  IFLOAT ctemp25, ctemp41;
+  IFLOAT ctemp49, ctemp57;
+
+  aoffset = a;
+  boffset = b;
+
+  j = (n >> 3);
+  if (j > 0){
+    do{
+      aoffset1 = aoffset;
+      aoffset2 = aoffset1 + lda;
+      aoffset3 = aoffset2 + lda;
+      aoffset4 = aoffset3 + lda;
+      aoffset5 = aoffset4 + lda;
+      aoffset6 = aoffset5 + lda;
+      aoffset7 = aoffset6 + lda;
+      aoffset8 = aoffset7 + lda;
+      aoffset += 8 * lda;
+
+      i = (m >> 3);
+      if (i > 0){
+	do{
+	  PREFETCHA (aoffset1, 384);
+	  PREFETCHA (aoffset2, 384);
+	  PREFETCHA (aoffset3, 384);
+	  PREFETCHA (aoffset4, 384);
+	  PREFETCHA (aoffset5, 384);
+	  PREFETCHA (aoffset6, 384);
+	  PREFETCHA (aoffset7, 384);
+	  PREFETCHA (aoffset8, 384);
+	  __vector double va0 = *(__vector double*)(aoffset1 + 0);
+	  __vector double va1 = *(__vector double*)(aoffset1 + 2);
+	  __vector double va2 = *(__vector double*)(aoffset1 + 4);
+	  __vector double va3 = *(__vector double*)(aoffset1 + 6);
+
+	  __vector double va4 = *(__vector double*)(aoffset2 + 0);
+	  __vector double va5 = *(__vector double*)(aoffset2 + 2);
+	  __vector double va6 = *(__vector double*)(aoffset2 + 4);
+	  __vector double va7 = *(__vector double*)(aoffset2 + 6);
+
+	  __vector double va8 = *(__vector double*)(aoffset3 + 0);
+	  __vector double va9 = *(__vector double*)(aoffset3 + 2);
+	  __vector double va10 = *(__vector double*)(aoffset3 + 4);
+	  __vector double va11 = *(__vector double*)(aoffset3 + 6);
+
+	  __vector double va12 = *(__vector double*)(aoffset4 + 0);
+	  __vector double va13 = *(__vector double*)(aoffset4 + 2);
+	  __vector double va14 = *(__vector double*)(aoffset4 + 4);
+	  __vector double va15 = *(__vector double*)(aoffset4 + 6);
+
+	  __vector double va16 = *(__vector double*)(aoffset5 + 0);
+	  __vector double va17 = *(__vector double*)(aoffset5 + 2);
+	  __vector double va18 = *(__vector double*)(aoffset5 + 4);
+	  __vector double va19 = *(__vector double*)(aoffset5 + 6);
+
+	  __vector double va20 = *(__vector double*)(aoffset6 + 0);
+	  __vector double va21 = *(__vector double*)(aoffset6 + 2);
+	  __vector double va22 = *(__vector double*)(aoffset6 + 4);
+	  __vector double va23 = *(__vector double*)(aoffset6 + 6);
+
+	  __vector double va24 = *(__vector double*)(aoffset7 + 0);
+	  __vector double va25 = *(__vector double*)(aoffset7 + 2);
+	  __vector double va26 = *(__vector double*)(aoffset7 + 4);
+	  __vector double va27 = *(__vector double*)(aoffset7 + 6);
+
+	  __vector double va28 = *(__vector double*)(aoffset8 + 0);
+	  __vector double va29 = *(__vector double*)(aoffset8 + 2);
+	  __vector double va30 = *(__vector double*)(aoffset8 + 4);
+	  __vector double va31 = *(__vector double*)(aoffset8 + 6);
+
+	  *(__vector double*)(boffset +  0) = vec_xxpermdi(va0, va4, 0);
+	  *(__vector double*)(boffset +  2) = vec_xxpermdi(va8, va12, 0);
+	  *(__vector double*)(boffset +  4) = vec_xxpermdi(va16, va20, 0);
+	  *(__vector double*)(boffset +  6) = vec_xxpermdi(va24, va28, 0);
+	  *(__vector double*)(boffset +  8) = vec_xxpermdi(va0, va4, 3);
+	  *(__vector double*)(boffset + 10) = vec_xxpermdi(va8, va12, 3);
+	  *(__vector double*)(boffset + 12) = vec_xxpermdi(va16, va20, 3);
+	  *(__vector double*)(boffset + 14) = vec_xxpermdi(va24, va28, 3);
+
+	  *(__vector double*)(boffset + 16) = vec_xxpermdi(va1, va5, 0);
+	  *(__vector double*)(boffset + 18) = vec_xxpermdi(va9, va13, 0);
+	  *(__vector double*)(boffset + 20) = vec_xxpermdi(va17, va21, 0);
+	  *(__vector double*)(boffset + 22) = vec_xxpermdi(va25, va29, 0);
+	  *(__vector double*)(boffset + 24) = vec_xxpermdi(va1, va5, 3);
+	  *(__vector double*)(boffset + 26) = vec_xxpermdi(va9, va13, 3);
+	  *(__vector double*)(boffset + 28) = vec_xxpermdi(va17, va21, 3);
+	  *(__vector double*)(boffset + 30) = vec_xxpermdi(va25, va29, 3);
+
+	  *(__vector double*)(boffset + 32) = vec_xxpermdi(va2, va6, 0);
+	  *(__vector double*)(boffset + 34) = vec_xxpermdi(va10, va14, 0);
+	  *(__vector double*)(boffset + 36) = vec_xxpermdi(va18, va22, 0);
+	  *(__vector double*)(boffset + 38) = vec_xxpermdi(va26, va30, 0);
+	  *(__vector double*)(boffset + 40) = vec_xxpermdi(va2, va6, 3);
+	  *(__vector double*)(boffset + 42) = vec_xxpermdi(va10, va14, 3);
+	  *(__vector double*)(boffset + 44) = vec_xxpermdi(va18, va22, 3);
+	  *(__vector double*)(boffset + 46) = vec_xxpermdi(va26, va30, 3);
+
+	  *(__vector double*)(boffset + 48) = vec_xxpermdi(va3, va7, 0);
+	  *(__vector double*)(boffset + 50) = vec_xxpermdi(va11, va15, 0);
+	  *(__vector double*)(boffset + 52) = vec_xxpermdi(va19, va23, 0);
+	  *(__vector double*)(boffset + 54) = vec_xxpermdi(va27, va31, 0);
+	  *(__vector double*)(boffset + 56) = vec_xxpermdi(va3, va7, 3);
+	  *(__vector double*)(boffset + 58) = vec_xxpermdi(va11, va15, 3);
+	  *(__vector double*)(boffset + 60) = vec_xxpermdi(va19, va23, 3);
+	  *(__vector double*)(boffset + 62) = vec_xxpermdi(va27, va31, 3);
+	  aoffset1 += 8;
+	  aoffset2 += 8;
+	  aoffset3 += 8;
+	  aoffset4 += 8;
+	  aoffset5 += 8;
+	  aoffset6 += 8;
+	  aoffset7 += 8;
+	  aoffset8 += 8;
+	  boffset += 64;
+	  i --;
+	}while(i > 0);
+      }
+
+      i = (m & 7);
+      if (i > 0){
+	do{
+	  ctemp01 = *(aoffset1 + 0);
+	  ctemp09 = *(aoffset2 + 0);
+	  ctemp17 = *(aoffset3 + 0);
+	  ctemp25 = *(aoffset4 + 0);
+	  ctemp33 = *(aoffset5 + 0);
+	  ctemp41 = *(aoffset6 + 0);
+	  ctemp49 = *(aoffset7 + 0);
+	  ctemp57 = *(aoffset8 + 0);
+
+	  *(boffset + 0) = ctemp01;
+	  *(boffset + 1) = ctemp09;
+	  *(boffset + 2) = ctemp17;
+	  *(boffset + 3) = ctemp25;
+	  *(boffset + 4) = ctemp33;
+	  *(boffset + 5) = ctemp41;
+	  *(boffset + 6) = ctemp49;
+	  *(boffset + 7) = ctemp57;
+
+	  aoffset1 ++;
+	  aoffset2 ++;
+	  aoffset3 ++;
+	  aoffset4 ++;
+	  aoffset5 ++;
+	  aoffset6 ++;
+	  aoffset7 ++;
+	  aoffset8 ++;
+
+	  boffset += 8;
+	  i --;
+	}while(i > 0);
+      }
+      j--;
+    }while(j > 0);
+  } /* end of if(j > 0) */
+
+  if (n & 4){
+    aoffset1 = aoffset;
+    aoffset2 = aoffset1 + lda;
+    aoffset3 = aoffset2 + lda;
+    aoffset4 = aoffset3 + lda;
+    aoffset += 4 * lda;
+
+    i = (m >> 2);
+    if (i > 0){
+      do{
+	PREFETCHA (aoffset1, 384);
+	PREFETCHA (aoffset2, 384);
+	PREFETCHA (aoffset3, 384);
+	PREFETCHA (aoffset4, 384);
+	__vector double va0 = *(__vector double*)(aoffset1 + 0);
+	__vector double va1 = *(__vector double*)(aoffset1 + 2);
+	__vector double va2 = *(__vector double*)(aoffset2 + 0);
+	__vector double va3 = *(__vector double*)(aoffset2 + 2);
+	__vector double va4 = *(__vector double*)(aoffset3 + 0);
+	__vector double va5 = *(__vector double*)(aoffset3 + 2);
+	__vector double va6 = *(__vector double*)(aoffset4 + 0);
+	__vector double va7 = *(__vector double*)(aoffset4 + 2);
+	*(__vector double*)(boffset + 0) = vec_xxpermdi(va0, va2, 0);
+	*(__vector double*)(boffset + 2) = vec_xxpermdi(va4, va6, 0);
+	*(__vector double*)(boffset + 4) = vec_xxpermdi(va0, va2, 3);
+	*(__vector double*)(boffset + 6) = vec_xxpermdi(va4, va6, 3);
+	*(__vector double*)(boffset + 8) = vec_xxpermdi(va1, va3, 0);
+	*(__vector double*)(boffset + 10) = vec_xxpermdi(va5, va7, 0);
+	*(__vector double*)(boffset + 12) = vec_xxpermdi(va1, va3, 3);
+	*(__vector double*)(boffset + 14) = vec_xxpermdi(va5, va7, 3);
+
+	aoffset1 += 4;
+	aoffset2 += 4;
+	aoffset3 += 4;
+	aoffset4 += 4;
+	boffset += 16;
+	i --;
+      }while(i > 0);
+    }
+
+    i = (m & 3);
+    if (i > 0){
+      do{
+	ctemp01 = *(aoffset1 + 0);
+	ctemp02 = *(aoffset2 + 0);
+	ctemp03 = *(aoffset3 + 0);
+	ctemp04 = *(aoffset4 + 0);
+
+	*(boffset + 0) = ctemp01;
+	*(boffset + 1) = ctemp02;
+	*(boffset + 2) = ctemp03;
+	*(boffset + 3) = ctemp04;
+
+	aoffset1 ++;
+	aoffset2 ++;
+	aoffset3 ++;
+	aoffset4 ++;
+
+	boffset += 4;
+	i --;
+      }while(i > 0);
+    }
+  } /* end of if(n & 4) */
+
+  if (n & 2){
+    aoffset1 = aoffset;
+    aoffset2 = aoffset1 + lda;
+    aoffset += 2 * lda;
+
+    i = (m >> 1);
+    if (i > 0){
+      do{
+	__vector double va0 = *(__vector double*)(aoffset1 + 0);
+	__vector double va1 = *(__vector double*)(aoffset2 + 0);
+	*(__vector double*)(boffset + 0) = vec_xxpermdi(va0, va1, 0);
+	*(__vector double*)(boffset + 2) = vec_xxpermdi(va0, va1, 3);
+
+	aoffset1 += 2;
+	aoffset2 += 2;
+	boffset += 4;
+	i --;
+      }while(i > 0);
+    }
+
+    if (m & 1){
+      ctemp01 = *(aoffset1 + 0);
+      ctemp02 = *(aoffset2 + 0);
+
+      *(boffset + 0) = ctemp01;
+      *(boffset + 1) = ctemp02;
+
+      aoffset1 ++;
+      aoffset2 ++;
+      boffset += 2;
+    }
+  } /* end of if(n & 2) */
+
+  if (n & 1){
+    aoffset1 = aoffset;
+
+    i = m;
+    if (i > 0){
+      do{
+	ctemp01 = *(aoffset1 + 0);
+
+	*(boffset + 0) = ctemp01;
+
+	aoffset1 ++;
+	boffset ++;
+	i --;
+      }while(i > 0);
+    }
+
+  } /* end of if(n & 1) */
+
+  return 0;
+}
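[Annotation] The workhorse of this copy kernel is vec_xxpermdi, which builds its result from one doubleword of each input; as used here on little-endian POWER, mask 0 pairs the first elements of the two inputs and mask 3 pairs the second elements, which is exactly the 2x2 transpose step the packing needs. A small stand-alone check (POWER-only; requires altivec.h and VSX):

    #include <altivec.h>
    #include <stdio.h>

    int main(void)
    {
      __vector double a = { 1.0, 2.0 };            /* column j of A     */
      __vector double b = { 3.0, 4.0 };            /* column j+1 of A   */
      __vector double lo = vec_xxpermdi(a, b, 0);  /* { 1.0, 3.0 }      */
      __vector double hi = vec_xxpermdi(a, b, 3);  /* { 2.0, 4.0 }      */
      printf("%g %g / %g %g\n", lo[0], lo[1], hi[0], hi[1]);
      return 0;
    }

The element pairing stated in the comments is for little-endian targets; on big-endian the doubleword selection is mirrored.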
addi BO, BO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x2', ` +#else .macro COPY_4x2 +#endif lxvd2x vs0, o0, A0 addi A0, A0, 16 @@ -348,14 +376,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x1', ` +#else .macro COPY_4x1 +#endif lxsdx vs0, o0, A0 addi A0, A0, 8 @@ -382,14 +418,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x16', ` +#else .macro COPY_2x16 +#endif lxvd2x vs0, o0, A0 lxvd2x vs1, o16, A0 @@ -459,14 +503,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x8', ` +#else .macro COPY_2x8 +#endif lxvd2x vs0, o0, A0 lxvd2x vs1, o16, A0 @@ -506,14 +558,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x4', ` +#else .macro COPY_2x4 +#endif lxvd2x vs0, o0, A0 lxvd2x vs1, o16, A0 @@ -539,14 +599,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x2', ` +#else .macro COPY_2x2 +#endif lxvd2x vs0, o0, A0 addi A0, A0, 16 @@ -565,14 +633,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x1', ` +#else .macro COPY_2x1 +#endif lxsdx vs0, o0, A0 addi A0, A0, 8 @@ -589,14 +665,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
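(A note on operands such as o0, o8, o16, o32 and o48 used in these macros: lxvd2x, stxvd2x and lxsdx exist only in indexed RA+RB form with no displacement field, so the byte offsets have to come from registers. The enclosing kernel sources, which are not part of this diff, presumably define o0 as 0 - r0 in the RA slot reads as literal zero - and preload general-purpose registers named o8/o16/o32/o48 with those constants. A hedged sketch of the assumed convention:

	li	o16, 16			# assumed one-time setup in the kernel prologue
	li	o32, 32
	li	o48, 48

	lxvd2x	vs0, o0, A0		# vs0 = 16 bytes at A0 + 0
	lxvd2x	vs1, o16, A0		# vs1 = 16 bytes at A0 + 16
	stxvd2x	vs1, o48, BO		# store them to BO + 48
)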
addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x16', ` +#else .macro COPY_1x16 +#endif lxvd2x vs0, o0, A0 lxvd2x vs1, o16, A0 @@ -622,14 +706,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x8', ` +#else .macro COPY_1x8 +#endif lxvd2x vs0, o0, A0 lxvd2x vs1, o16, A0 @@ -645,14 +737,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x4', ` +#else .macro COPY_1x4 +#endif lxvd2x vs0, o0, A0 lxvd2x vs1, o16, A0 @@ -664,14 +764,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x2', ` +#else .macro COPY_1x2 +#endif lxvd2x vs0, o0, A0 addi A0, A0, 16 @@ -681,14 +789,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x1', ` +#else .macro COPY_1x1 +#endif lxsdx vs0, o0, A0 addi A0, A0, 8 @@ -698,5 +814,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/dgemm_tcopy_macros_16_power8.S b/kernel/power/dgemm_tcopy_macros_16_power8.S index 68e53bcf2..6c5b8ed62 100644 --- a/kernel/power/dgemm_tcopy_macros_16_power8.S +++ b/kernel/power/dgemm_tcopy_macros_16_power8.S @@ -38,7 +38,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=4 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x16', ` +#else .macro COPY_4x16 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -140,14 +144,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
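(For reference, the vec_xxpermdi pairing that the C copy kernel earlier in this patch relies on: with doubleword selector 0 it gathers the first element of each operand, with selector 3 the second, which amounts to a 2x2 transpose. A self-contained sketch, assuming GCC vector extensions on the little-endian POWER targets these kernels are built for; compile with gcc -O2 -mvsx:

	#include <altivec.h>
	#include <stdio.h>

	int main (void)
	{
		__vector double row0 = { 1.0, 2.0 };	/* a[0][0], a[0][1] */
		__vector double row1 = { 3.0, 4.0 };	/* a[1][0], a[1][1] */

		/* selector 0: first doubleword of each operand -> first column */
		__vector double col0 = vec_xxpermdi (row0, row1, 0);	/* { 1.0, 3.0 } */
		/* selector 3: second doubleword of each operand -> second column */
		__vector double col1 = vec_xxpermdi (row0, row1, 3);	/* { 2.0, 4.0 } */

		printf ("%g %g | %g %g\n", col0[0], col0[1], col1[0], col1[1]);
		return 0;
	}
)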
stxvd2x vs10, o32, T1 stxvd2x vs11, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x8', ` +#else .macro COPY_4x8 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -205,14 +217,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs46, o32, T1 stxvd2x vs47, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x4', ` +#else .macro COPY_4x4 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -250,14 +270,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs38, o32, T1 stxvd2x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x2', ` +#else .macro COPY_4x2 +#endif lxvd2x vs32, o0, A0 addi A0, A0, 16 @@ -285,14 +313,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x1', ` +#else .macro COPY_4x1 +#endif lxsdx vs32, o0, A0 addi A0, A0, 8 @@ -322,14 +358,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsdx vs35, o8, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x16', ` +#else .macro COPY_2x16 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -383,14 +427,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs46, o32, T1 stxvd2x vs47, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x8', ` +#else .macro COPY_2x8 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -420,14 +472,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs38, o32, T1 stxvd2x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x4', ` +#else .macro COPY_2x4 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -447,14 +507,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
stxvd2x vs34, o32, T1 stxvd2x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x2', ` +#else .macro COPY_2x2 +#endif lxvd2x vs32, o0, A0 addi A0, A0, 16 @@ -470,14 +538,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs33, o16, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x1', ` +#else .macro COPY_2x1 +#endif lxsdx vs32, o0, A0 addi A0, A0, 8 @@ -493,14 +569,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsdx vs33, o8, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x16', ` +#else .macro COPY_1x16 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -528,14 +612,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs38, o32, T1 stxvd2x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x8', ` +#else .macro COPY_1x8 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -551,14 +643,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs34, o32, T1 stxvd2x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x4', ` +#else .macro COPY_1x4 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -570,14 +670,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs32, o0, T1 stxvd2x vs33, o16, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x2', ` +#else .macro COPY_1x2 +#endif lxvd2x vs32, o0, A0 addi A0, A0, 16 @@ -587,14 +695,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs32, o0, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x1', ` +#else .macro COPY_1x1 +#endif lxsdx vs32, o0, A0 addi A0, A0, 8 @@ -604,5 +720,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
stxsdx vs32, o0, T1 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/dgemv_n.c b/kernel/power/dgemv_n.c index b458e11fc..ac365b3b2 100644 --- a/kernel/power/dgemv_n.c +++ b/kernel/power/dgemv_n.c @@ -38,9 +38,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "dgemv_n_microk_power8.c" #endif +#endif #define NBMAX 4096 @@ -145,7 +147,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO BLASLONG m3; BLASLONG n2; BLASLONG lda4 = lda << 2; - FLOAT xbuffer[8] __attribute__ ((aligned (16)));; + FLOAT xbuffer[8] __attribute__ ((aligned (16))); FLOAT *ybuffer; if ( m < 1 ) return(0); diff --git a/kernel/power/dgemv_n_microk_power10.c b/kernel/power/dgemv_n_microk_power10.c new file mode 100644 index 000000000..e47de2cb5 --- /dev/null +++ b/kernel/power/dgemv_n_microk_power10.c @@ -0,0 +1,402 @@ +/*************************************************************************** +Copyright (c) 2013-2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#define HAVE_KERNEL_4x4 1 + +static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y, double alpha) +{ + double *a0; + double *a1; + double *a2; + double *a3; + + __asm__ + ( + "lxvp 40, 0(%10) \n\t" // x0, x1 + XXSPLTD_S(32,%x9,0) // alpha, alpha + + "sldi %6, %13, 3 \n\t" // lda * sizeof (double) + + "xvmuldp 34, 41, 32 \n\t" // x0 * alpha, x1 * alpha + "xvmuldp 35, 40, 32 \n\t" // x2 * alpha, x3 * alpha + + "add %4, %3, %6 \n\t" // a0 = ap, a1 = a0 + lda + "add %6, %6, %6 \n\t" // 2 * lda + + XXSPLTD_S(32,34,1) // x0 * alpha, x0 * alpha + XXSPLTD_S(33,34,0) // x1 * alpha, x1 * alpha + XXSPLTD_S(34,35,1) // x2 * alpha, x2 * alpha + XXSPLTD_S(35,35,0) // x3 * alpha, x3 * alpha + + "add %5, %3, %6 \n\t" // a2 = a0 + 2 * lda + "add %6, %4, %6 \n\t" // a3 = a1 + 2 * lda + + "dcbt 0, %3 \n\t" + "dcbt 0, %4 \n\t" + "dcbt 0, %5 \n\t" + "dcbt 0, %6 \n\t" + + "lxvp 40, 0(%3) \n\t" // a0[0], a0[1] + + "lxvp 42, 0(%4) \n\t" // a1[0], a1[1] + + "lxvp 44, 0(%5) \n\t" // a2[0], a2[1] + + "lxvp 46, 0(%6) \n\t" // a3[0], a3[1] + + "dcbt 0, %2 \n\t" + + "addi %3, %3, 32 \n\t" + "addi %4, %4, 32 \n\t" + "addi %5, %5, 32 \n\t" + "addi %6, %6, 32 \n\t" + + "addic. %1, %1, -4 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "lxvp 36, 0(%2) \n\t" // y0, y1 + + "xvmaddadp 36, 40, 32 \n\t" + "xvmaddadp 37, 41, 32 \n\t" + + "lxvp 40, 0(%3) \n\t" // a0[0], a0[1] + + "xvmaddadp 36, 42, 33 \n\t" + "addi %3, %3, 32 \n\t" + "xvmaddadp 37, 43, 33 \n\t" + + "lxvp 42, 0(%4) \n\t" // a1[0], a1[1] + + "xvmaddadp 36, 44, 34 \n\t" + "addi %4, %4, 32 \n\t" + "xvmaddadp 37, 45, 34 \n\t" + + "lxvp 44, 0(%5) \n\t" // a2[0], a2[1] + + "xvmaddadp 36, 46, 35 \n\t" + "addi %5, %5, 32 \n\t" + "xvmaddadp 37, 47, 35 \n\t" + + "stxvp 36, 0(%2) \n\t" // y0, y1 + + "lxvp 46, 0(%6) \n\t" // a3[0], a3[1] + + "addi %6, %6, 32 \n\t" + "addi %2, %2, 32 \n\t" + + "addic. %1, %1, -4 \n\t" + "ble two%= \n\t" + + + "lxvp 36, 0(%2) \n\t" // y0, y1 + + "xvmaddadp 36, 40, 32 \n\t" + "xvmaddadp 37, 41, 32 \n\t" + + "lxvp 40, 0(%3) \n\t" // a0[0], a0[1] + + "xvmaddadp 36, 42, 33 \n\t" + "addi %3, %3, 32 \n\t" + "xvmaddadp 37, 43, 33 \n\t" + + "lxvp 42, 0(%4) \n\t" // a1[0], a1[1] + + "xvmaddadp 36, 44, 34 \n\t" + "addi %4, %4, 32 \n\t" + "xvmaddadp 37, 45, 34 \n\t" + + "lxvp 44, 0(%5) \n\t" // a2[0], a2[1] + + "xvmaddadp 36, 46, 35 \n\t" + "addi %5, %5, 32 \n\t" + "xvmaddadp 37, 47, 35 \n\t" + + "stxvp 36, 0(%2) \n\t" // y0, y1 + + "lxvp 46, 0(%6) \n\t" // a3[0], a3[1] + + "addi %6, %6, 32 \n\t" + "addi %2, %2, 32 \n\t" + + "addic. %1, %1, -4 \n\t" + "ble two%= \n\t" + + + "lxvp 36, 0(%2) \n\t" // y0, y1 + + "xvmaddadp 36, 40, 32 \n\t" + "xvmaddadp 37, 41, 32 \n\t" + + "lxvp 40, 0(%3) \n\t" // a0[0], a0[1] + + "xvmaddadp 36, 42, 33 \n\t" + "addi %3, %3, 32 \n\t" + "xvmaddadp 37, 43, 33 \n\t" + + "lxvp 42, 0(%4) \n\t" // a1[0], a1[1] + + "xvmaddadp 36, 44, 34 \n\t" + "addi %4, %4, 32 \n\t" + "xvmaddadp 37, 45, 34 \n\t" + + "lxvp 44, 0(%5) \n\t" // a2[0], a2[1] + + "xvmaddadp 36, 46, 35 \n\t" + "addi %5, %5, 32 \n\t" + "xvmaddadp 37, 47, 35 \n\t" + + "stxvp 36, 0(%2) \n\t" // y0, y1 + + "lxvp 46, 0(%6) \n\t" // a3[0], a3[1] + + "addi %6, %6, 32 \n\t" + "addi %2, %2, 32 \n\t" + + "addic. 
%1, %1, -4 \n\t" + "ble two%= \n\t" + + + "lxvp 36, 0(%2) \n\t" // y0, y1 + + "xvmaddadp 36, 40, 32 \n\t" + "xvmaddadp 37, 41, 32 \n\t" + + "lxvp 40, 0(%3) \n\t" // a0[0], a0[1] + + "xvmaddadp 36, 42, 33 \n\t" + "addi %3, %3, 32 \n\t" + "xvmaddadp 37, 43, 33 \n\t" + + "lxvp 42, 0(%4) \n\t" // a1[0], a1[1] + + "xvmaddadp 36, 44, 34 \n\t" + "addi %4, %4, 32 \n\t" + "xvmaddadp 37, 45, 34 \n\t" + + "lxvp 44, 0(%5) \n\t" // a2[0], a2[1] + + "xvmaddadp 36, 46, 35 \n\t" + "addi %5, %5, 32 \n\t" + "xvmaddadp 37, 47, 35 \n\t" + + "stxvp 36, 0(%2) \n\t" // y0, y1 + + "lxvp 46, 0(%6) \n\t" // a3[0], a3[1] + + "addi %6, %6, 32 \n\t" + "addi %2, %2, 32 \n\t" + + "addic. %1, %1, -4 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "lxvp 36, 0(%2) \n\t" // y0, y1 + + "xvmaddadp 36, 40, 32 \n\t" + "xvmaddadp 37, 41, 32 \n\t" + + "xvmaddadp 36, 42, 33 \n\t" + "xvmaddadp 37, 43, 33 \n\t" + + "xvmaddadp 36, 44, 34 \n\t" + "xvmaddadp 37, 45, 34 \n\t" + + "xvmaddadp 36, 46, 35 \n\t" + "xvmaddadp 37, 47, 35 \n\t" + + "stxvp 36, 0(%2) \n\t" // y0, y1 + + "#n=%1 ap=%8=%12 lda=%13 x=%7=%10 y=%0=%2 alpha=%9 o16=%11\n" + "#a0=%3 a1=%4 a2=%5 a3=%6" + : + "+m" (*y), + "+r" (n), // 1 + "+b" (y), // 2 + "=b" (a0), // 3 + "=b" (a1), // 4 + "=&b" (a2), // 5 + "=&b" (a3) // 6 + : + "m" (*x), + "m" (*ap), + "d" (alpha), // 9 + "r" (x), // 10 + "b" (16), // 11 + "3" (ap), // 12 + "4" (lda) // 13 + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" + ); +} +static void dgemv_kernel_4x8 (long n, double *ap, long lda, double *x, double *y, double alpha) +{ + + double *a0; + double *a1; + double *a2; + double *a3; + double *a4; + double *a5; + double *a6; + double *a7; + long tmp; + __asm__ + ( + "lxvp 34, 0( %15) \n\t" // x0, x1 + "lxvp 38, 32( %15) \n\t" // x4, x5 + + XXSPLTD_S(58,%x14,0) // alpha, alpha + "sldi %10, %17, 3 \n\t" // lda * sizeof (double) + "xvmuldp 34, 34, 58 \n\t" // x0 * alpha, x1 * alpha + "xvmuldp 35, 35, 58 \n\t" // x2 * alpha, x3 * alpha + "xvmuldp 38, 38, 58 \n\t" // x4 * alpha, x5 * alpha + "xvmuldp 39, 39, 58 \n\t" // x6 * alpha, x7 * alpha + + "li %11, 32 \n\t" + + "add %4, %3, %10 \n\t" // a0 = ap, a1 = a0 + lda + "add %10, %10, %10 \n\t" // 2 * lda + XXSPLTD_S(32,34,1) // x0 * alpha, x0 * alpha + XXSPLTD_S(33,34,0) // x1 * alpha, x1 * alpha + XXSPLTD_S(34,35,1) // x2 * alpha, x2 * alpha + XXSPLTD_S(35,35,0) // x3 * alpha, x3 * alpha + XXSPLTD_S(48,39,1) // x6 * alpha, x6 * alpha + XXSPLTD_S(49,39,0) // x7 * alpha, x7 * alpha + XXSPLTD_S(39,38,0) // x5 * alpha, x5 * alpha + XXSPLTD_S(38,38,1) // x4 * alpha, x4 * alpha + + "add %5, %3, %10 \n\t" // a2 = a0 + 2 * lda + "add %6, %4, %10 \n\t" // a3 = a1 + 2 * lda + "add %7, %5, %10 \n\t" // a4 = a2 + 2 * lda + "add %8, %6, %10 \n\t" // a5 = a3 + 2 * lda + "add %9, %7, %10 \n\t" // a6 = a4 + 2 * lda + "add %10, %8, %10 \n\t" // a7 = a5 + 2 * lda + + "lxvp 40, 0( %3) \n\t" // a0[0], a0[1] + "lxvp 42, 0( %4) \n\t" // a1[0], a1[1] + "lxvp 44, 0( %5) \n\t" // a2[0], a2[1] + "lxvp 46, 0( %6) \n\t" // a3[0], a3[1] + "lxvp 50, 0( %7) \n\t" // a4[0] + "lxvp 52, 0( %8) \n\t" // a5[0] + "lxvp 54, 0( %9) \n\t" // a6[0] + "lxvp 56, 0( %10) \n\t" // a7[0] + + + "addic. 
%1, %1, -4 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "lxvp 36, 0( %2) \n\t" // y0, y1 + + "xvmaddadp 36, 40, 34 \n\t" + "xvmaddadp 37, 41, 34 \n\t" + "lxvpx 40, %3, %11 \n\t" // a0[0], a0[1] + "xvmaddadp 36, 42, 35 \n\t" + "xvmaddadp 37, 43, 35 \n\t" + "lxvpx 42, %4, %11 \n\t" // a1[0], a1[1] + "xvmaddadp 36, 44, 32 \n\t" + "xvmaddadp 37, 45, 32 \n\t" + "lxvpx 44, %5, %11 \n\t" // a2[0], a2[1] + "xvmaddadp 36, 46, 33 \n\t" + "xvmaddadp 37, 47, 33 \n\t" + "lxvpx 46, %6, %11 \n\t" // a3[0], a3[1] + "xvmaddadp 36, 50, 48 \n\t" + "xvmaddadp 37, 51, 48 \n\t" + "lxvpx 50, %7, %11 \n\t" // a4[0] + "xvmaddadp 36, 52, 49 \n\t" + "xvmaddadp 37, 53, 49 \n\t" + "lxvpx 52, %8, %11 \n\t" // a5[0] + "xvmaddadp 36, 54, 38 \n\t" + "xvmaddadp 37, 55, 38 \n\t" + "lxvpx 54, %9, %11 \n\t" // a6[0] + "xvmaddadp 36, 56, 39 \n\t" + "xvmaddadp 37, 57, 39 \n\t" + "lxvpx 56, %10, %11 \n\t" // a7[0] + "addi %11, %11, 32 \n\t" + + "stxvp 36, 0( %2) \n\t" // y0, y1 + "addi %2, %2, 32 \n\t" + + "addic. %1, %1, -4 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "lxvp 36, 0( %2) \n\t" // y0, y1 + "xvmaddadp 36, 40, 34 \n\t" + "xvmaddadp 37, 41, 34 \n\t" + "xvmaddadp 36, 42, 35 \n\t" + "xvmaddadp 37, 43, 35 \n\t" + "xvmaddadp 36, 44, 32 \n\t" + "xvmaddadp 37, 45, 32 \n\t" + "xvmaddadp 36, 46, 33 \n\t" + "xvmaddadp 37, 47, 33 \n\t" + "xvmaddadp 36, 50, 48 \n\t" + "xvmaddadp 37, 51, 48 \n\t" + "xvmaddadp 36, 52, 49 \n\t" + "xvmaddadp 37, 53, 49 \n\t" + "xvmaddadp 36, 54, 38 \n\t" + "xvmaddadp 37, 55, 38 \n\t" + "xvmaddadp 36, 56, 39 \n\t" + "xvmaddadp 37, 57, 39 \n\t" + "stxvp 36, 0( %2) \n\t" // y0, y1 + + : + "+m" (*y), + "+r" (n), // 1 + "+b" (y), // 2 + "=b" (a0), // 3 + "=b" (a1), // 4 + "=&b" (a2), // 5 + "=&b" (a3), // 6 + "=&b" (a4), // 7 + "=&b" (a5), // 8 + "=&b" (a6), // 9 + "=&b" (a7), // 10 + "=b" (tmp) + : + "m" (*x), + "m" (*ap), + "d" (alpha), // 14 + "r" (x), // 15 + "3" (ap), // 16 + "4" (lda) // 17 + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", "vs48", + "vs49","vs50","vs51","vs52","vs53","vs54","vs55","vs56", "vs57", "vs58" + ); +} diff --git a/kernel/power/dgemv_n_microk_power8.c b/kernel/power/dgemv_n_microk_power8.c index ae4fe9009..c2eb3968c 100644 --- a/kernel/power/dgemv_n_microk_power8.c +++ b/kernel/power/dgemv_n_microk_power8.c @@ -46,7 +46,7 @@ static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y ( "lxvd2x 34, 0, %10 \n\t" // x0, x1 "lxvd2x 35, %11, %10 \n\t" // x2, x3 - "xxspltd 32, %x9, 0 \n\t" // alpha, alpha + XXSPLTD_S(32,%x9,0) // alpha, alpha "sldi %6, %13, 3 \n\t" // lda * sizeof (double) @@ -56,10 +56,10 @@ static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y "add %4, %3, %6 \n\t" // a0 = ap, a1 = a0 + lda "add %6, %6, %6 \n\t" // 2 * lda - "xxspltd 32, 34, 0 \n\t" // x0 * alpha, x0 * alpha - "xxspltd 33, 34, 1 \n\t" // x1 * alpha, x1 * alpha - "xxspltd 34, 35, 0 \n\t" // x2 * alpha, x2 * alpha - "xxspltd 35, 35, 1 \n\t" // x3 * alpha, x3 * alpha + XXSPLTD_S(32,34,0) // x0 * alpha, x0 * alpha + XXSPLTD_S(33,34,1) // x1 * alpha, x1 * alpha + XXSPLTD_S(34,35,0) // x2 * alpha, x2 * alpha + XXSPLTD_S(35,35,1) // x3 * alpha, x3 * alpha "add %5, %3, %6 \n\t" // a2 = a0 + 2 * lda "add %6, %4, %6 \n\t" // a3 = a1 + 2 * lda @@ -89,10 +89,10 @@ static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y "addi %6, %6, 32 \n\t" "addic. 
%1, %1, -4 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "lxvd2x 36, 0, %2 \n\t" // y0, y1 "lxvd2x 37, %11, %2 \n\t" // y2, y3 @@ -131,7 +131,7 @@ static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y "addi %2, %2, 32 \n\t" "addic. %1, %1, -4 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" "lxvd2x 36, 0, %2 \n\t" // y0, y1 @@ -171,7 +171,7 @@ static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y "addi %2, %2, 32 \n\t" "addic. %1, %1, -4 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" "lxvd2x 36, 0, %2 \n\t" // y0, y1 @@ -211,7 +211,7 @@ static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y "addi %2, %2, 32 \n\t" "addic. %1, %1, -4 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" "lxvd2x 36, 0, %2 \n\t" // y0, y1 @@ -251,9 +251,9 @@ static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y "addi %2, %2, 32 \n\t" "addic. %1, %1, -4 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "lxvd2x 36, 0, %2 \n\t" // y0, y1 "lxvd2x 37, %11, %2 \n\t" // y2, y3 diff --git a/kernel/power/dgemv_n_power10.c b/kernel/power/dgemv_n_power10.c new file mode 100644 index 000000000..aba15ab4e --- /dev/null +++ b/kernel/power/dgemv_n_power10.c @@ -0,0 +1,406 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+
+#include "common.h"
+
+#include "dgemv_n_microk_power10.c"
+
+#define NBMAX 4096
+
+#ifndef HAVE_KERNEL_4x4
+
+static void dgemv_kernel_4x4(BLASLONG n, FLOAT *a_ptr, BLASLONG lda, FLOAT *xo, FLOAT *y, FLOAT alpha)
+{
+ BLASLONG i;
+ FLOAT x[4] __attribute__ ((aligned (16)));
+ FLOAT *a0 = a_ptr;
+ FLOAT *a1 = a0 + lda;
+ FLOAT *a2 = a1 + lda;
+ FLOAT *a3 = a2 + lda;
+
+
+ for ( i=0; i<4; i++)
+ x[i] = xo[i] * alpha;
+
+ for ( i=0; i< n; i+=4 )
+ {
+ y[i] += a0[i]*x[0] + a1[i]*x[1] + a2[i]*x[2] + a3[i]*x[3];
+ y[i+1] += a0[i+1]*x[0] + a1[i+1]*x[1] + a2[i+1]*x[2] + a3[i+1]*x[3];
+ y[i+2] += a0[i+2]*x[0] + a1[i+2]*x[1] + a2[i+2]*x[2] + a3[i+2]*x[3];
+ y[i+3] += a0[i+3]*x[0] + a1[i+3]*x[1] + a2[i+3]*x[2] + a3[i+3]*x[3];
+ }
+}
+
+#endif
+
+#ifndef HAVE_KERNEL_4x2
+
+static void dgemv_kernel_4x2(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *xo, FLOAT *y, FLOAT alpha)
+{
+ BLASLONG i;
+ FLOAT x[4] __attribute__ ((aligned (16)));
+
+ for ( i=0; i<2; i++)
+ x[i] = xo[i] * alpha;
+
+ for ( i=0; i< n; i+=4 )
+ {
+ y[i] += a0[i]*x[0] + a1[i]*x[1];
+ y[i+1] += a0[i+1]*x[0] + a1[i+1]*x[1];
+ y[i+2] += a0[i+2]*x[0] + a1[i+2]*x[1];
+ y[i+3] += a0[i+3]*x[0] + a1[i+3]*x[1];
+ }
+}
+
+
+#endif
+
+#ifndef HAVE_KERNEL_4x1
+
+static void dgemv_kernel_4x1(BLASLONG n, FLOAT *a0, FLOAT *xo, FLOAT *y, FLOAT alpha)
+{
+ BLASLONG i;
+ FLOAT x[4] __attribute__ ((aligned (16)));
+
+ for ( i=0; i<1; i++)
+ x[i] = xo[i] * alpha;
+
+ for ( i=0; i< n; i+=4 )
+ {
+ y[i] += a0[i]*x[0];
+ y[i+1] += a0[i+1]*x[0];
+ y[i+2] += a0[i+2]*x[0];
+ y[i+3] += a0[i+3]*x[0];
+ }
+}
+
+
+#endif
+
+
+static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest)
+{
+ BLASLONG i;
+ if ( inc_dest != 1 )
+ {
+ for ( i=0; i<n; i++ )
+ {
+ *dest += *src;
+ src++;
+ dest += inc_dest;
+ }
+ return;
+ }
+}
+
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
+{
+ BLASLONG i;
+ FLOAT *a_ptr;
+ FLOAT *x_ptr;
+ FLOAT *y_ptr;
+ BLASLONG m1;
+ BLASLONG m2;
+ BLASLONG m3;
+ BLASLONG n2;
+ BLASLONG lda4 = lda << 2;
+ BLASLONG lda8 = lda << 3;
+ FLOAT xbuffer[8] __attribute__ ((aligned (16)));
+ FLOAT *ybuffer;
+
+ if ( m < 1 ) return(0);
+ if ( n < 1 ) return(0);
+
+ ybuffer = buffer;
+
+ BLASLONG n8 = n >> 3;
+ n2 = n & 3;
+
+ m3 = m & 3 ;
+ m1 = m & -4 ;
+ m2 = (m & (NBMAX-1)) - m3 ;
+
+ y_ptr = y;
+
+ BLASLONG NB = NBMAX;
+
+ while ( NB == NBMAX )
+ {
+
+ m1 -= NB;
+ if ( m1 < 0)
+ {
+ if ( m2 == 0 ) break;
+ NB = m2;
+ }
+
+ a_ptr = a;
+ x_ptr = x;
+
+ if ( inc_y != 1 )
+ memset(ybuffer,0,NB*8);
+ else
+ ybuffer = y_ptr;
+
+ if ( inc_x == 1 )
+ {
+
+ for( i = 0; i < n8 ; i++)
+ {
+ dgemv_kernel_4x8(NB,a_ptr,lda,x_ptr,ybuffer,alpha);
+ a_ptr += lda8;
+ x_ptr += 8;
+ }
+
+ if( n & 4 )
+ {
+ dgemv_kernel_4x4(NB,a_ptr,lda,x_ptr,ybuffer,alpha);
+ a_ptr += lda4;
+ x_ptr += 4;
+ }
+
+ if ( n2 & 2 )
+ {
+ dgemv_kernel_4x2(NB,a_ptr,a_ptr+lda,x_ptr,ybuffer,alpha);
+ a_ptr += lda*2;
+ x_ptr += 2;
+ }
+
+
+ if ( n2 & 1 )
+ {
+ dgemv_kernel_4x1(NB,a_ptr,x_ptr,ybuffer,alpha);
+ a_ptr += lda;
+ x_ptr += 1;
+
+ }
+
+
+ }
+ else
+ {
+ for( i = 0; i < n8 ; i++)
+ {
+ BLASLONG j;
+ for ( j = 0; j < 8 ; j++)
+ {
+ xbuffer[j] = x_ptr[0];
+ x_ptr += inc_x;
+ }
+ dgemv_kernel_4x8(NB,a_ptr,lda,xbuffer,ybuffer,alpha);
+ a_ptr += lda8;
+ }
+
+ if( n & 4 )
+ {
+ xbuffer[0] = x_ptr[0];
+ x_ptr += inc_x;
+ xbuffer[1] = x_ptr[0];
+ x_ptr += inc_x;
+ xbuffer[2] = x_ptr[0];
+ x_ptr += inc_x;
+ xbuffer[3] = x_ptr[0];
+ x_ptr += inc_x;
+ dgemv_kernel_4x4(NB,a_ptr,lda,xbuffer,ybuffer,alpha);
+ a_ptr += lda4;
+ }
+
+ for( i = 0; i < n2 ; i++)
+ {
+ xbuffer[0] = x_ptr[0];
+ x_ptr += inc_x;
+ dgemv_kernel_4x1(NB,a_ptr,xbuffer,ybuffer,alpha);
+ a_ptr += lda;
+
+ }
+
+ }
+
+ a += NB;
+ if ( inc_y != 1 )
+ {
+ add_y(NB,ybuffer,y_ptr,inc_y);
+ y_ptr += NB * inc_y;
+ }
+ else
+ y_ptr += NB ;
+
+ }
+
+ if ( m3 == 0 ) return(0);
+
+ if ( m3 == 3 )
+ {
+ a_ptr = a;
+ x_ptr = x;
+ FLOAT temp0 = 0.0;
+ FLOAT temp1 = 0.0;
+ FLOAT temp2 = 0.0;
+ if ( lda == 3 && inc_x ==1 )
+ {
+
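+ /* lda == 3 and inc_x == 1: the three leftover rows form a contiguous
+    3 x n panel, so a_ptr and x_ptr can be walked linearly, four columns
+    (twelve elements of a) per iteration. */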
+ for( i = 0; i < ( n & -4 ); i+=4 ) + { + + temp0 += a_ptr[0] * x_ptr[0] + a_ptr[3] * x_ptr[1]; + temp1 += a_ptr[1] * x_ptr[0] + a_ptr[4] * x_ptr[1]; + temp2 += a_ptr[2] * x_ptr[0] + a_ptr[5] * x_ptr[1]; + + temp0 += a_ptr[6] * x_ptr[2] + a_ptr[9] * x_ptr[3]; + temp1 += a_ptr[7] * x_ptr[2] + a_ptr[10] * x_ptr[3]; + temp2 += a_ptr[8] * x_ptr[2] + a_ptr[11] * x_ptr[3]; + + a_ptr += 12; + x_ptr += 4; + } + + for( ; i < n; i++ ) + { + temp0 += a_ptr[0] * x_ptr[0]; + temp1 += a_ptr[1] * x_ptr[0]; + temp2 += a_ptr[2] * x_ptr[0]; + a_ptr += 3; + x_ptr ++; + } + + } + else + { + + for( i = 0; i < n; i++ ) + { + temp0 += a_ptr[0] * x_ptr[0]; + temp1 += a_ptr[1] * x_ptr[0]; + temp2 += a_ptr[2] * x_ptr[0]; + a_ptr += lda; + x_ptr += inc_x; + + + } + + } + y_ptr[0] += alpha * temp0; + y_ptr += inc_y; + y_ptr[0] += alpha * temp1; + y_ptr += inc_y; + y_ptr[0] += alpha * temp2; + return(0); + } + + + if ( m3 == 2 ) + { + a_ptr = a; + x_ptr = x; + FLOAT temp0 = 0.0; + FLOAT temp1 = 0.0; + if ( lda == 2 && inc_x ==1 ) + { + + for( i = 0; i < (n & -4) ; i+=4 ) + { + temp0 += a_ptr[0] * x_ptr[0] + a_ptr[2] * x_ptr[1]; + temp1 += a_ptr[1] * x_ptr[0] + a_ptr[3] * x_ptr[1]; + temp0 += a_ptr[4] * x_ptr[2] + a_ptr[6] * x_ptr[3]; + temp1 += a_ptr[5] * x_ptr[2] + a_ptr[7] * x_ptr[3]; + a_ptr += 8; + x_ptr += 4; + + } + + + for( ; i < n; i++ ) + { + temp0 += a_ptr[0] * x_ptr[0]; + temp1 += a_ptr[1] * x_ptr[0]; + a_ptr += 2; + x_ptr ++; + } + + } + else + { + + for( i = 0; i < n; i++ ) + { + temp0 += a_ptr[0] * x_ptr[0]; + temp1 += a_ptr[1] * x_ptr[0]; + a_ptr += lda; + x_ptr += inc_x; + + + } + + } + y_ptr[0] += alpha * temp0; + y_ptr += inc_y; + y_ptr[0] += alpha * temp1; + return(0); + } + + if ( m3 == 1 ) + { + a_ptr = a; + x_ptr = x; + FLOAT temp = 0.0; + if ( lda == 1 && inc_x ==1 ) + { + + for( i = 0; i < (n & -4); i+=4 ) + { + temp += a_ptr[i] * x_ptr[i] + a_ptr[i+1] * x_ptr[i+1] + a_ptr[i+2] * x_ptr[i+2] + a_ptr[i+3] * x_ptr[i+3]; + + } + + for( ; i < n; i++ ) + { + temp += a_ptr[i] * x_ptr[i]; + } + + } + else + { + + for( i = 0; i < n; i++ ) + { + temp += a_ptr[0] * x_ptr[0]; + a_ptr += lda; + x_ptr += inc_x; + } + + } + y_ptr[0] += alpha * temp; + return(0); + } + + + return(0); +} + + diff --git a/kernel/power/dgemv_t.c b/kernel/power/dgemv_t.c index b8589a131..c07b3c223 100644 --- a/kernel/power/dgemv_t.c +++ b/kernel/power/dgemv_t.c @@ -25,15 +25,19 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ +#if !defined(__VEC__) || !defined(__ALTIVEC__) +#include "../arm/gemv_t.c" +#else + #include "common.h" #define NBMAX 1024 //#define PREFETCH 1 + #include #define HAVE_KERNEL4x8_ASM 1 - #if defined(HAVE_KERNEL4x8_ASM) static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, double *y, double alpha) { @@ -93,11 +97,11 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do "li %[off],32 \n\t" - "ble- 2f \n\t" + "ble- two%= \n\t" //-------------------------------------------------- - ".p2align 5 \n\t" - "1: \n\t" + ".align 5 \n\t" + "one%=: \n\t" "xvmaddadp 34,36,32 \n\t" "xvmaddadp 35,38,32 \n\t" "addi %[off2], %[off2],32 \n\t" @@ -137,7 +141,7 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do "lxvd2x 49, %[a6], %[off2] \n\t" "lxvd2x 51, %[a7], %[off2] \n\t" "lxvd2x 33, %[x], %[off2] \n\t" - "ble- 2f \n\t" + "ble- two%= \n\t" "xvmaddadp 34,36,32 \n\t" "xvmaddadp 35,38,32 \n\t" "addi %[off2], %[off2],32 \n\t" @@ -177,7 +181,7 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do "lxvd2x 49, %[a6], %[off2] \n\t" "lxvd2x 51, %[a7], %[off2] \n\t" "lxvd2x 33, %[x], %[off2] \n\t" - "ble- 2f \n\t" + "ble- two%= \n\t" "xvmaddadp 34,36,32 \n\t" "xvmaddadp 35,38,32 \n\t" #if defined(PREFETCH) @@ -229,7 +233,7 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do "lxvd2x 33, %[x], %[off2] \n\t" "addic. %[n],%[n],-4 \n\t" - "ble- 2f \n\t" + "ble- two%= \n\t" "addi %[off2], %[off2],32 \n\t" #if defined(PREFETCH) @@ -288,9 +292,9 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do #if defined(PREFETCH) "dcbt %[temp],%[x] \n\t" #endif - "bgt+ 1b \n\t" - ".p2align 5 \n\t" - "2: \n\t" + "bgt+ one%= \n\t" + ".align 5 \n\t" + "two%=: \n\t" //-------------------------------------------- "xvmaddadp 34,36,32 \n\t" @@ -301,7 +305,7 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do "xvmaddadp 7,46,32 \n\t" "xvmaddadp 8,48,32 \n\t" "xvmaddadp 9,50,32 \n\t" - "xxspltd 36, %x[alpha], 0 \n\t" + XXSPLTD_S(36,%x[alpha],0) "xvmaddadp 34,37,33 \n\t" "xvmaddadp 35,39,33 \n\t" "xvmaddadp 4,41,33 \n\t" @@ -322,21 +326,21 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do - "xxmrgld 42,34,35 \n\t" - "xxmrghd 43,34,35 \n\t" + XXMRGLD_S(42,34,35) + XXMRGHD_S(43,34,35) - "xxmrgld 44,4,5 \n\t" - "xxmrghd 45,4,5 \n\t" + XXMRGLD_S(44,4,5) + XXMRGHD_S(45,4,5) "xvadddp 42,42,43 \n\t" - "xxmrgld 46,6,7 \n\t" - "xxmrghd 47,6,7 \n\t" + XXMRGLD_S(46,6,7) + XXMRGHD_S(47,6,7) "xvadddp 44,44,45 \n\t" - "xxmrgld 48,8,9 \n\t" - "xxmrghd 49,8,9 \n\t" + XXMRGLD_S(48,8,9) + XXMRGHD_S(49,8,9) "xvadddp 46,46,47 \n\t" @@ -355,7 +359,7 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do "stxvd2x 39, %[off], %[y] \n\t" "stxvd2x 40, %[off2], %[y] \n\t" - : [memy] "+m" (*(const double (*)[8])y), + : [memy] "+m" (*(double (*)[8])y), [n] "+&r" (n), [a0] "=b" (a0), [a1] "=&b" (a1), @@ -369,7 +373,7 @@ static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, do [off2]"=&b" (off2), [temp] "=&b" (tempR) : [memx] "m" (*(const double (*)[n])x), - [mem_ap] "m" (*(const double (*)[]) ap), + [mem_ap] "m" (*(const double (*)[n*8]) ap), [alpha] "d" (alpha), "[a0]" (ap), [x] "b" (x), @@ -581,9 +585,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO BLASLONG m1; 
BLASLONG m2; BLASLONG m3; - BLASLONG n2; - - FLOAT ybuffer[8], *xbuffer; + BLASLONG n2; + FLOAT ybuffer[8] __attribute__((aligned(16))); + FLOAT *xbuffer; if (m < 1) return (0); if (n < 1) return (0); @@ -883,4 +887,5 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO return (0); } +#endif diff --git a/kernel/power/dgemv_t_power10.c b/kernel/power/dgemv_t_power10.c new file mode 100644 index 000000000..3db4d5785 --- /dev/null +++ b/kernel/power/dgemv_t_power10.c @@ -0,0 +1,840 @@ +/*************************************************************************** +Copyright (c) 2018, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#include "common.h" + +#define NBMAX 1024 +//#define PREFETCH 1 +#include + +#define HAVE_KERNEL4x8_ASM 1 + + +#if defined(HAVE_KERNEL4x8_ASM) +static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, double *ap, double *x, double *y, double alpha) { + + FLOAT *a0, *a1, *a2, *a3, *a4, *a5, *a6, *a7; + BLASLONG off2; + BLASLONG tempR; + __asm__( + + "sldi %[temp],%[off], 4 \n\t" // lda * sizeof (double) *2 + "sldi %[off], %[off], 3 \n\t" // lda * sizeof (double) + "xxlxor 34,34,34 \n\t" + "xxlxor 35,34,34 \n\t" + "add %[a2], %[a0], %[temp] \n\t" + "add %[a1], %[a0], %[off] \n\t" + "xxlxor 4,34,34 \n\t" + "xxlxor 5,34,34 \n\t" + "xxlxor 6,34,34 \n\t" + "xxlxor 7,34,34 \n\t" + "add %[a3], %[a2], %[off] \n\t" + "add %[a4], %[a2], %[temp] \n\t" + + "xxlxor 8,34,34 \n\t" + "xxlxor 9,34,34 \n\t" + "add %[a5], %[a3], %[temp] \n\t" + "li %[off],0 \n\t" + "li %[off2],16 \n\t" + + "add %[a6], %[a4], %[temp] \n\t" + "add %[a7], %[a5], %[temp] \n\t" + + + + + "lxvp 32, 0(%[x]) \n\t" + "lxvp 36, 0(%[a0]) \n\t" + "lxvp 38, 0(%[a1]) \n\t" + "lxvp 40, 0(%[a2]) \n\t" + "lxvp 42, 0(%[a3]) \n\t" + "lxvp 44, 0(%[a4]) \n\t" + "lxvp 46, 0(%[a5]) \n\t" + "lxvp 48, 0(%[a6]) \n\t" + "lxvp 50, 0(%[a7]) \n\t" +#if defined(PREFETCH) + "li %[temp],896 \n\t" +#endif + "addic. 
%[n],%[n],-4 \n\t" + + "li %[off],32 \n\t" + + + "ble- two%= \n\t" + + //-------------------------------------------------- + ".align 5 \n\t" + "one%=: \n\t" + "xvmaddadp 34,36,32 \n\t" + "xvmaddadp 35,38,32 \n\t" + "addi %[off2], %[off2],32 \n\t" + "xvmaddadp 4,40,32 \n\t" + "xvmaddadp 5,42,32 \n\t" + "xvmaddadp 6,44,32 \n\t" + "xvmaddadp 7,46,32 \n\t" + "xvmaddadp 8,48,32 \n\t" + "xvmaddadp 9,50,32 \n\t" + + "xvmaddadp 34,37,33 \n\t" + "xvmaddadp 35,39,33 \n\t" + "lxvp 36, 32(%[a0]) \n\t" + "lxvp 38, 32(%[a1]) \n\t" + "xvmaddadp 4,41,33 \n\t" + "xvmaddadp 5,43,33 \n\t" + "addi %[off], %[off],32 \n\t" + "lxvp 40, 32(%[a2]) \n\t" + "lxvp 42, 32(%[a3]) \n\t" + "xvmaddadp 6,45,33 \n\t" + "xvmaddadp 7,47,33 \n\t" + "lxvp 44, 32(%[a4]) \n\t" + "lxvp 46, 32(%[a5]) \n\t" + "xvmaddadp 8,49,33 \n\t" + "xvmaddadp 9,51,33 \n\t" + + "addic. %[n],%[n],-4 \n\t" + "lxvp 48, 32(%[a6]) \n\t" + "lxvp 50, 32(%[a7]) \n\t" + "lxvp 32, 32(%[x]) \n\t" + "ble- two%= \n\t" + "xvmaddadp 34,36,32 \n\t" + "xvmaddadp 35,38,32 \n\t" + "addi %[off2], %[off2],32 \n\t" + "xvmaddadp 4,40,32 \n\t" + "xvmaddadp 5,42,32 \n\t" + "xvmaddadp 6,44,32 \n\t" + "xvmaddadp 7,46,32 \n\t" + "xvmaddadp 8,48,32 \n\t" + "xvmaddadp 9,50,32 \n\t" + + "xvmaddadp 34,37,33 \n\t" + "xvmaddadp 35,39,33 \n\t" + "lxvp 36, 64(%[a0]) \n\t" + "lxvp 38, 64(%[a1]) \n\t" + "xvmaddadp 4,41,33 \n\t" + "xvmaddadp 5,43,33 \n\t" + "addi %[off], %[off],32 \n\t" + "lxvp 40, 64(%[a2]) \n\t" + "lxvp 42, 64(%[a3]) \n\t" + "xvmaddadp 6,45,33 \n\t" + "xvmaddadp 7,47,33 \n\t" + "lxvp 44, 64(%[a4]) \n\t" + "lxvp 46, 64(%[a5]) \n\t" + "xvmaddadp 8,49,33 \n\t" + "xvmaddadp 9,51,33 \n\t" + + "addic. %[n],%[n],-4 \n\t" + "lxvp 48, 64(%[a6]) \n\t" + "lxvp 50, 64(%[a7]) \n\t" + "lxvp 32, 64(%[x]) \n\t" + "ble- two%= \n\t" + "xvmaddadp 34,36,32 \n\t" + "xvmaddadp 35,38,32 \n\t" +#if defined(PREFETCH) + "addi %[temp],%[temp],128 \n\t" +#endif + "addi %[off2], %[off2],32 \n\t" + "xvmaddadp 4,40,32 \n\t" + "xvmaddadp 5,42,32 \n\t" + "xvmaddadp 6,44,32 \n\t" + "xvmaddadp 7,46,32 \n\t" + "xvmaddadp 8,48,32 \n\t" + "xvmaddadp 9,50,32 \n\t" +#if defined(PREFETCH) + "dcbt %[temp],%[a0] \n\t" +#endif + + "xvmaddadp 34,37,33 \n\t" + "xvmaddadp 35,39,33 \n\t" + "lxvp 36, 96(%[a0]) \n\t" + "lxvp 38, 96(%[a1]) \n\t" + "xvmaddadp 4,41,33 \n\t" + "xvmaddadp 5,43,33 \n\t" +#if defined(PREFETCH) + "dcbt %[temp],%[a1] \n\t" +#endif + "lxvp 40, 96(%[a2]) \n\t" + "lxvp 42, 96(%[a3]) \n\t" + "addi %[off], %[off],32 \n\t" + "xvmaddadp 6,45,33 \n\t" + "xvmaddadp 7,47,33 \n\t" + "lxvp 44, 96(%[a4]) \n\t" + "lxvp 46, 96(%[a5]) \n\t" + "xvmaddadp 8,49,33 \n\t" + "xvmaddadp 9,51,33 \n\t" +#if defined(PREFETCH) + "dcbt %[temp],%[a3] \n\t" +#endif + "lxvp 48, 96(%[a6]) \n\t" + "lxvp 50, 96(%[a7]) \n\t" + "lxvp 32, 96(%[x]) \n\t" + + "addic. 
%[n],%[n],-4 \n\t" + "ble- two%= \n\t" + + "addi %[off2], %[off2],32 \n\t" +#if defined(PREFETCH) + "dcbt %[temp],%[a2] \n\t" +#endif + "xvmaddadp 34,36,32 \n\t" + "xvmaddadp 35,38,32 \n\t" + "xvmaddadp 4,40,32 \n\t" + "xvmaddadp 5,42,32 \n\t" +#if defined(PREFETCH) + "dcbt %[temp],%[a4] \n\t" +#endif + "xvmaddadp 6,44,32 \n\t" + "xvmaddadp 7,46,32 \n\t" + "xvmaddadp 8,48,32 \n\t" + "xvmaddadp 9,50,32 \n\t" + +#if defined(PREFETCH) + "dcbt %[temp],%[a5] \n\t" +#endif + "xvmaddadp 34,37,33 \n\t" + "xvmaddadp 35,39,33 \n\t" + "lxvp 36, 128(%[a0]) \n\t" + "lxvp 38, 128(%[a1]) \n\t" + "xvmaddadp 4,41,33 \n\t" + "xvmaddadp 5,43,33 \n\t" + "addi %[off], %[off],32 \n\t" + "lxvp 40, 128(%[a2]) \n\t" + "lxvp 42, 128(%[a3]) \n\t" +#if defined(PREFETCH) + "dcbt %[temp],%[a6] \n\t" +#endif + "xvmaddadp 6,45,33 \n\t" + "xvmaddadp 7,47,33 \n\t" + "lxvp 44, 128(%[a4]) \n\t" + "lxvp 46, 128(%[a5]) \n\t" + "xvmaddadp 8,49,33 \n\t" + "xvmaddadp 9,51,33 \n\t" + +#if defined(PREFETCH) + "dcbt %[temp],%[a7] \n\t" +#endif + "addic. %[n],%[n],-4 \n\t" + "lxvp 48, 128(%[a6]) \n\t" + "lxvp 50, 128(%[a7]) \n\t" + "lxvp 32, 128(%[x]) \n\t" +#if defined(PREFETCH) + "dcbt %[temp],%[x] \n\t" +#endif + "addi %[a0], %[a0], 128 \n\t" + "addi %[a1], %[a1], 128 \n\t" + "addi %[a2], %[a2], 128 \n\t" + "addi %[a3], %[a3], 128 \n\t" + "addi %[a4], %[a4], 128 \n\t" + "addi %[a5], %[a5], 128 \n\t" + "addi %[a6], %[a6], 128 \n\t" + "addi %[a7], %[a7], 128 \n\t" + "addi %[x], %[x], 128 \n\t" + "bgt+ one%= \n\t" + ".align 5 \n\t" + "two%=: \n\t" + //-------------------------------------------- + + "xvmaddadp 34,36,32 \n\t" + "xvmaddadp 35,38,32 \n\t" + "xvmaddadp 4,40,32 \n\t" + "xvmaddadp 5,42,32 \n\t" + "xvmaddadp 6,44,32 \n\t" + "xvmaddadp 7,46,32 \n\t" + "xvmaddadp 8,48,32 \n\t" + "xvmaddadp 9,50,32 \n\t" + XXSPLTD_S(36,%x[alpha],0) + "xvmaddadp 34,37,33 \n\t" + "xvmaddadp 35,39,33 \n\t" + "xvmaddadp 4,41,33 \n\t" + "xvmaddadp 5,43,33 \n\t" + "xvmaddadp 6,45,33 \n\t" + "xvmaddadp 7,47,33 \n\t" + "xvmaddadp 8,49,33 \n\t" + "xvmaddadp 9,51,33 \n\t" + + "lxvp 38, 0(%[y]) \n\t" + "lxvp 40, 32(%[y]) \n\t" + + + + XXMRGLD_S(42,35,34) + XXMRGHD_S(43,35,34) + + XXMRGLD_S(44,5,4) + XXMRGHD_S(45,5,4) + + "xvadddp 42,42,43 \n\t" + + XXMRGLD_S(46,7,6) + XXMRGHD_S(47,7,6) + + "xvadddp 44,44,45 \n\t" + + XXMRGLD_S(48,9,8) + XXMRGHD_S(49,9,8) + + "xvadddp 46,46,47 \n\t" + + "xvmaddadp 39,42,36 \n\t" + "xvmaddadp 38,44,36 \n\t" + + "xvadddp 48,48,49 \n\t" + + "xvmaddadp 41,46,36 \n\t" + + "stxvp 38, 0(%[y]) \n\t" + "xvmaddadp 40,48,36 \n\t" + "stxvp 40, 32(%[y]) \n\t" + + : [memy] "+m" (*(double (*)[8])y), + [n] "+&r" (n), + [a0] "=b" (a0), + [a1] "=&b" (a1), + [a2] "=&b" (a2), + [a3] "=&b" (a3), + [a4] "=&b" (a4), + [a5] "=&b" (a5), + [a6] "=&b" (a6), + [a7] "=&b" (a7), + [off] "+&b" (lda), + [off2]"=&b" (off2), + [temp] "=&b" (tempR) + : [memx] "m" (*(const double (*)[n])x), + [mem_ap] "m" (*(const double (*)[n*8]) ap), + [alpha] "d" (alpha), + "[a0]" (ap), + [x] "b" (x), + [y] "b" (y) + : "cc","vs4","vs5","vs6","vs7","vs8","vs9" ,"vs32","vs33","vs34","vs35", "vs36", "vs37", "vs38", "vs39", + "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs50", "vs51" + ); + return; +} +#else +static void dgemv_kernel_4x8(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha) { + BLASLONG i; +#if defined(PREFETCH) + BLASLONG j, c, k; +#endif + FLOAT *a0, *a1, *a2, *a3, *a4, *a5, *a6, *a7; + __vector double *va0, *va1, *va2, *va3, *va4, *va5, *va6, *va7, *v_x; + register __vector double temp0 = {0, 0}; + 
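+    /* temp0..temp7 are 2-lane accumulators, one per column pointer a0..a7;
+       the two lanes of each hold partial dot products of x with that column
+       and are summed (and scaled by alpha) into y[0..7] after the loop. */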
register __vector double temp1 = {0, 0}; + register __vector double temp2 = {0, 0}; + register __vector double temp3 = {0, 0}; + register __vector double temp4 = {0, 0}; + register __vector double temp5 = {0, 0}; + register __vector double temp6 = {0, 0}; + register __vector double temp7 = {0, 0}; + + a0 = ap; + a1 = ap + lda; + a2 = a1 + lda; + a3 = a2 + lda; + a4 = a3 + lda; + a5 = a4 + lda; + a6 = a5 + lda; + a7 = a6 + lda; + va0 = (__vector double*) a0; + va1 = (__vector double*) a1; + va2 = (__vector double*) a2; + va3 = (__vector double*) a3; + va4 = (__vector double*) a4; + va5 = (__vector double*) a5; + va6 = (__vector double*) a6; + va7 = (__vector double*) a7; + v_x = (__vector double*) x; + +#if defined(PREFETCH) + + c = n >> 1; + + for (j = 0; j < c; j += 64) { + k = (c - j) > 64 ? 64 : (c - j); + __builtin_prefetch(v_x + 64); + __builtin_prefetch(va0 + 64); + __builtin_prefetch(va1 + 64); + __builtin_prefetch(va2 + 64); + __builtin_prefetch(va3 + 64); + __builtin_prefetch(va4 + 64); + __builtin_prefetch(va5 + 64); + __builtin_prefetch(va6 + 64); + __builtin_prefetch(va7 + 64); + for (i = 0; i < k; i += 2) { +#else + + for (i = 0; i < n/2; i += 2) { +#endif + temp0 += v_x[i] * va0[i]; + temp1 += v_x[i] * va1[i]; + temp2 += v_x[i] * va2[i]; + temp3 += v_x[i] * va3[i]; + temp4 += v_x[i] * va4[i]; + temp5 += v_x[i] * va5[i]; + temp6 += v_x[i] * va6[i]; + temp7 += v_x[i] * va7[i]; + temp0 += v_x[i + 1] * va0[i + 1]; + temp1 += v_x[i + 1] * va1[i + 1]; + temp2 += v_x[i + 1] * va2[i + 1]; + temp3 += v_x[i + 1] * va3[i + 1]; + + temp4 += v_x[i + 1] * va4[i + 1]; + temp5 += v_x[i + 1] * va5[i + 1]; + temp6 += v_x[i + 1] * va6[i + 1]; + temp7 += v_x[i + 1] * va7[i + 1]; + } +#if defined(PREFETCH) + va0 += 64; + va1 += 64; + va2 += 64; + va3 += 64; + va4 += 64; + va5 += 64; + va6 += 64; + va7 += 64; + v_x += 64; + + } +#endif + y[0] += alpha * (temp0[0] + temp0[1]); + y[1] += alpha * (temp1[0] + temp1[1]); + y[2] += alpha * (temp2[0] + temp2[1]); + y[3] += alpha * (temp3[0] + temp3[1]); + + y[4] += alpha * (temp4[0] + temp4[1]); + y[5] += alpha * (temp5[0] + temp5[1]); + y[6] += alpha * (temp6[0] + temp6[1]); + y[7] += alpha * (temp7[0] + temp7[1]); + +} + +#endif + + +static void dgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha) { + BLASLONG i = 0; + FLOAT *a0, *a1, *a2, *a3; + a0 = ap; + a1 = ap + lda; + a2 = a1 + lda; + a3 = a2 + lda; + __vector double* va0 = (__vector double*) a0; + __vector double* va1 = (__vector double*) a1; + __vector double* va2 = (__vector double*) a2; + __vector double* va3 = (__vector double*) a3; + __vector double* v_x = (__vector double*) x; + register __vector double temp0 = {0, 0}; + register __vector double temp1 = {0, 0}; + register __vector double temp2 = {0, 0}; + register __vector double temp3 = {0, 0}; + register __vector double temp4 = {0, 0}; + register __vector double temp5 = {0, 0}; + register __vector double temp6 = {0, 0}; + register __vector double temp7 = {0, 0}; + + for (i = 0; i < n / 2; i += 2) { + temp0 += v_x[i] * va0[i]; + temp1 += v_x[i] * va1[i]; + temp2 += v_x[i] * va2[i]; + temp3 += v_x[i] * va3[i]; + temp4 += v_x[i + 1] * va0[i + 1]; + temp5 += v_x[i + 1] * va1[i + 1]; + temp6 += v_x[i + 1] * va2[i + 1]; + temp7 += v_x[i + 1] * va3[i + 1]; + } + + temp0 += temp4; + temp1 += temp5; + temp2 += temp6; + temp3 += temp7; + y[0] += alpha * (temp0[0] + temp0[1]); + y[1] += alpha * (temp1[0] + temp1[1]); + y[2] += alpha * (temp2[0] + temp2[1]); + y[3] += alpha * (temp3[0] + temp3[1]); + +} + + +static 
void dgemv_kernel_4x2(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha, BLASLONG inc_y) { + + BLASLONG i; + FLOAT *a0, *a1; + a0 = ap; + a1 = ap + lda; + __vector double* va0 = (__vector double*) a0; + __vector double* va1 = (__vector double*) a1; + __vector double* v_x = (__vector double*) x; + __vector double temp0 = {0, 0}; + __vector double temp1 = {0, 0}; + for (i = 0; i < n / 2; i += 2) { + temp0 += v_x[i] * va0[i] + v_x[i + 1] * va0[i + 1]; + temp1 += v_x[i] * va1[i] + v_x[i + 1] * va1[i + 1]; + } + + + + y[0] += alpha * (temp0[0] + temp0[1]); + y[inc_y] += alpha * (temp1[0] + temp1[1]); +} + +static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha) { + + BLASLONG i; + FLOAT *a0; + a0 = ap; + __vector double* va0 = (__vector double*) a0; + __vector double* v_x = (__vector double*) x; + __vector double temp0 = {0, 0}; + for (i = 0; i < n / 2; i += 2) { + temp0 += v_x[i] * va0[i] + v_x[i + 1] * va0[i + 1]; + } + + *y += alpha * (temp0[0] + temp0[1]); + +} + +static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) { + BLASLONG i; + for (i = 0; i < n; i++) { + *dest++ = *src; + src += inc_src; + } +} + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) { + BLASLONG i; + BLASLONG j; + FLOAT *a_ptr; + FLOAT *x_ptr; + FLOAT *y_ptr; + + BLASLONG n1; + BLASLONG m1; + BLASLONG m2; + BLASLONG m3; + BLASLONG n2; + FLOAT ybuffer[8] __attribute__((aligned(16))); + FLOAT *xbuffer; + + if (m < 1) return (0); + if (n < 1) return (0); + + xbuffer = buffer; + + n1 = n >> 3; + n2 = n & 7; + + m3 = m & 3; + m1 = m - m3; + m2 = (m & (NBMAX - 1)) - m3; + + BLASLONG NB = NBMAX; + + while (NB == NBMAX) { + + m1 -= NB; + if (m1 < 0) { + if (m2 == 0) break; + NB = m2; + } + + y_ptr = y; + a_ptr = a; + x_ptr = x; + + if (inc_x != 1) + copy_x(NB, x_ptr, xbuffer, inc_x); + else + xbuffer = x_ptr; + + BLASLONG lda8 = lda << 3; + + + if (inc_y == 1) { + + for (i = 0; i < n1; i++) { + + dgemv_kernel_4x8(NB, lda, a_ptr, xbuffer, y_ptr, alpha); + + y_ptr += 8; + a_ptr += lda8; +#if defined(PREFETCH) + __builtin_prefetch(y_ptr+64); +#endif + } + + } else { + + for (i = 0; i < n1; i++) { + ybuffer[0] = 0; + ybuffer[1] = 0; + ybuffer[2] = 0; + ybuffer[3] = 0; + ybuffer[4] = 0; + ybuffer[5] = 0; + ybuffer[6] = 0; + ybuffer[7] = 0; + dgemv_kernel_4x8(NB, lda, a_ptr, xbuffer, ybuffer, alpha); + + + + *y_ptr += ybuffer[0]; + y_ptr += inc_y; + *y_ptr += ybuffer[1]; + y_ptr += inc_y; + *y_ptr += ybuffer[2]; + y_ptr += inc_y; + *y_ptr += ybuffer[3]; + y_ptr += inc_y; + + *y_ptr += ybuffer[4]; + y_ptr += inc_y; + *y_ptr += ybuffer[5]; + y_ptr += inc_y; + *y_ptr += ybuffer[6]; + y_ptr += inc_y; + *y_ptr += ybuffer[7]; + y_ptr += inc_y; + + a_ptr += lda8; + } + + } + + + if (n2 & 4) { + ybuffer[0] = 0; + ybuffer[1] = 0; + ybuffer[2] = 0; + ybuffer[3] = 0; + dgemv_kernel_4x4(NB, lda, a_ptr, xbuffer, ybuffer, alpha); + + a_ptr += lda<<2; + + *y_ptr += ybuffer[0]; + y_ptr += inc_y; + *y_ptr += ybuffer[1]; + y_ptr += inc_y; + *y_ptr += ybuffer[2]; + y_ptr += inc_y; + *y_ptr += ybuffer[3]; + y_ptr += inc_y; + } + + if (n2 & 2) { + dgemv_kernel_4x2(NB, lda, a_ptr, xbuffer, y_ptr, alpha, inc_y); + a_ptr += lda << 1; + y_ptr += 2 * inc_y; + + } + + if (n2 & 1) { + dgemv_kernel_4x1(NB, a_ptr, xbuffer, y_ptr, alpha); + a_ptr += lda; + y_ptr += inc_y; + + } + + a += NB; + x += NB * inc_x; + + + } + + if (m3 == 0) return (0); + + x_ptr = x; + a_ptr = a; + if (m3 == 3) { + 
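+        /* Tail: the last three rows of A (m % 4 == 3) still contribute to
+           every y[j]; fold their x values, pre-scaled by alpha, into the
+           output column by column. */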
FLOAT xtemp0 = *x_ptr * alpha; + x_ptr += inc_x; + FLOAT xtemp1 = *x_ptr * alpha; + x_ptr += inc_x; + FLOAT xtemp2 = *x_ptr * alpha; + + FLOAT *aj = a_ptr; + y_ptr = y; + + if (lda == 3 && inc_y == 1) { + + for (j = 0; j < (n & -4); j += 4) { + + y_ptr[j] += aj[0] * xtemp0 + aj[1] * xtemp1 + aj[2] * xtemp2; + y_ptr[j + 1] += aj[3] * xtemp0 + aj[4] * xtemp1 + aj[5] * xtemp2; + y_ptr[j + 2] += aj[6] * xtemp0 + aj[7] * xtemp1 + aj[8] * xtemp2; + y_ptr[j + 3] += aj[9] * xtemp0 + aj[10] * xtemp1 + aj[11] * xtemp2; + aj += 12; + } + + for (; j < n; j++) { + y_ptr[j] += aj[0] * xtemp0 + aj[1] * xtemp1 + aj[2] * xtemp2; + aj += 3; + } + + } else { + + if (inc_y == 1) { + + BLASLONG register lda2 = lda << 1; + BLASLONG register lda4 = lda << 2; + BLASLONG register lda3 = lda2 + lda; + + for (j = 0; j < (n & -4); j += 4) { + + y_ptr[j] += *aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2; + y_ptr[j + 1] += *(aj + lda) * xtemp0 + *(aj + lda + 1) * xtemp1 + *(aj + lda + 2) * xtemp2; + y_ptr[j + 2] += *(aj + lda2) * xtemp0 + *(aj + lda2 + 1) * xtemp1 + *(aj + lda2 + 2) * xtemp2; + y_ptr[j + 3] += *(aj + lda3) * xtemp0 + *(aj + lda3 + 1) * xtemp1 + *(aj + lda3 + 2) * xtemp2; + aj += lda4; + } + + for (; j < n; j++) { + + y_ptr[j] += *aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2; + aj += lda; + } + + } else { + + for (j = 0; j < n; j++) { + *y_ptr += *aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2; + y_ptr += inc_y; + aj += lda; + } + + } + + } + return (0); + } + + if (m3 == 2) { + FLOAT xtemp0 = *x_ptr * alpha; + x_ptr += inc_x; + FLOAT xtemp1 = *x_ptr * alpha; + + FLOAT *aj = a_ptr; + y_ptr = y; + + if (lda == 2 && inc_y == 1) { + + for (j = 0; j < (n & -4); j += 4) { + y_ptr[j] += aj[0] * xtemp0 + aj[1] * xtemp1; + y_ptr[j + 1] += aj[2] * xtemp0 + aj[3] * xtemp1; + y_ptr[j + 2] += aj[4] * xtemp0 + aj[5] * xtemp1; + y_ptr[j + 3] += aj[6] * xtemp0 + aj[7] * xtemp1; + aj += 8; + + } + + for (; j < n; j++) { + y_ptr[j] += aj[0] * xtemp0 + aj[1] * xtemp1; + aj += 2; + } + + } else { + if (inc_y == 1) { + + BLASLONG register lda2 = lda << 1; + BLASLONG register lda4 = lda << 2; + BLASLONG register lda3 = lda2 + lda; + + for (j = 0; j < (n & -4); j += 4) { + + y_ptr[j] += *aj * xtemp0 + *(aj + 1) * xtemp1; + y_ptr[j + 1] += *(aj + lda) * xtemp0 + *(aj + lda + 1) * xtemp1; + y_ptr[j + 2] += *(aj + lda2) * xtemp0 + *(aj + lda2 + 1) * xtemp1; + y_ptr[j + 3] += *(aj + lda3) * xtemp0 + *(aj + lda3 + 1) * xtemp1; + aj += lda4; + } + + for (; j < n; j++) { + + y_ptr[j] += *aj * xtemp0 + *(aj + 1) * xtemp1; + aj += lda; + } + + } else { + for (j = 0; j < n; j++) { + *y_ptr += *aj * xtemp0 + *(aj + 1) * xtemp1; + y_ptr += inc_y; + aj += lda; + } + } + + } + return (0); + + } + + FLOAT xtemp = *x_ptr * alpha; + FLOAT *aj = a_ptr; + y_ptr = y; + if (lda == 1 && inc_y == 1) { + for (j = 0; j < (n & -4); j += 4) { + y_ptr[j] += aj[j] * xtemp; + y_ptr[j + 1] += aj[j + 1] * xtemp; + y_ptr[j + 2] += aj[j + 2] * xtemp; + y_ptr[j + 3] += aj[j + 3] * xtemp; + } + for (; j < n; j++) { + y_ptr[j] += aj[j] * xtemp; + } + + + } else { + if (inc_y == 1) { + + BLASLONG register lda2 = lda << 1; + BLASLONG register lda4 = lda << 2; + BLASLONG register lda3 = lda2 + lda; + for (j = 0; j < (n & -4); j += 4) { + y_ptr[j] += *aj * xtemp; + y_ptr[j + 1] += *(aj + lda) * xtemp; + y_ptr[j + 2] += *(aj + lda2) * xtemp; + y_ptr[j + 3] += *(aj + lda3) * xtemp; + aj += lda4; + } + + for (; j < n; j++) { + y_ptr[j] += *aj * xtemp; + aj += lda; + } + + } else { + for (j = 0; j < n; j++) { + *y_ptr += *aj * xtemp; + y_ptr += 
inc_y; + aj += lda; + } + + } + } + + return (0); + +} + diff --git a/kernel/power/drot.c b/kernel/power/drot.c index baeb54205..94d9d95a3 100644 --- a/kernel/power/drot.c +++ b/kernel/power/drot.c @@ -39,8 +39,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #pragma GCC optimize "O1" +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(POWER8) || defined(POWER9) #include "drot_microk_power8.c" +#elif defined(POWER10) +#include "drot_microk_power10.c" +#endif #endif @@ -113,12 +117,30 @@ int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT if ( (inc_x == 1) && (inc_y == 1) ) { +#if defined(POWER10) + if ( n >= 16 ) + { + BLASLONG align = ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 3) & 0x3; + for (i = 0; i < align; i++) { + temp = c*x[i] + s*y[i] ; + y[i] = c*y[i] - s*x[i] ; + x[i] = temp ; + } + } + BLASLONG n1 = (n-i) & -16; + if ( n1 > 0 ) + { + drot_kernel_16(n1,&x[i], &y[i], c, s); + i+=n1; + } +#else BLASLONG n1 = n & -16; if ( n1 > 0 ) { drot_kernel_16(n1, x1, y1, c, s); i=n1; } +#endif while(i < n) { diff --git a/kernel/power/drot_microk_power10.c b/kernel/power/drot_microk_power10.c new file mode 100644 index 000000000..e34e745c7 --- /dev/null +++ b/kernel/power/drot_microk_power10.c @@ -0,0 +1,148 @@ +/*************************************************************************** +Copyright (c) 2021, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define HAVE_KERNEL_16 1 + +static void drot_kernel_16 (long n, double *x, double *y, double c, double s) +{ + __asm__ + ( + XXSPLTD_S(36,%x5,0) // load c to both dwords + XXSPLTD_S(37,%x6,0) // load s to both dwords + "lxvp 32, 0(%3) \n\t" // load x + "lxvp 34, 32(%3) \n\t" + "lxvp 48, 0(%4) \n\t" // load y + "lxvp 50, 32(%4) \n\t" + + "addic. 
%2, %2, -8 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "xvmuldp 40, 32, 36 \n\t" // c * x + "xvmuldp 41, 33, 36 \n\t" + "xvmuldp 42, 34, 36 \n\t" + "xvmuldp 43, 35, 36 \n\t" + + "xvmuldp 44, 32, 37 \n\t" // s * x + "xvmuldp 45, 33, 37 \n\t" + "xvmuldp 46, 34, 37 \n\t" + "xvmuldp 47, 35, 37 \n\t" + + "lxvp 32, 64(%3) \n\t" // load x + "lxvp 34, 96(%3) \n\t" + "xvmuldp 52, 48, 36 \n\t" // c * y + "xvmuldp 53, 49, 36 \n\t" + "xvmuldp 54, 50, 36 \n\t" + "xvmuldp 55, 51, 36 \n\t" + + "xvmuldp 38, 48, 37 \n\t" // s * y + "xvmuldp 39, 49, 37 \n\t" + "xvmuldp 56, 50, 37 \n\t" + "xvmuldp 57, 51, 37 \n\t" + + "lxvp 48, 64(%4) \n\t" // load y + "lxvp 50, 96(%4) \n\t" + + "xvadddp 40, 40, 38 \n\t" // c * x + s * y + "xvadddp 41, 41, 39 \n\t" // c * x + s * y + "xvadddp 42, 42, 56 \n\t" // c * x + s * y + "xvadddp 43, 43, 57 \n\t" // c * x + s * y + + "stxvp 40, 0(%3) \n\t" // store x + "stxvp 42, 32(%3) \n\t" + + "xvsubdp 52, 52, 44 \n\t" // c * y - s * x + "xvsubdp 53, 53, 45 \n\t" // c * y - s * x + "xvsubdp 54, 54, 46 \n\t" // c * y - s * x + "xvsubdp 55, 55, 47 \n\t" // c * y - s * x + + "stxvp 52, 0(%4) \n\t" // store y + "stxvp 54, 32(%4) \n\t" + + "addi %3, %3, 64 \n\t" + "addi %4, %4, 64 \n\t" + + "addic. %2, %2, -8 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "xvmuldp 40, 32, 36 \n\t" // c * x + "xvmuldp 41, 33, 36 \n\t" + "xvmuldp 42, 34, 36 \n\t" + "xvmuldp 43, 35, 36 \n\t" + + "xvmuldp 52, 48, 36 \n\t" // c * y + "xvmuldp 53, 49, 36 \n\t" + "xvmuldp 54, 50, 36 \n\t" + "xvmuldp 55, 51, 36 \n\t" + + "xvmuldp 44, 32, 37 \n\t" // s * x + "xvmuldp 45, 33, 37 \n\t" + "xvmuldp 46, 34, 37 \n\t" + "xvmuldp 47, 35, 37 \n\t" + + "xvmuldp 38, 48, 37 \n\t" // s * y + "xvmuldp 39, 49, 37 \n\t" + "xvmuldp 56, 50, 37 \n\t" + "xvmuldp 57, 51, 37 \n\t" + + "xvadddp 40, 40, 38 \n\t" // c * x + s * y + "xvadddp 41, 41, 39 \n\t" // c * x + s * y + "xvadddp 42, 42, 56 \n\t" // c * x + s * y + "xvadddp 43, 43, 57 \n\t" // c * x + s * y + + "stxvp 40, 0(%3) \n\t" // store x + "stxvp 42, 32(%3) \n\t" + "xvsubdp 52, 52, 44 \n\t" // c * y - s * x + "xvsubdp 53, 53, 45 \n\t" // c * y - s * x + "xvsubdp 54, 54, 46 \n\t" // c * y - s * x + "xvsubdp 55, 55, 47 \n\t" // c * y - s * x + + "stxvp 52, 0(%4) \n\t" // store y + "stxvp 54, 32(%4) \n\t" + + "#n=%2 x=%0=%3 y=%1=%4 c=%5 s=%6\n" + : + "+m" (*x), + "+m" (*y), + "+r" (n), // 2 + "+b" (x), // 3 + "+b" (y) // 4 + : + "d" (c), // 5 + "d" (s) // 6 + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", + "vs48","vs49","vs50","vs51","vs52","vs53","vs54","vs55", + "vs56","vs57" + ); +} diff --git a/kernel/power/drot_microk_power8.c b/kernel/power/drot_microk_power8.c index 016b7764d..259c08187 100644 --- a/kernel/power/drot_microk_power8.c +++ b/kernel/power/drot_microk_power8.c @@ -51,8 +51,8 @@ static void drot_kernel_16 (long n, double *x, double *y, double c, double s) __asm__ ( - "xxspltd 36, %x13, 0 \n\t" // load c to both dwords - "xxspltd 37, %x14, 0 \n\t" // load s to both dwords + XXSPLTD_S(36,%x13,0) // load c to both dwords + XXSPLTD_S(37,%x14,0) // load s to both dwords "lxvd2x 32, 0, %3 \n\t" // load x "lxvd2x 33, %15, %3 \n\t" @@ -68,10 +68,10 @@ static void drot_kernel_16 (long n, double *x, double *y, double c, double s) "addi %4, %4, 64 \n\t" "addic. 
%2, %2, -8 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmuldp 40, 32, 36 \n\t" // c * x "xvmuldp 41, 33, 36 \n\t" @@ -135,9 +135,9 @@ static void drot_kernel_16 (long n, double *x, double *y, double c, double s) "addi %4, %4, 128 \n\t" "addic. %2, %2, -8 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmuldp 40, 32, 36 \n\t" // c * x "xvmuldp 41, 33, 36 \n\t" diff --git a/kernel/power/dscal.c b/kernel/power/dscal.c index 779a08e9c..96c4e51bc 100644 --- a/kernel/power/dscal.c +++ b/kernel/power/dscal.c @@ -35,8 +35,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(POWER8) || defined(POWER9) #include "dscal_microk_power8.c" +#elif defined(POWER10) +#include "dscal_microk_power10.c" +#endif #endif #if !defined(HAVE_KERNEL_8) @@ -98,12 +102,28 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS if ( da == 0.0 ) { +#if defined(POWER10) + if ( n >= 16 ) + { + BLASLONG align = ((32 - ((uintptr_t)x & (uintptr_t)0x1F)) >> 3) & 0x3; + for (j = 0; j < align; j++) { + x[j] = 0.0; + } + } + BLASLONG n1 = (n-j) & -16; + if ( n1 > 0 ) + { + dscal_kernel_8_zero(n1, &x[j]); + j+=n1; + } +#else BLASLONG n1 = n & -16; if ( n1 > 0 ) { dscal_kernel_8_zero(n1, x); j=n1; } +#endif while(j < n) { @@ -116,12 +136,28 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS else { +#if defined(POWER10) + if ( n >= 16 ) + { + BLASLONG align = ((32 - ((uintptr_t)x & (uintptr_t)0x1F)) >> 3) & 0x3; + for (j = 0; j < align; j++) { + x[j] = da * x[j]; + } + } + BLASLONG n1 = (n-j) & -16; + if ( n1 > 0 ) + { + dscal_kernel_8(n1, &x[j], da); + j+=n1; + } +#else BLASLONG n1 = n & -16; if ( n1 > 0 ) { dscal_kernel_8(n1, x, da); j=n1; } +#endif while(j < n) { diff --git a/kernel/power/dscal_microk_power10.c b/kernel/power/dscal_microk_power10.c new file mode 100644 index 000000000..d0d506f24 --- /dev/null +++ b/kernel/power/dscal_microk_power10.c @@ -0,0 +1,134 @@ +/*************************************************************************** +Copyright (c) 2021, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define HAVE_KERNEL_8 1 + +static void dscal_kernel_8 (long n, double *x, double alpha) +{ + __asm__ + ( + "dcbt 0, %2 \n\t" + + XXSPLTD_S(48,%x3,0) + + "lxvp 32, 0(%2) \n\t" + "lxvp 34, 32(%2) \n\t" + "lxvp 36, 64(%2) \n\t" + "lxvp 38, 96(%2) \n\t" + + "addic. %1, %1, -16 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "xvmuldp 40, 32, 48 \n\t" + "xvmuldp 41, 33, 48 \n\t" + "xvmuldp 42, 34, 48 \n\t" + "xvmuldp 43, 35, 48 \n\t" + "lxvp 32, 128(%2) \n\t" + "lxvp 34, 160(%2) \n\t" + "xvmuldp 44, 36, 48 \n\t" + "xvmuldp 45, 37, 48 \n\t" + "xvmuldp 46, 38, 48 \n\t" + "xvmuldp 47, 39, 48 \n\t" + "lxvp 36, 192(%2) \n\t" + "lxvp 38, 224(%2) \n\t" + + "stxvp 40, 0(%2) \n\t" + "stxvp 42, 32(%2) \n\t" + "stxvp 44, 64(%2) \n\t" + "stxvp 46, 96(%2) \n\t" + + "addi %2, %2, 128 \n\t" + + "addic. %1, %1, -16 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "xvmuldp 40, 32, 48 \n\t" + "xvmuldp 41, 33, 48 \n\t" + "xvmuldp 42, 34, 48 \n\t" + "xvmuldp 43, 35, 48 \n\t" + + "xvmuldp 44, 36, 48 \n\t" + "xvmuldp 45, 37, 48 \n\t" + "xvmuldp 46, 38, 48 \n\t" + "xvmuldp 47, 39, 48 \n\t" + + "stxvp 40, 0(%2) \n\t" + "stxvp 42, 32(%2) \n\t" + "stxvp 44, 64(%2) \n\t" + "stxvp 46, 96(%2) \n\t" + + "#n=%1 alpha=%3 x=%0=%2" + : + "+m" (*x), + "+r" (n), // 1 + "+b" (x) // 2 + : + "d" (alpha) // 3 + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47","vs48" + ); +} + + +static void dscal_kernel_8_zero (long n, double *x) +{ + + __asm__ + ( + "xxlxor 32, 32, 32 \n\t" + "xxlxor 33, 33, 33 \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "stxvp 32, 0(%2) \n\t" + "stxvp 32, 32(%2) \n\t" + "stxvp 32, 64(%2) \n\t" + "stxvp 32, 96(%2) \n\t" + + "addi %2, %2, 128 \n\t" + + "addic. %1, %1, -16 \n\t" + "bgt one%= \n" + + "#n=%1 x=%0=%2 " + : + "=m" (*x), + "+r" (n), // 1 + "+b" (x) // 2 + : + : + "cr0","vs32","vs33" + ); +} diff --git a/kernel/power/dscal_microk_power8.c b/kernel/power/dscal_microk_power8.c index 04898eb3d..e9bacd05a 100644 --- a/kernel/power/dscal_microk_power8.c +++ b/kernel/power/dscal_microk_power8.c @@ -41,7 +41,7 @@ static void dscal_kernel_8 (long n, double *x, double alpha) ( "dcbt 0, %2 \n\t" - "xxspltd %x3, %x3, 0 \n\t" + XXSPLTD_S(%x3,%x3,0) "lxvd2x 32, 0, %2 \n\t" "lxvd2x 33, %4, %2 \n\t" @@ -55,10 +55,10 @@ static void dscal_kernel_8 (long n, double *x, double alpha) "addi %2, %2, 128 \n\t" "addic. %1, %1, -16 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmuldp 40, 32, %x3 \n\t" "xvmuldp 41, 33, %x3 \n\t" @@ -91,9 +91,9 @@ static void dscal_kernel_8 (long n, double *x, double alpha) "addi %2, %2, 256 \n\t" "addic. 
%1, %1, -16 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmuldp 40, 32, %x3 \n\t" "xvmuldp 41, 33, %x3 \n\t" @@ -146,8 +146,8 @@ static void dscal_kernel_8_zero (long n, double *x) ( "xxlxor %x3, %x3, %x3 \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "stxvd2x %x3, 0, %2 \n\t" "stxvd2x %x3, %4, %2 \n\t" @@ -161,7 +161,7 @@ static void dscal_kernel_8_zero (long n, double *x) "addi %2, %2, 128 \n\t" "addic. %1, %1, -16 \n\t" - "bgt 1b \n" + "bgt one%= \n" "#n=%1 x=%0=%2 t0=%x3 o16=%4 o32=%5 o48=%6 o64=%7 o80=%8 o96=%9 o112=%10" : diff --git a/kernel/power/dswap.c b/kernel/power/dswap.c index 52b7f50da..9e6229c6a 100644 --- a/kernel/power/dswap.c +++ b/kernel/power/dswap.c @@ -35,8 +35,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(POWER8) || defined(POWER9) #include "dswap_microk_power8.c" +#elif defined(POWER10) +#include "swap_microk_power10.c" +#endif #endif #ifndef HAVE_KERNEL_32 @@ -113,12 +117,30 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT *x, if ( (inc_x == 1) && (inc_y == 1 )) { +#if defined(POWER10) + if ( n >= 32 ) + { + BLASLONG align = ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 3) & 0x3; + for (i = 0; i < align; i++) { + temp = y[i]; + y[i] = x[i]; + x[i] = temp; + } + } + BLASLONG n1 = (n-i) & -32; + if ( n1 > 0 ) + { + dswap_kernel_32(n1,&x[i], &y[i]); + i+=n1; + } +#else BLASLONG n1 = n & -32; if ( n1 > 0 ) { dswap_kernel_32(n1, x, y); i=n1; } +#endif while(i < n) { diff --git a/kernel/power/dswap_microk_power8.c b/kernel/power/dswap_microk_power8.c index 31eff3449..ecfd5c9f9 100644 --- a/kernel/power/dswap_microk_power8.c +++ b/kernel/power/dswap_microk_power8.c @@ -39,8 +39,8 @@ static void dswap_kernel_32 (long n, double *x, double *y) { __asm__ ( - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "lxvd2x 32, 0, %4 \n\t" "lxvd2x 33, %5, %4 \n\t" @@ -131,7 +131,7 @@ static void dswap_kernel_32 (long n, double *x, double *y) "addi %4, %4, 128 \n\t" "addic. %2, %2, -32 \n\t" - "bgt 1b \n" + "bgt one%= \n" "#n=%2 x=%0=%3 y=%1=%4 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" : diff --git a/kernel/power/dtrmm_kernel_16x4_power8.S b/kernel/power/dtrmm_kernel_16x4_power8.S index 47e703a3a..91154ad37 100644 --- a/kernel/power/dtrmm_kernel_16x4_power8.S +++ b/kernel/power/dtrmm_kernel_16x4_power8.S @@ -82,7 +82,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #ifdef __64BIT__ -#define STACKSIZE 320 #define STACKSIZE 520 #define ALPHA_SP 296+200(SP) #define FZERO 304+200(SP) @@ -96,7 +95,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -257,8 +256,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stvx v31, r11, r0 li r11,0 - stw r31, 144(SP) - stfd f1, ALPHA_SP stw r0, FZERO @@ -271,7 +268,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
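A note on the new POWER10 paths added above in drot.c, dscal.c and dswap.c: before entering the lxvp/stxvp-based kernels they now peel off up to three leading elements so that the hot loop runs on a 32-byte-aligned stream, which the paired vector loads and stores presumably favor. drot and dswap align on y (when x and y start at different offsets modulo 32, only one of the two streams can be aligned), while dscal aligns its single array x. A minimal sketch of the peel computation, assuming the pointer is at least 8-byte aligned; the helper name is illustrative:

#include <stdint.h>

/* Doubles to process scalar so that x + result is 32-byte aligned.
   (32 - (addr mod 32)) >> 3 counts the doubles up to the next 32-byte
   boundary; the final "& 0x3" folds the already-aligned case (which
   would otherwise yield 4) back down to 0. */
static inline long align_peel_32b(const double *x)
{
    return ((32 - ((uintptr_t)x & (uintptr_t)0x1F)) >> 3) & 0x3;
}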
slwi LDC, LDC, BASE_SHIFT #if defined(TRMMKERNEL) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/dtrmm_macros_16x4_power8.S b/kernel/power/dtrmm_macros_16x4_power8.S index 079144a90..efb034594 100644 --- a/kernel/power/dtrmm_macros_16x4_power8.S +++ b/kernel/power/dtrmm_macros_16x4_power8.S @@ -37,7 +37,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=4, M=16 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD4x16_1', ` +#else .macro LOAD4x16_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -60,9 +64,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_I1', ` +#else .macro KERNEL4x16_I1 +#endif xvmuldp vs32, vs0, vs24 xvmuldp vs33, vs1, vs24 @@ -127,9 +139,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_1', ` +#else .macro KERNEL4x16_1 +#endif xvmaddadp vs32, vs0, vs24 xvmaddadp vs33, vs1, vs24 @@ -195,9 +215,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_2', ` +#else .macro KERNEL4x16_2 +#endif xvmaddadp vs32, vs8, vs28 xvmaddadp vs33, vs9, vs28 @@ -262,9 +290,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_E2', ` +#else .macro KERNEL4x16_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -303,9 +339,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs62, vs14, vs31 xvmaddadp vs63, vs15, vs31 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_SUBI1', ` +#else .macro KERNEL4x16_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -364,9 +408,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs62, vs6, vs27 xvmuldp vs63, vs7, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_SUB1', ` +#else .macro KERNEL4x16_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -425,9 +477,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs62, vs6, vs27 xvmaddadp vs63, vs7, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x16', ` +#else .macro SAVE4x16 +#endif mr T1, CO addi T2, T1, 64 @@ -615,13 +675,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=4, M=8 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD4x8_1', ` +#else .macro LOAD4x8_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -638,9 +706,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
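The label rewrites running through the power8 microkernels above ("1:"/"1b"/"2f" becoming "one%=:"/"one%="/"two%=") replace GNU-style numeric local labels with named labels made unique by the %= escape, which GCC and Clang expand to a number distinct for every instance of the asm statement; this keeps labels unambiguous if the asm is duplicated by inlining, and avoids the numeric b/f suffix syntax that not every assembler accepts. The companion .p2align-to-.align change drops a gas-specific spelling for the same reason. A minimal self-contained illustration of %= (hypothetical function, PowerPC extended asm, assumes n >= 1):

/* Count n down to zero; "loop%=" expands to e.g. "loop42", unique
   per asm instance, so two inlined copies cannot collide. */
static long count_down(long n)
{
    __asm__ volatile
    (
        "loop%=:               \n\t"
        "addic. %0, %0, -1     \n\t"   /* n-- and set cr0 */
        "bgt    loop%=         \n"     /* repeat while n > 0 */
        : "+r" (n)
        :
        : "cr0"
    );
    return n;
}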
addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_I1', ` +#else .macro KERNEL4x8_I1 +#endif xvmuldp vs32, vs0, vs24 xvmuldp vs33, vs1, vs24 @@ -679,9 +755,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_1', ` +#else .macro KERNEL4x8_1 +#endif xvmaddadp vs32, vs0, vs24 xvmaddadp vs33, vs1, vs24 @@ -719,9 +803,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_2', ` +#else .macro KERNEL4x8_2 +#endif xvmaddadp vs32, vs8, vs28 xvmaddadp vs33, vs9, vs28 @@ -759,9 +851,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_E2', ` +#else .macro KERNEL4x8_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -784,9 +884,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs58, vs10, vs31 xvmaddadp vs59, vs11, vs31 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_SUBI1', ` +#else .macro KERNEL4x8_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -822,9 +930,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs58, vs2, vs27 xvmuldp vs59, vs3, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_SUB1', ` +#else .macro KERNEL4x8_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -860,9 +976,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs58, vs2, vs27 xvmaddadp vs59, vs3, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x8', ` +#else .macro SAVE4x8 +#endif mr T1, CO @@ -970,13 +1094,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=4, M=4 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD4x4_1', ` +#else .macro LOAD4x4_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -989,9 +1121,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 32 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_I1', ` +#else .macro KERNEL4x4_I1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -1017,9 +1157,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs56, vs0, vs27 xvmuldp vs57, vs1, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_1', ` +#else .macro KERNEL4x4_1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -1045,9 +1193,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs56, vs0, vs27 xvmaddadp vs57, vs1, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_2', ` +#else .macro KERNEL4x4_2 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -1073,9 +1229,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
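The #if defined(_AIX) wrappers that begin here and repeat for every macro in dtrmm_macros_16x4_power8.S all follow one scheme: on AIX the file is evidently preprocessed with m4 (the backquote/quote pairs are m4 quoting), so each gas .macro/.endm pair becomes an m4 define whose quoted body is substituted wherever the name appears, while other platforms keep the gas directives. The skeleton, with a hypothetical macro name and a one-line illustrative body:

#if defined(_AIX)
define(`ACC_STEP', `
#else
.macro ACC_STEP
#endif

	xvmaddadp	vs32, vs0, vs24

#if defined(_AIX)
')
#else
.endm
#endif

A later ACC_STEP in the source then works in both modes: gas expands the macro, and m4 substitutes the quoted text.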
xvmaddadp vs56, vs8, vs31 xvmaddadp vs57, vs9, vs31 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_E2', ` +#else .macro KERNEL4x4_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -1090,9 +1254,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs56, vs8, vs31 xvmaddadp vs57, vs9, vs31 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_SUBI1', ` +#else .macro KERNEL4x4_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -1118,9 +1290,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs56, vs0, vs27 xvmuldp vs57, vs1, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_SUB1', ` +#else .macro KERNEL4x4_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -1146,9 +1326,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs56, vs0, vs27 xvmaddadp vs57, vs1, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x4', ` +#else .macro SAVE4x4 +#endif mr T1, CO @@ -1224,13 +1412,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=4, M=2 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD4x2_1', ` +#else .macro LOAD4x2_1 +#endif lxvd2x vs0, 0, AO @@ -1242,9 +1438,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 16 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_I1', ` +#else .macro KERNEL4x2_I1 +#endif lxvd2x vs8, 0, AO @@ -1265,9 +1469,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs56, vs0, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_1', ` +#else .macro KERNEL4x2_1 +#endif lxvd2x vs8, 0, AO @@ -1288,9 +1500,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs56, vs0, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_2', ` +#else .macro KERNEL4x2_2 +#endif lxvd2x vs0, 0, AO @@ -1311,9 +1531,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs56, vs8, vs31 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_E2', ` +#else .macro KERNEL4x2_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -1324,9 +1552,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs56, vs8, vs31 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_SUBI1', ` +#else .macro KERNEL4x2_SUBI1 +#endif lxvd2x vs0, 0, AO @@ -1347,9 +1583,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs56, vs0, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_SUB1', ` +#else .macro KERNEL4x2_SUB1 +#endif lxvd2x vs0, 0, AO @@ -1370,9 +1614,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs56, vs0, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x2', ` +#else .macro SAVE4x2 +#endif mr T1, CO @@ -1432,13 +1684,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=4, M=1 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD4x1_1', ` +#else .macro LOAD4x1_1 +#endif lxsdx vs0, 0, AO @@ -1450,9 +1710,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 8 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_I1', ` +#else .macro KERNEL4x1_I1 +#endif lxsdx vs8, 0, AO @@ -1473,9 +1741,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs56, vs0, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_1', ` +#else .macro KERNEL4x1_1 +#endif lxsdx vs8, 0, AO @@ -1496,9 +1772,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs56, vs0, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_2', ` +#else .macro KERNEL4x1_2 +#endif lxsdx vs0, 0, AO @@ -1519,9 +1803,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs56, vs8, vs31 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_E2', ` +#else .macro KERNEL4x1_E2 +#endif xsmaddadp vs32, vs8, vs28 @@ -1532,9 +1824,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs56, vs8, vs31 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_SUBI1', ` +#else .macro KERNEL4x1_SUBI1 +#endif lxsdx vs0, 0, AO @@ -1555,9 +1855,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs56, vs0, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_SUB1', ` +#else .macro KERNEL4x1_SUB1 +#endif lxsdx vs0, 0, AO @@ -1578,9 +1886,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs56, vs0, vs27 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x1', ` +#else .macro SAVE4x1 +#endif mr T1, CO @@ -1640,13 +1956,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=2, M=16 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD2x16_1', ` +#else .macro LOAD2x16_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -1666,9 +1990,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_I1', ` +#else .macro KERNEL2x16_I1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -1707,9 +2039,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs46, vs6, vs25 xvmuldp vs47, vs7, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_1', ` +#else .macro KERNEL2x16_1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -1748,9 +2088,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddadp vs46, vs6, vs25 xvmaddadp vs47, vs7, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_2', ` +#else .macro KERNEL2x16_2 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -1789,9 +2137,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs46, vs14, vs29 xvmaddadp vs47, vs15, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_E2', ` +#else .macro KERNEL2x16_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -1812,9 +2168,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs46, vs14, vs29 xvmaddadp vs47, vs15, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_SUBI1', ` +#else .macro KERNEL2x16_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -1853,9 +2217,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs46, vs6, vs25 xvmuldp vs47, vs7, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_SUB1', ` +#else .macro KERNEL2x16_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -1894,9 +2266,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs46, vs6, vs25 xvmaddadp vs47, vs7, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x16', ` +#else .macro SAVE2x16 +#endif mr T1, CO addi T2, T1, 64 @@ -1990,13 +2370,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=4, M=8 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD2x8_1', ` +#else .macro LOAD2x8_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2009,9 +2397,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_I1', ` +#else .macro KERNEL2x8_I1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -2035,9 +2431,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs42, vs2, vs25 xvmuldp vs43, vs3, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_1', ` +#else .macro KERNEL2x8_1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -2061,9 +2465,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs42, vs2, vs25 xvmaddadp vs43, vs3, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_2', ` +#else .macro KERNEL2x8_2 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2087,9 +2499,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs42, vs10, vs29 xvmaddadp vs43, vs11, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_E2', ` +#else .macro KERNEL2x8_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -2102,9 +2522,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs42, vs10, vs29 xvmaddadp vs43, vs11, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUBI1', ` +#else .macro KERNEL2x8_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2128,9 +2556,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
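For readers tracking the macro families (LOAD…_1, KERNEL…_I1/_1/_2/_E2, KERNEL…_SUBI1/_SUB1, SAVE…): the _1/_2 pair double-buffers the vector registers, with _1 accumulating out of vs0-vs7 while fetching the next A/B panel into vs8-vs15 and _2 doing the reverse; _I1 starts the chain with plain multiplies, and _E2 drains the final panel without issuing loads. A hedged sketch of how a driver would sequence the 4x16 family (illustrative only; the actual loop lives in the kernel .S file, not in these hunks):

	LOAD4x16_1	# preload A into vs0-vs7, B into vs24-vs27
	KERNEL4x16_I1	# first products (xvmuldp) + fetch next panel
loop:
	KERNEL4x16_2	# consume vs8-vs15 while refilling vs0-vs7
	KERNEL4x16_1	# consume vs0-vs7 while refilling vs8-vs15
	bdnz	loop	# two K-iterations per trip
	KERNEL4x16_E2	# epilogue: last panel, no further loads
	SAVE4x16	# apply alpha and store the C tile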
xvmuldp vs42, vs2, vs25 xvmuldp vs43, vs3, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUB1', ` +#else .macro KERNEL2x8_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2154,9 +2590,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs42, vs2, vs25 xvmaddadp vs43, vs3, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x8', ` +#else .macro SAVE2x8 +#endif mr T1, CO @@ -2212,13 +2656,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=2, M=4 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD2x4_1', ` +#else .macro LOAD2x4_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2229,9 +2681,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 32 addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_I1', ` +#else .macro KERNEL2x4_I1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -2249,9 +2709,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs40, vs0, vs25 xvmuldp vs41, vs1, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_1', ` +#else .macro KERNEL2x4_1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -2269,9 +2737,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs40, vs0, vs25 xvmaddadp vs41, vs1, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_2', ` +#else .macro KERNEL2x4_2 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2289,9 +2765,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs40, vs8, vs29 xvmaddadp vs41, vs9, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_E2', ` +#else .macro KERNEL2x4_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -2300,9 +2784,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs40, vs8, vs29 xvmaddadp vs41, vs9, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUBI1', ` +#else .macro KERNEL2x4_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2320,9 +2812,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs40, vs0, vs25 xvmuldp vs41, vs1, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUB1', ` +#else .macro KERNEL2x4_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2340,9 +2840,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs40, vs0, vs25 xvmaddadp vs41, vs1, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x4', ` +#else .macro SAVE2x4 +#endif mr T1, CO @@ -2382,13 +2890,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=2, M=2 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD2x2_1', ` +#else .macro LOAD2x2_1 +#endif lxvd2x vs0, 0, AO @@ -2398,9 +2914,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi AO, AO, 16 addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_I1', ` +#else .macro KERNEL2x2_I1 +#endif lxvd2x vs8, 0, AO @@ -2415,9 +2939,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs40, vs0, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_1', ` +#else .macro KERNEL2x2_1 +#endif lxvd2x vs8, 0, AO @@ -2432,9 +2964,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs40, vs0, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_2', ` +#else .macro KERNEL2x2_2 +#endif lxvd2x vs0, 0, AO @@ -2449,18 +2989,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs40, vs8, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_E2', ` +#else .macro KERNEL2x2_E2 +#endif xvmaddadp vs32, vs8, vs28 xvmaddadp vs40, vs8, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUBI1', ` +#else .macro KERNEL2x2_SUBI1 +#endif lxvd2x vs0, 0, AO @@ -2475,9 +3031,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs40, vs0, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUB1', ` +#else .macro KERNEL2x2_SUB1 +#endif lxvd2x vs0, 0, AO @@ -2492,9 +3056,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs40, vs0, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x2', ` +#else .macro SAVE2x2 +#endif mr T1, CO @@ -2526,13 +3098,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=2, M=1 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD2x1_1', ` +#else .macro LOAD2x1_1 +#endif lxsdx vs0, 0, AO @@ -2542,9 +3122,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 8 addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_I1', ` +#else .macro KERNEL2x1_I1 +#endif lxsdx vs8, 0, AO @@ -2559,9 +3147,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs40, vs0, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_1', ` +#else .macro KERNEL2x1_1 +#endif lxsdx vs8, 0, AO @@ -2576,9 +3172,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs40, vs0, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_2', ` +#else .macro KERNEL2x1_2 +#endif lxsdx vs0, 0, AO @@ -2593,18 +3197,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs40, vs8, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_E2', ` +#else .macro KERNEL2x1_E2 +#endif xsmaddadp vs32, vs8, vs28 xsmaddadp vs40, vs8, vs29 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUBI1', ` +#else .macro KERNEL2x1_SUBI1 +#endif lxsdx vs0, 0, AO @@ -2619,9 +3239,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
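For the M=1 columns the macros switch from lxvd2x/xvmaddadp to lxsdx/xsmaddadp, i.e. scalar doubleword loads and scalar fused multiply-adds, since a single row leaves nothing to fill a vector lane pair with. What one KERNEL2x1 step amounts to in C (reference sketch, names illustrative):

/* One A element updates the accumulator of each of the two B columns. */
static void kernel2x1_ref(double *acc, double a0, const double *b)
{
    acc[0] += a0 * b[0];   /* xsmaddadp vs32, vs0, vs24 */
    acc[1] += a0 * b[1];   /* xsmaddadp vs40, vs0, vs25 */
}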
xsmuldp vs40, vs0, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUB1', ` +#else .macro KERNEL2x1_SUB1 +#endif lxsdx vs0, 0, AO @@ -2636,9 +3264,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs40, vs0, vs25 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x1', ` +#else .macro SAVE2x1 +#endif mr T1, CO @@ -2670,13 +3306,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=1, M=16 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD1x16_1', ` +#else .macro LOAD1x16_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2695,9 +3339,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_I1', ` +#else .macro KERNEL1x16_I1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -2726,9 +3378,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs38, vs6, vs24 xvmuldp vs39, vs7, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_1', ` +#else .macro KERNEL1x16_1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -2757,9 +3417,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs38, vs6, vs24 xvmaddadp vs39, vs7, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_2', ` +#else .macro KERNEL1x16_2 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2788,9 +3456,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs38, vs14, vs28 xvmaddadp vs39, vs15, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_E2', ` +#else .macro KERNEL1x16_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -2802,9 +3478,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs38, vs14, vs28 xvmaddadp vs39, vs15, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_SUBI1', ` +#else .macro KERNEL1x16_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2833,9 +3517,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs38, vs6, vs24 xvmuldp vs39, vs7, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_SUB1', ` +#else .macro KERNEL1x16_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2864,9 +3556,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs38, vs6, vs24 xvmaddadp vs39, vs7, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x16', ` +#else .macro SAVE1x16 +#endif mr T1, CO addi T2, T1, 64 @@ -2915,13 +3615,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=4, M=8 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD1x8_1', ` +#else .macro LOAD1x8_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2933,9 +3641,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
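One detail worth noting across all these families: the SUBI1 variants issue xvmuldp/xsmuldp while the SUB1 variants issue xvmaddadp/xsmaddadp, i.e. the I form initializes the accumulators with the first product and the plain form adds to them, letting the driver handle leftover K-iterations without pre-zeroing registers. The same split in a C reference (assumes k >= 1; illustrative only):

/* First product initializes the accumulator (as SUBI1 does), the
   rest are fused accumulates (as SUB1 does). */
static double dot_ref(const double *a, const double *b, long k)
{
    double acc = a[0] * b[0];      /* KERNEL..._SUBI1: xvmuldp  */
    for (long i = 1; i < k; i++)
        acc += a[i] * b[i];        /* KERNEL..._SUB1: xvmaddadp */
    return acc;
}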
addi AO, AO, 64 addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_I1', ` +#else .macro KERNEL1x8_I1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -2953,9 +3669,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs34, vs2, vs24 xvmuldp vs35, vs3, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_1', ` +#else .macro KERNEL1x8_1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -2973,9 +3697,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs34, vs2, vs24 xvmaddadp vs35, vs3, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_2', ` +#else .macro KERNEL1x8_2 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -2993,9 +3725,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs34, vs10, vs28 xvmaddadp vs35, vs11, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_E2', ` +#else .macro KERNEL1x8_E2 +#endif xvmaddadp vs32, vs8, vs28 @@ -3003,9 +3743,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs34, vs10, vs28 xvmaddadp vs35, vs11, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUBI1', ` +#else .macro KERNEL1x8_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -3023,9 +3771,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs34, vs2, vs24 xvmuldp vs35, vs3, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUB1', ` +#else .macro KERNEL1x8_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -3043,9 +3799,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs34, vs2, vs24 xvmaddadp vs35, vs3, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x8', ` +#else .macro SAVE1x8 +#endif mr T1, CO @@ -3075,13 +3839,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=1, M=4 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD1x4_1', ` +#else .macro LOAD1x4_1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -3091,9 +3863,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 32 addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_I1', ` +#else .macro KERNEL1x4_I1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -3107,9 +3887,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs32, vs0, vs24 xvmuldp vs33, vs1, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_1', ` +#else .macro KERNEL1x4_1 +#endif lxvd2x vs8, 0, AO lxvd2x vs9, o16, AO @@ -3123,9 +3911,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs32, vs0, vs24 xvmaddadp vs33, vs1, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_2', ` +#else .macro KERNEL1x4_2 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -3139,17 +3935,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddadp vs32, vs8, vs28 xvmaddadp vs33, vs9, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_E2', ` +#else .macro KERNEL1x4_E2 +#endif xvmaddadp vs32, vs8, vs28 xvmaddadp vs33, vs9, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUBI1', ` +#else .macro KERNEL1x4_SUBI1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -3163,9 +3975,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs32, vs0, vs24 xvmuldp vs33, vs1, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUB1', ` +#else .macro KERNEL1x4_SUB1 +#endif lxvd2x vs0, 0, AO lxvd2x vs1, o16, AO @@ -3179,9 +3999,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs32, vs0, vs24 xvmaddadp vs33, vs1, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x4', ` +#else .macro SAVE1x4 +#endif mr T1, CO @@ -3203,13 +4031,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=1, M=2 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD1x2_1', ` +#else .macro LOAD1x2_1 +#endif lxvd2x vs0, 0, AO @@ -3218,9 +4054,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 16 addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_I1', ` +#else .macro KERNEL1x2_I1 +#endif lxvd2x vs8, 0, AO @@ -3232,9 +4076,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs32, vs0, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_1', ` +#else .macro KERNEL1x2_1 +#endif lxvd2x vs8, 0, AO @@ -3246,9 +4098,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs32, vs0, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_2', ` +#else .macro KERNEL1x2_2 +#endif lxvd2x vs0, 0, AO @@ -3260,16 +4120,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs32, vs8, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_E2', ` +#else .macro KERNEL1x2_E2 +#endif xvmaddadp vs32, vs8, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUBI1', ` +#else .macro KERNEL1x2_SUBI1 +#endif lxvd2x vs0, 0, AO @@ -3281,9 +4157,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs32, vs0, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUB1', ` +#else .macro KERNEL1x2_SUB1 +#endif lxvd2x vs0, 0, AO @@ -3295,9 +4179,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs32, vs0, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x2', ` +#else .macro SAVE1x2 +#endif mr T1, CO @@ -3315,13 +4207,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************* * Macros for N=1, M=1 * *********************************************************************/ +#if defined(_AIX) +define(`LOAD1x1_1', ` +#else .macro LOAD1x1_1 +#endif lxsdx vs0, 0, AO @@ -3330,9 +4230,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 8 addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_I1', ` +#else .macro KERNEL1x1_I1 +#endif lxsdx vs8, 0, AO @@ -3344,9 +4252,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs32, vs0, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_1', ` +#else .macro KERNEL1x1_1 +#endif lxsdx vs8, 0, AO @@ -3358,9 +4274,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs32, vs0, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_2', ` +#else .macro KERNEL1x1_2 +#endif lxsdx vs0, 0, AO @@ -3372,16 +4296,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs32, vs8, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_E2', ` +#else .macro KERNEL1x1_E2 +#endif xsmaddadp vs32, vs8, vs28 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUBI1', ` +#else .macro KERNEL1x1_SUBI1 +#endif lxsdx vs0, 0, AO @@ -3393,9 +4333,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs32, vs0, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUB1', ` +#else .macro KERNEL1x1_SUB1 +#endif lxsdx vs0, 0, AO @@ -3407,9 +4355,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs32, vs0, vs24 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x1', ` +#else .macro SAVE1x1 +#endif mr T1, CO @@ -3427,5 +4383,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/dtrsm_kernel_LT_16x4_power8.S b/kernel/power/dtrsm_kernel_LT_16x4_power8.S index 7a4a30390..5b349db12 100644 --- a/kernel/power/dtrsm_kernel_LT_16x4_power8.S +++ b/kernel/power/dtrsm_kernel_LT_16x4_power8.S @@ -47,7 +47,6 @@ #endif #ifdef __64BIT__ -#define STACKSIZE 320 #define STACKSIZE 520 #define ALPHA 296+200(SP) #define FZERO 304+200(SP) @@ -61,7 +60,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -217,7 +216,7 @@ li r11,0 #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/dtrsm_macros_LT_16x4_power8.S b/kernel/power/dtrsm_macros_LT_16x4_power8.S index dc47daa3a..5a5c4037c 100644 --- a/kernel/power/dtrsm_macros_LT_16x4_power8.S +++ b/kernel/power/dtrsm_macros_LT_16x4_power8.S @@ -1,46 +1,58 @@ +#if defined(_AIX) +define(`INIT_16x4', ` +#else .macro INIT_16x4 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 - xvmovdp vs36, vs0 - xvmovdp vs37, vs0 - xvmovdp vs38, vs0 - xvmovdp vs39, vs0 - xvmovdp vs40, vs0 - xvmovdp vs41, vs0 - xvmovdp vs42, vs0 - xvmovdp vs43, vs0 - xvmovdp vs44, vs0 - xvmovdp vs45, vs0 - xvmovdp vs46, vs0 - xvmovdp vs47, vs0 - xvmovdp vs48, vs0 - xvmovdp vs49, vs0 - xvmovdp vs50, vs0 - xvmovdp vs51, vs0 - xvmovdp vs52, vs0 - xvmovdp vs53, vs0 - xvmovdp vs54, vs0 - xvmovdp vs55, vs0 - xvmovdp vs56, vs0 - xvmovdp vs57, vs0 - xvmovdp vs58, vs0 - xvmovdp vs59, vs0 - xvmovdp vs60, vs0 - xvmovdp vs61, vs0 - xvmovdp vs62, vs0 - xvmovdp vs63, vs0 - + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) + XVMOVDP(vs36,vs0) + XVMOVDP(vs37,vs0) + XVMOVDP(vs38,vs0) + XVMOVDP(vs39,vs0) + XVMOVDP(vs40,vs0) + XVMOVDP(vs41,vs0) + XVMOVDP(vs42,vs0) + XVMOVDP(vs43,vs0) + XVMOVDP(vs44,vs0) + XVMOVDP(vs45,vs0) + XVMOVDP(vs46,vs0) + XVMOVDP(vs47,vs0) + XVMOVDP(vs48,vs0) + XVMOVDP(vs49,vs0) + XVMOVDP(vs50,vs0) + XVMOVDP(vs51,vs0) + XVMOVDP(vs52,vs0) + XVMOVDP(vs53,vs0) + XVMOVDP(vs54,vs0) + XVMOVDP(vs55,vs0) + XVMOVDP(vs56,vs0) + XVMOVDP(vs57,vs0) + XVMOVDP(vs58,vs0) + XVMOVDP(vs59,vs0) + XVMOVDP(vs60,vs0) + XVMOVDP(vs61,vs0) + XVMOVDP(vs62,vs0) + XVMOVDP(vs63,vs0) + +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_16x4', ` +#else .macro KERNEL_16x4 +#endif lxvd2x vs0, o0, AO @@ -98,35 +110,51 @@ xvmaddadp vs63, vs7, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_8x4', ` +#else .macro INIT_8x4 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 - xvmovdp vs36, vs0 - xvmovdp vs37, vs0 - xvmovdp vs38, vs0 - xvmovdp vs39, vs0 - xvmovdp vs40, vs0 - xvmovdp vs41, vs0 - xvmovdp vs42, vs0 - xvmovdp vs43, vs0 - xvmovdp vs44, vs0 - xvmovdp vs45, vs0 - xvmovdp vs46, vs0 - xvmovdp vs47, vs0 - + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) + XVMOVDP(vs36,vs0) + XVMOVDP(vs37,vs0) + XVMOVDP(vs38,vs0) + XVMOVDP(vs39,vs0) + XVMOVDP(vs40,vs0) + XVMOVDP(vs41,vs0) + XVMOVDP(vs42,vs0) + XVMOVDP(vs43,vs0) + XVMOVDP(vs44,vs0) + XVMOVDP(vs45,vs0) + XVMOVDP(vs46,vs0) + XVMOVDP(vs47,vs0) + +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_8x4', ` +#else .macro KERNEL_8x4 +#endif lxvd2x vs0, o0, AO @@ -161,27 +189,43 @@ xvmaddadp vs47, vs3, 
vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_4x4', ` +#else .macro INIT_4x4 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 - xvmovdp vs36, vs0 - xvmovdp vs37, vs0 - xvmovdp vs38, vs0 - xvmovdp vs39, vs0 - + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) + XVMOVDP(vs36,vs0) + XVMOVDP(vs37,vs0) + XVMOVDP(vs38,vs0) + XVMOVDP(vs39,vs0) + +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_4x4', ` +#else .macro KERNEL_4x4 +#endif lxvd2x vs0, o0, AO @@ -206,23 +250,39 @@ xvmaddadp vs39, vs1, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_2x4', ` +#else .macro INIT_2x4 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_2x4', ` +#else .macro KERNEL_2x4 +#endif lxvd2x vs0, o0, AO @@ -242,23 +302,39 @@ xvmaddadp vs35, vs0, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_1x4', ` +#else .macro INIT_1x4 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_1x4', ` +#else .macro KERNEL_1x4 +#endif lxvdsx vs0, o0, AO @@ -278,14 +354,22 @@ xvmaddadp vs35, vs0, vs19 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 16x4 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_16x4', ` +#else .macro SOLVE_LT_16x4 +#endif //############### LOAD B ####################### @@ -1149,46 +1233,46 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs34, o8, T1 - xxswapd vs34, vs34 + XXSWAPD(vs34,vs34) stxsdx vs36, o16, T1 - xxswapd vs36, vs36 + XXSWAPD(vs36,vs36) stxsdx vs38, o24, T1 - xxswapd vs38, vs38 + XXSWAPD(vs38,vs38) addi T1, T1, 32 stxsdx vs40, o0, T1 - xxswapd vs40, vs40 + XXSWAPD(vs40,vs40) stxsdx vs42, o8, T1 - xxswapd vs42, vs42 + XXSWAPD(vs42,vs42) stxsdx vs44, o16, T1 - xxswapd vs44, vs44 + XXSWAPD(vs44,vs44) stxsdx vs46, o24, T1 - xxswapd vs46, vs46 + XXSWAPD(vs46,vs46) addi T1, T1, 32 stxsdx vs48, o0, T1 - xxswapd vs48, vs48 + XXSWAPD(vs48,vs48) stxsdx vs50, o8, T1 - xxswapd vs50, vs50 + XXSWAPD(vs50,vs50) stxsdx vs52, o16, T1 - xxswapd vs52, vs52 + XXSWAPD(vs52,vs52) stxsdx vs54, o24, T1 - xxswapd vs54, vs54 + XXSWAPD(vs54,vs54) addi T1, T1, 32 stxsdx vs56, o0, T1 - xxswapd vs56, vs56 + XXSWAPD(vs56,vs56) stxsdx vs58, o8, T1 - xxswapd vs58, vs58 + XXSWAPD(vs58,vs58) stxsdx vs60, o16, T1 - xxswapd vs60, vs60 + XXSWAPD(vs60,vs60) stxsdx vs62, o24, T1 - xxswapd vs62, vs62 + XXSWAPD(vs62,vs62) stxsdx vs32, o0, T2 stxsdx vs34, o8, T2 @@ -1225,46 +1309,46 @@ stxsdx vs33, o0, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs35, o8, T1 - xxswapd vs35, vs35 + XXSWAPD(vs35,vs35) stxsdx vs37, o16, T1 - xxswapd vs37, vs37 + XXSWAPD(vs37,vs37) stxsdx vs39, o24, T1 - xxswapd vs39, vs39 + XXSWAPD(vs39,vs39) addi T1, T1, 32 stxsdx vs41, o0, T1 - xxswapd vs41, vs41 + XXSWAPD(vs41,vs41) stxsdx vs43, o8, T1 - xxswapd vs43, vs43 + XXSWAPD(vs43,vs43) stxsdx vs45, o16, T1 - xxswapd 
vs45, vs45 + XXSWAPD(vs45,vs45) stxsdx vs47, o24, T1 - xxswapd vs47, vs47 + XXSWAPD(vs47,vs47) addi T1, T1, 32 stxsdx vs49, o0, T1 - xxswapd vs49, vs49 + XXSWAPD(vs49,vs49) stxsdx vs51, o8, T1 - xxswapd vs51, vs51 + XXSWAPD(vs51,vs51) stxsdx vs53, o16, T1 - xxswapd vs53, vs53 + XXSWAPD(vs53,vs53) stxsdx vs55, o24, T1 - xxswapd vs55, vs55 + XXSWAPD(vs55,vs55) addi T1, T1, 32 stxsdx vs57, o0, T1 - xxswapd vs57, vs57 + XXSWAPD(vs57,vs57) stxsdx vs59, o8, T1 - xxswapd vs59, vs59 + XXSWAPD(vs59,vs59) stxsdx vs61, o16, T1 - xxswapd vs61, vs61 + XXSWAPD(vs61,vs61) stxsdx vs63, o24, T1 - xxswapd vs63, vs63 + XXSWAPD(vs63,vs63) stxsdx vs33, o0, T2 stxsdx vs35, o8, T2 @@ -1292,14 +1376,22 @@ stxsdx vs61, o16, T2 stxsdx vs63, o24, T2 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 8x4 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_8x4', ` +#else .macro SOLVE_LT_8x4 +#endif xxpermdi vs0, vs32, vs33, 0 xxpermdi vs1, vs34, vs35, 0 @@ -1603,24 +1695,24 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs34, o8, T1 - xxswapd vs34, vs34 + XXSWAPD(vs34,vs34) stxsdx vs36, o16, T1 - xxswapd vs36, vs36 + XXSWAPD(vs36,vs36) stxsdx vs38, o24, T1 - xxswapd vs38, vs38 + XXSWAPD(vs38,vs38) addi T1, T1, 32 stxsdx vs40, o0, T1 - xxswapd vs40, vs40 + XXSWAPD(vs40,vs40) stxsdx vs42, o8, T1 - xxswapd vs42, vs42 + XXSWAPD(vs42,vs42) stxsdx vs44, o16, T1 - xxswapd vs44, vs44 + XXSWAPD(vs44,vs44) stxsdx vs46, o24, T1 - xxswapd vs46, vs46 + XXSWAPD(vs46,vs46) stxsdx vs32, o0, T2 stxsdx vs34, o8, T2 @@ -1643,24 +1735,24 @@ stxsdx vs33, o0, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs35, o8, T1 - xxswapd vs35, vs35 + XXSWAPD(vs35,vs35) stxsdx vs37, o16, T1 - xxswapd vs37, vs37 + XXSWAPD(vs37,vs37) stxsdx vs39, o24, T1 - xxswapd vs39, vs39 + XXSWAPD(vs39,vs39) addi T1, T1, 32 stxsdx vs41, o0, T1 - xxswapd vs41, vs41 + XXSWAPD(vs41,vs41) stxsdx vs43, o8, T1 - xxswapd vs43, vs43 + XXSWAPD(vs43,vs43) stxsdx vs45, o16, T1 - xxswapd vs45, vs45 + XXSWAPD(vs45,vs45) stxsdx vs47, o24, T1 - xxswapd vs47, vs47 + XXSWAPD(vs47,vs47) stxsdx vs33, o0, T2 stxsdx vs35, o8, T2 @@ -1674,14 +1766,22 @@ stxsdx vs45, o16, T2 stxsdx vs47, o24, T2 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 4x4 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_4x4', ` +#else .macro SOLVE_LT_4x4 +#endif xxpermdi vs0, vs32, vs33, 0 xxpermdi vs1, vs34, vs35, 0 @@ -1813,13 +1913,13 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs34, o8, T1 - xxswapd vs34, vs34 + XXSWAPD(vs34,vs34) stxsdx vs36, o16, T1 - xxswapd vs36, vs36 + XXSWAPD(vs36,vs36) stxsdx vs38, o24, T1 - xxswapd vs38, vs38 + XXSWAPD(vs38,vs38) stxsdx vs32, o0, T2 stxsdx vs34, o8, T2 @@ -1835,27 +1935,35 @@ stxsdx vs33, o0, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs35, o8, T1 - xxswapd vs35, vs35 + XXSWAPD(vs35,vs35) stxsdx vs37, o16, T1 - xxswapd vs37, vs37 + XXSWAPD(vs37,vs37) stxsdx vs39, o24, T1 - xxswapd vs39, vs39 + XXSWAPD(vs39,vs39) stxsdx vs33, o0, T2 stxsdx vs35, o8, T2 stxsdx vs37, o16, T2 stxsdx vs39, o24, T2 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 2x4 
##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_2x4', ` +#else .macro SOLVE_LT_2x4 +#endif xxpermdi vs0, vs32, vs33, 0 xxpermdi vs1, vs34, vs35, 0 @@ -1925,9 +2033,9 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs34, o8, T1 - xxswapd vs34, vs34 + XXSWAPD(vs34,vs34) stxsdx vs32, o0, T2 stxsdx vs34, o8, T2 @@ -1941,21 +2049,29 @@ stxsdx vs33, o0, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs35, o8, T1 - xxswapd vs35, vs35 + XXSWAPD(vs35,vs35) stxsdx vs33, o0, T2 stxsdx vs35, o8, T2 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 1x4 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_1x4', ` +#else .macro SOLVE_LT_1x4 +#endif xxpermdi vs0, vs32, vs33, 0 xxpermdi vs1, vs34, vs35, 0 @@ -2001,7 +2117,7 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs32, o0, T2 @@ -2014,39 +2130,55 @@ stxsdx vs33, o0, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs33, o0, T2 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_16x2', ` +#else .macro INIT_16x2 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 - xvmovdp vs36, vs0 - xvmovdp vs37, vs0 - xvmovdp vs38, vs0 - xvmovdp vs39, vs0 - xvmovdp vs40, vs0 - xvmovdp vs41, vs0 - xvmovdp vs42, vs0 - xvmovdp vs43, vs0 - xvmovdp vs44, vs0 - xvmovdp vs45, vs0 - xvmovdp vs46, vs0 - xvmovdp vs47, vs0 - + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) + XVMOVDP(vs36,vs0) + XVMOVDP(vs37,vs0) + XVMOVDP(vs38,vs0) + XVMOVDP(vs39,vs0) + XVMOVDP(vs40,vs0) + XVMOVDP(vs41,vs0) + XVMOVDP(vs42,vs0) + XVMOVDP(vs43,vs0) + XVMOVDP(vs44,vs0) + XVMOVDP(vs45,vs0) + XVMOVDP(vs46,vs0) + XVMOVDP(vs47,vs0) + +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_16x2', ` +#else .macro KERNEL_16x2 +#endif lxvd2x vs0, o0, AO @@ -2086,27 +2218,43 @@ xvmaddadp vs47, vs7, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_8x2', ` +#else .macro INIT_8x2 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 - xvmovdp vs36, vs0 - xvmovdp vs37, vs0 - xvmovdp vs38, vs0 - xvmovdp vs39, vs0 - + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) + XVMOVDP(vs36,vs0) + XVMOVDP(vs37,vs0) + XVMOVDP(vs38,vs0) + XVMOVDP(vs39,vs0) + +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_8x2', ` +#else .macro KERNEL_8x2 +#endif lxvd2x vs0, o0, AO @@ -2131,23 +2279,39 @@ xvmaddadp vs39, vs3, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_4x2', ` +#else .macro INIT_4x2 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_4x2', ` +#else .macro KERNEL_4x2 +#endif lxvd2x vs0, o0, AO @@ -2166,21 +2330,37 @@ xvmaddadp vs35, vs1, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_2x2', ` +#else .macro INIT_2x2 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) +#if defined(_AIX) +') +#else .endm 
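/*
 * A note on the pattern repeated throughout this file: the AIX
 * assembler does not understand the GNU as .macro/.endm directives,
 * so under _AIX each macro is declared with m4's define() instead
 * (the source is presumably fed through m4 before being assembled
 * there), while every other platform keeps the GNU as form.  A
 * minimal sketch of the dual declaration; SCALE2 is a made-up name
 * for illustration, not a macro from this patch:
 */
#if defined(_AIX)
define(`SCALE2', `
#else
.macro SCALE2
#endif
	xvadddp	vs0, vs0, vs0	// shared body, emitted by either path
#if defined(_AIX)
')
#else
.endm
#endif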
+#endif +#if defined(_AIX) +define(`KERNEL_2x2', ` +#else .macro KERNEL_2x2 +#endif lxvd2x vs0, o0, AO @@ -2196,21 +2376,37 @@ xvmaddadp vs33, vs0, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_1x2', ` +#else .macro INIT_1x2 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_1x2', ` +#else .macro KERNEL_1x2 +#endif lxvdsx vs0, o0, AO @@ -2226,14 +2422,22 @@ xvmaddadp vs33, vs0, vs17 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 16x2 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_16x2', ` +#else .macro SOLVE_LT_16x2 +#endif xxpermdi vs0, vs32, vs33, 0 xxpermdi vs1, vs32, vs33, 3 @@ -2821,46 +3025,46 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs33, o8, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs34, o16, T1 - xxswapd vs34, vs34 + XXSWAPD(vs34,vs34) stxsdx vs35, o24, T1 - xxswapd vs35, vs35 + XXSWAPD(vs35,vs35) addi T1, T1, 32 stxsdx vs36, o0, T1 - xxswapd vs36, vs36 + XXSWAPD(vs36,vs36) stxsdx vs37, o8, T1 - xxswapd vs37, vs37 + XXSWAPD(vs37,vs37) stxsdx vs38, o16, T1 - xxswapd vs38, vs38 + XXSWAPD(vs38,vs38) stxsdx vs39, o24, T1 - xxswapd vs39, vs39 + XXSWAPD(vs39,vs39) addi T1, T1, 32 stxsdx vs40, o0, T1 - xxswapd vs40, vs40 + XXSWAPD(vs40,vs40) stxsdx vs41, o8, T1 - xxswapd vs41, vs41 + XXSWAPD(vs41,vs41) stxsdx vs42, o16, T1 - xxswapd vs42, vs42 + XXSWAPD(vs42,vs42) stxsdx vs43, o24, T1 - xxswapd vs43, vs43 + XXSWAPD(vs43,vs43) addi T1, T1, 32 stxsdx vs44, o0, T1 - xxswapd vs44, vs44 + XXSWAPD(vs44,vs44) stxsdx vs45, o8, T1 - xxswapd vs45, vs45 + XXSWAPD(vs45,vs45) stxsdx vs46, o16, T1 - xxswapd vs46, vs46 + XXSWAPD(vs46,vs46) stxsdx vs47, o24, T1 - xxswapd vs47, vs47 + XXSWAPD(vs47,vs47) stxsdx vs32, o0, T2 stxsdx vs33, o8, T2 @@ -2888,14 +3092,22 @@ stxsdx vs46, o16, T2 stxsdx vs47, o24, T2 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 8x2 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_8x2', ` +#else .macro SOLVE_LT_8x2 +#endif xxpermdi vs0, vs32, vs33, 0 xxpermdi vs1, vs32, vs33, 3 @@ -3111,24 +3323,24 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs33, o8, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs34, o16, T1 - xxswapd vs34, vs34 + XXSWAPD(vs34,vs34) stxsdx vs35, o24, T1 - xxswapd vs35, vs35 + XXSWAPD(vs35,vs35) addi T1, T1, 32 stxsdx vs36, o0, T1 - xxswapd vs36, vs36 + XXSWAPD(vs36,vs36) stxsdx vs37, o8, T1 - xxswapd vs37, vs37 + XXSWAPD(vs37,vs37) stxsdx vs38, o16, T1 - xxswapd vs38, vs38 + XXSWAPD(vs38,vs38) stxsdx vs39, o24, T1 - xxswapd vs39, vs39 + XXSWAPD(vs39,vs39) stxsdx vs32, o0, T2 stxsdx vs33, o8, T2 @@ -3142,14 +3354,22 @@ stxsdx vs38, o16, T2 stxsdx vs39, o24, T2 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 4x2 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_4x2', ` +#else .macro SOLVE_LT_4x2 +#endif xxpermdi vs0, vs32, vs33, 0 xxpermdi vs1, vs32, vs33, 3 @@ -3245,27 +3465,35 @@ stxsdx 
vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs33, o8, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs34, o16, T1 - xxswapd vs34, vs34 + XXSWAPD(vs34,vs34) stxsdx vs35, o24, T1 - xxswapd vs35, vs35 + XXSWAPD(vs35,vs35) stxsdx vs32, o0, T2 stxsdx vs33, o8, T2 stxsdx vs34, o16, T2 stxsdx vs35, o24, T2 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 2x2 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_2x2', ` +#else .macro SOLVE_LT_2x2 +#endif xxpermdi vs0, vs32, vs33, 0 xxpermdi vs1, vs32, vs33, 3 @@ -3322,21 +3550,29 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs33, o8, T1 - xxswapd vs33, vs33 + XXSWAPD(vs33,vs33) stxsdx vs32, o0, T2 stxsdx vs33, o8, T2 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 1x2 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_1x2', ` +#else .macro SOLVE_LT_1x2 +#endif xxpermdi vs0, vs32, vs33, 0 @@ -3376,39 +3612,55 @@ stxsdx vs32, o0, T1 - xxswapd vs32, vs32 + XXSWAPD(vs32,vs32) stxsdx vs32, o0, T2 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_16x1', ` +#else .macro INIT_16x1 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 - xvmovdp vs36, vs0 - xvmovdp vs37, vs0 - xvmovdp vs38, vs0 - xvmovdp vs39, vs0 - xvmovdp vs40, vs0 - xvmovdp vs41, vs0 - xvmovdp vs42, vs0 - xvmovdp vs43, vs0 - xvmovdp vs44, vs0 - xvmovdp vs45, vs0 - xvmovdp vs46, vs0 - xvmovdp vs47, vs0 - + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) + XVMOVDP(vs36,vs0) + XVMOVDP(vs37,vs0) + XVMOVDP(vs38,vs0) + XVMOVDP(vs39,vs0) + XVMOVDP(vs40,vs0) + XVMOVDP(vs41,vs0) + XVMOVDP(vs42,vs0) + XVMOVDP(vs43,vs0) + XVMOVDP(vs44,vs0) + XVMOVDP(vs45,vs0) + XVMOVDP(vs46,vs0) + XVMOVDP(vs47,vs0) + +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_16x1', ` +#else .macro KERNEL_16x1 +#endif lxvdsx vs0, o0, AO @@ -3461,27 +3713,43 @@ xvmaddadp vs47, vs15, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_8x1', ` +#else .macro INIT_8x1 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 - xvmovdp vs36, vs0 - xvmovdp vs37, vs0 - xvmovdp vs38, vs0 - xvmovdp vs39, vs0 - + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) + XVMOVDP(vs36,vs0) + XVMOVDP(vs37,vs0) + XVMOVDP(vs38,vs0) + XVMOVDP(vs39,vs0) + +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_8x1', ` +#else .macro KERNEL_8x1 +#endif lxvdsx vs0, o0, AO @@ -3512,23 +3780,39 @@ xvmaddadp vs39, vs7, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_4x1', ` +#else .macro INIT_4x1 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 - xvmovdp vs34, vs0 - xvmovdp vs35, vs0 + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) + XVMOVDP(vs34,vs0) + XVMOVDP(vs35,vs0) +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_4x1', ` +#else .macro KERNEL_4x1 +#endif lxvdsx vs0, o0, AO @@ -3548,21 +3832,37 @@ xvmaddadp vs35, vs3, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_2x1', ` +#else 
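/*
 * The upper-case XVMOVDP(...)/XXSWAPD(...) spellings that replace the
 * xvmovdp/xxswapd extended mnemonics throughout these macros exist for
 * the same reason: older assemblers (AIX's in particular) may not
 * accept the extended forms.  The wrappers are presumably supplied by
 * a shared header and expand to baseline VSX instructions; plausible
 * definitions (the exact header and expansions are an assumption, not
 * shown in this hunk):
 *
 *   #define XXSWAPD(T,S)  xxpermdi T, S, S, 2    // swap doublewords
 *   #define XVMOVDP(T,S)  xvcpsgndp T, S, S      // plain register move
 *
 * xxpermdi with permute control 2 selects S[1]:S[0], which is exactly
 * the doubleword swap; copying S with its own sign is a move.
 */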
.macro INIT_2x1 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 - xvmovdp vs33, vs0 + XVMOVDP(vs32,vs0) + XVMOVDP(vs33,vs0) +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_2x1', ` +#else .macro KERNEL_2x1 +#endif lxvdsx vs0, o0, AO @@ -3578,20 +3878,36 @@ xvmaddadp vs33, vs1, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`INIT_1x1', ` +#else .macro INIT_1x1 +#endif xxlxor vs0, vs0, vs0 - xvmovdp vs32, vs0 + XVMOVDP(vs32,vs0) +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL_1x1', ` +#else .macro KERNEL_1x1 +#endif lxvdsx vs0, o0, AO @@ -3605,31 +3921,39 @@ xvmaddadp vs32, vs0, vs16 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 16x1 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_16x1', ` +#else .macro SOLVE_LT_16x1 - - xxswapd vs0, vs32 - xxswapd vs1, vs33 - xxswapd vs2, vs34 - xxswapd vs3, vs35 - xxswapd vs4, vs36 - xxswapd vs5, vs37 - xxswapd vs6, vs38 - xxswapd vs7, vs39 - xxswapd vs8, vs40 - xxswapd vs9, vs41 - xxswapd vs10, vs42 - xxswapd vs11, vs43 - xxswapd vs12, vs44 - xxswapd vs13, vs45 - xxswapd vs14, vs46 - xxswapd vs15, vs47 +#endif + + XXSWAPD(vs0,vs32) + XXSWAPD(vs1,vs33) + XXSWAPD(vs2,vs34) + XXSWAPD(vs3,vs35) + XXSWAPD(vs4,vs36) + XXSWAPD(vs5,vs37) + XXSWAPD(vs6,vs38) + XXSWAPD(vs7,vs39) + XXSWAPD(vs8,vs40) + XXSWAPD(vs9,vs41) + XXSWAPD(vs10,vs42) + XXSWAPD(vs11,vs43) + XXSWAPD(vs12,vs44) + XXSWAPD(vs13,vs45) + XXSWAPD(vs14,vs46) + XXSWAPD(vs15,vs47) //############### LOAD B ####################### @@ -4215,23 +4539,31 @@ stxsdx vs46, o16, T1 stxsdx vs47, o24, T1 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 8x1 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_8x1', ` +#else .macro SOLVE_LT_8x1 +#endif - xxswapd vs0, vs32 - xxswapd vs1, vs33 - xxswapd vs2, vs34 - xxswapd vs3, vs35 - xxswapd vs4, vs36 - xxswapd vs5, vs37 - xxswapd vs6, vs38 - xxswapd vs7, vs39 + XXSWAPD(vs0,vs32) + XXSWAPD(vs1,vs33) + XXSWAPD(vs2,vs34) + XXSWAPD(vs3,vs35) + XXSWAPD(vs4,vs36) + XXSWAPD(vs5,vs37) + XXSWAPD(vs6,vs38) + XXSWAPD(vs7,vs39) //############### LOAD B ####################### @@ -4443,19 +4775,27 @@ stxsdx vs38, o16, T1 stxsdx vs39, o24, T1 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 4x1 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_4x1', ` +#else .macro SOLVE_LT_4x1 +#endif - xxswapd vs0, vs32 - xxswapd vs1, vs33 - xxswapd vs2, vs34 - xxswapd vs3, vs35 + XXSWAPD(vs0,vs32) + XXSWAPD(vs1,vs33) + XXSWAPD(vs2,vs34) + XXSWAPD(vs3,vs35) //############### LOAD B ####################### @@ -4546,17 +4886,25 @@ stxsdx vs34, o16, T1 stxsdx vs35, o24, T1 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 2x1 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_2x1', ` +#else .macro SOLVE_LT_2x1 +#endif - xxswapd vs0, vs32 - xxswapd vs1, vs33 + XXSWAPD(vs0,vs32) + XXSWAPD(vs1,vs33) //############### 
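/*
 * The other change that dominates the gemm/gemv kernels below is
 * purely mechanical: every "#ifdef linux" guard around the SysV
 * argument-register assignments and FRAMESLOT stack offsets widens to
 *
 *   #if defined(linux) || defined(__FreeBSD__)
 *
 * FreeBSD on PowerPC follows the same SysV-style calling convention as
 * Linux, so the existing register and stack-frame layout is reused
 * verbatim; only the preprocessor guard changes.
 */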
LOAD B ####################### @@ -4609,16 +4957,24 @@ stxsdx vs32, o0, T1 stxsdx vs33, o8, T1 +#if defined(_AIX) +') +#else .endm +#endif /*########################################################################################## SOLVE_LT 1x1 ##########################################################################################*/ +#if defined(_AIX) +define(`SOLVE_LT_1x1', ` +#else .macro SOLVE_LT_1x1 +#endif - xxswapd vs0, vs32 + XXSWAPD(vs0,vs32) //############### LOAD B ####################### @@ -4655,5 +5011,9 @@ stxsdx vs32, o0, T1 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/gemm_beta.S b/kernel/power/gemm_beta.S index 7acc05b4d..81457b698 100644 --- a/kernel/power/gemm_beta.S +++ b/kernel/power/gemm_beta.S @@ -62,7 +62,7 @@ stfd f31, 16(SP) stw r0, 24(SP) -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz LDC, FRAMESLOT(0) + STACKSIZE(SP) #else diff --git a/kernel/power/gemm_kernel.S b/kernel/power/gemm_kernel.S index e5e9ec346..37ff9c9e7 100644 --- a/kernel/power/gemm_kernel.S +++ b/kernel/power/gemm_kernel.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -186,7 +186,7 @@ slwi LDC, LDC, BASE_SHIFT #if defined(TRMMKERNEL) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -228,7 +228,7 @@ #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/gemm_kernel_altivec.S b/kernel/power/gemm_kernel_altivec.S index 6c7e78319..2dae49cb8 100644 --- a/kernel/power/gemm_kernel_altivec.S +++ b/kernel/power/gemm_kernel_altivec.S @@ -58,7 +58,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 diff --git a/kernel/power/gemm_kernel_altivec_cell.S b/kernel/power/gemm_kernel_altivec_cell.S index b7445a1f6..0823420dd 100644 --- a/kernel/power/gemm_kernel_altivec_cell.S +++ b/kernel/power/gemm_kernel_altivec_cell.S @@ -58,7 +58,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 diff --git a/kernel/power/gemm_kernel_altivec_g4.S b/kernel/power/gemm_kernel_altivec_g4.S index 548150143..3a214b248 100644 --- a/kernel/power/gemm_kernel_altivec_g4.S +++ b/kernel/power/gemm_kernel_altivec_g4.S @@ -58,7 +58,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 diff --git a/kernel/power/gemm_kernel_cell.S b/kernel/power/gemm_kernel_cell.S index f3d3b8325..26f9cb023 100644 --- a/kernel/power/gemm_kernel_cell.S +++ b/kernel/power/gemm_kernel_cell.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -192,7 +192,7 @@ slwi LDC, LDC, BASE_SHIFT #if defined(TRMMKERNEL) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -226,7 +226,7 @@ li PREC, 4 * SIZE #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/gemm_kernel_g4.S b/kernel/power/gemm_kernel_g4.S index 259f04c4e..a5c4d3a43 100644 --- 
a/kernel/power/gemm_kernel_g4.S +++ b/kernel/power/gemm_kernel_g4.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -184,7 +184,7 @@ slwi LDC, LDC, BASE_SHIFT #if defined(TRMMKERNEL) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/gemm_kernel_hummer.S b/kernel/power/gemm_kernel_hummer.S index 3a8e1edfa..6ecbeb3e0 100644 --- a/kernel/power/gemm_kernel_hummer.S +++ b/kernel/power/gemm_kernel_hummer.S @@ -46,7 +46,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #define A r6 #define B r7 #define C r8 diff --git a/kernel/power/gemm_kernel_power3.S b/kernel/power/gemm_kernel_power3.S index 4a6b5da62..f88bc291c 100644 --- a/kernel/power/gemm_kernel_power3.S +++ b/kernel/power/gemm_kernel_power3.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -187,7 +187,7 @@ li PREC, 4 * SIZE #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/gemm_kernel_power6.S b/kernel/power/gemm_kernel_power6.S index 1a412c4fb..b274f7655 100644 --- a/kernel/power/gemm_kernel_power6.S +++ b/kernel/power/gemm_kernel_power6.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -183,7 +183,7 @@ slwi LDC, LDC, BASE_SHIFT #if defined(TRMMKERNEL) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/gemm_kernel_ppc440.S b/kernel/power/gemm_kernel_ppc440.S index b128beb38..c5ef6e4e5 100644 --- a/kernel/power/gemm_kernel_ppc440.S +++ b/kernel/power/gemm_kernel_ppc440.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -183,7 +183,7 @@ slwi LDC, LDC, BASE_SHIFT #if defined(TRMMKERNEL) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/gemv_n.S b/kernel/power/gemv_n.S index 02160bd61..abc61b62e 100644 --- a/kernel/power/gemv_n.S +++ b/kernel/power/gemv_n.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -252,7 +252,7 @@ stw r27, 196(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz INCY, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/gemv_n_ppc440.S b/kernel/power/gemv_n_ppc440.S index beb21200a..18d804520 100644 --- a/kernel/power/gemv_n_ppc440.S +++ b/kernel/power/gemv_n_ppc440.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -199,7 +199,7 @@ stw r23, 180(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz INCY, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/gemv_t.S b/kernel/power/gemv_t.S index 
457753065..25a4dd01b 100644 --- a/kernel/power/gemv_t.S +++ b/kernel/power/gemv_t.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -260,7 +260,7 @@ stw r29, 220(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz INCY, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/gemv_t_ppc440.S b/kernel/power/gemv_t_ppc440.S index 6e560db6c..7d12b07a4 100644 --- a/kernel/power/gemv_t_ppc440.S +++ b/kernel/power/gemv_t_ppc440.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -190,7 +190,7 @@ stw r22, 192(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz INCY, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/ger.S b/kernel/power/ger.S index fd397ce8c..d83546b0d 100644 --- a/kernel/power/ger.S +++ b/kernel/power/ger.S @@ -47,7 +47,7 @@ #endif #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -224,7 +224,7 @@ stw r27, 196(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz LDA, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/icamax_power8.S b/kernel/power/icamax_power8.S new file mode 100644 index 000000000..4872aff40 --- /dev/null +++ b/kernel/power/icamax_power8.S @@ -0,0 +1,458 @@ +/* .file "icamax.c" + .abiversion 2 + .section ".text" + .align 2 + .p2align 4,,15 + .globl icamax_k + .type icamax_k, @function +*/ +#define ASSEMBLER +#include "common.h" + + PROLOGUE + +icamax_k: +.LCF0: +0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l + .localentry icamax_k,.-icamax_k + mr. 9,3 + ble 0,.L25 + cmpdi 7,5,0 + li 3,0 + blelr 7 + cmpdi 7,5,1 + beq 7,.L54 + lfs 11,0(4) + lfs 0,4(4) + cmpdi 7,9,1 + fabs 11,11 + fabs 0,0 + fadds 11,11,0 + beq 7,.L29 + addi 9,9,-1 + sldi 5,5,3 + mtctr 9 + add 4,4,5 + li 3,0 + li 9,1 + .p2align 4,,15 +.L24: + lfs 0,4(4) + lfs 12,0(4) + add 4,4,5 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,0,11 + bng 7,.L23 + fmr 11,0 + mr 3,9 +.L23: + addi 9,9,1 + bdnz .L24 +.L52: + addi 3,3,1 + blr + .p2align 4,,15 +.L25: + li 3,0 + blr + .p2align 4,,15 +.L54: + rldicr. 
8,9,0,58 + bne 0,.L55 + addi 7,8,1 + li 10,0 + xxlxor 11,11,11 + cmpd 7,7,9 + sldi 10,10,2 + add 4,4,10 + subf 10,8,9 + mtctr 10 + li 3,0 + bgt 7,.L43 + li 10,-1 + rldicr 10,10,0,0 + cmpd 7,9,10 + beq 7,.L43 + .p2align 4,,15 +.L44: + lfs 0,4(4) + lfs 12,0(4) + addi 4,4,8 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,0,11 + bng 7,.L46 + fmr 11,0 + mr 3,8 +.L46: + addi 8,8,1 + bdnz .L44 + b .L52 + .p2align 4,,15 +.L55: + li 0,-144 + std 31,-8(1) + addis 5,2,.LC2@toc@ha + vspltisw 18,0 + vspltisw 19,0 + addis 6,2,.LC3@toc@ha + addi 5,5,.LC2@toc@l + stvx 24,1,0 + li 0,-128 + addi 6,6,.LC3@toc@l + xxlor 49,50,50 + addis 7,2,.LC4@toc@ha + lxvd2x 44,0,5 + addis 10,2,.LC5@toc@ha + stvx 25,1,0 + li 0,-112 + addi 7,7,.LC4@toc@l + lxvd2x 45,0,6 + addis 5,2,.LC6@toc@ha + addis 6,2,.LC7@toc@ha + stvx 26,1,0 + li 0,-96 + addi 10,10,.LC5@toc@l + addi 6,6,.LC7@toc@l + addi 5,5,.LC6@toc@l + stvx 27,1,0 + li 0,-80 + lxvd2x 46,0,10 + xxpermdi 44,44,44,2 + mr 10,4 + lxvd2x 48,0,6 + lxvd2x 47,0,5 + xxpermdi 45,45,45,2 + li 6,0 + stvx 28,1,0 + li 0,-64 + xxlnand 44,44,44 + xxlnand 45,45,45 + stvx 29,1,0 + li 0,-48 + vspltisw 29,8 + vadduwm 29,29,29 + xxpermdi 46,46,46,2 + stvx 30,1,0 + li 0,-32 + xxpermdi 47,47,47,2 + xxpermdi 48,48,48,2 + stvx 31,1,0 + lxvd2x 63,0,7 + addis 7,2,.LC8@toc@ha + addi 7,7,.LC8@toc@l + lxvd2x 62,0,7 + xxpermdi 63,63,63,2 + .p2align 4,,15 +.L5: + addi 3,10,16 + addi 5,10,32 + lxvd2x 34,0,10 + addi 7,10,64 + addi 31,10,48 + addi 12,10,80 + addi 11,10,96 + lxvd2x 36,0,3 + lxvd2x 37,0,5 + addi 3,10,112 + addi 5,10,128 + lxvd2x 38,0,7 + lxvd2x 7,0,31 + addi 7,10,160 + addi 31,10,144 + lxvd2x 33,0,12 + lxvd2x 39,0,11 + addi 12,10,176 + addi 11,10,192 + lxvd2x 8,0,3 + lxvd2x 40,0,5 + xxpermdi 34,34,34,2 + addi 3,10,208 + addi 5,10,224 + lxvd2x 41,0,7 + lxvd2x 9,0,31 + addi 7,10,240 + lxvd2x 10,0,12 + lxvd2x 42,0,11 + xxpermdi 37,37,37,2 + xxpermdi 36,36,36,2 + addi 6,6,32 + lxvd2x 32,0,3 + lxvd2x 43,0,5 + xxpermdi 7,7,7,2 + xxpermdi 38,38,38,2 + cmpd 7,8,6 + addi 10,10,256 + lxvd2x 11,0,7 + xxpermdi 39,39,39,2 + xxpermdi 33,33,33,2 + xxpermdi 40,40,40,2 + xxpermdi 8,8,8,2 + xxpermdi 41,41,41,2 + xxpermdi 9,9,9,2 + xxpermdi 10,10,10,2 + xxpermdi 42,42,42,2 + xxpermdi 43,43,43,2 + xxpermdi 32,32,32,2 + xxpermdi 11,11,11,2 + xvabssp 57,37 + xvabssp 58,39 + xvabssp 35,40 + xvabssp 59,41 + xvabssp 34,34 + xvabssp 33,33 + xvabssp 32,32 + xvabssp 60,43 + xvabssp 36,36 + xvabssp 37,7 + xvabssp 38,38 + xvabssp 39,8 + xvabssp 40,9 + xvabssp 41,10 + xvabssp 42,42 + xvabssp 43,11 + vperm 24,4,2,12 + vperm 4,4,2,13 + vperm 2,5,25,12 + vperm 5,5,25,13 + vperm 25,1,6,12 + vperm 6,1,6,13 + vperm 1,7,26,12 + vperm 7,7,26,13 + vperm 26,8,3,12 + vperm 8,8,3,13 + vperm 3,9,27,12 + vperm 9,9,27,13 + vperm 27,0,10,12 + vperm 10,0,10,13 + vperm 0,11,28,12 + vperm 11,11,28,13 + xvaddsp 12,33,39 + xvaddsp 38,57,38 + xvaddsp 0,32,43 + xvaddsp 42,59,42 + xvaddsp 36,56,36 + xvaddsp 37,34,37 + xvaddsp 40,58,40 + xvaddsp 41,35,41 + xvcmpgtsp 32,12,38 + xvcmpgtsp 33,0,42 + xvcmpgtsp 43,37,36 + xvcmpgtsp 39,41,40 + xxsel 12,38,12,32 + xxsel 38,47,48,32 + xxsel 0,42,0,33 + xxsel 42,47,48,33 + xxsel 37,36,37,43 + xxsel 43,63,46,43 + xxsel 41,40,41,39 + xxsel 39,63,46,39 + xvcmpgtsp 32,12,37 + xvcmpgtsp 33,0,41 + xxsel 12,37,12,32 + xxsel 43,43,38,32 + xxsel 0,41,0,33 + xxsel 33,39,42,33 + xvcmpgtsp 32,0,12 + vadduwm 1,1,29 + xxsel 0,12,0,32 + xxsel 32,43,33,32 + xvcmpgtsp 33,0,51 + vadduwm 0,17,0 + vadduwm 17,17,30 + xxsel 50,50,32,33 + xxsel 51,51,0,33 + bgt 7,.L5 + xxsldwi 11,51,51,3 + xxsldwi 12,51,51,2 + vspltw 
0,18,3 + xxsldwi 0,51,51,1 + xscvspdp 11,11 + xscvspdp 12,12 + mfvsrwz 6,32 + vspltw 0,18,2 + xscvspdp 0,0 + mfvsrwz 7,50 + mfvsrwz 5,32 + vspltw 0,18,0 + xscvspdp 51,51 + mfvsrwz 10,32 + fcmpu 7,11,12 + rldicl 3,6,0,32 + fmr 10,0 + rldicl 11,7,0,32 + rldicl 31,5,0,32 + rldicl 0,10,0,32 + beq 7,.L56 + bnl 7,.L8 + fmr 11,12 + mr 3,31 +.L8: + xscmpudp 7,0,51 + bne 7,.L11 + cmplw 7,7,10 + ble 7,.L12 + mr 7,10 +.L12: + rldicl 11,7,0,32 +.L13: + fcmpu 7,11,10 + beq 7,.L57 + blt 7,.L58 +.L17: + cmpd 7,9,8 + ble 7,.L19 + addi 7,8,1 + sldi 10,8,1 + cmpd 7,7,9 + sldi 10,10,2 + add 4,4,10 + subf 10,8,9 + mtctr 10 + bgt 7,.L37 + li 10,-1 + rldicr 10,10,0,0 + cmpd 7,9,10 + beq 7,.L37 + .p2align 4,,15 +.L21: + lfs 0,4(4) + lfs 12,0(4) + addi 4,4,8 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,0,11 + bng 7,.L20 + fmr 11,0 + mr 3,8 +.L20: + addi 8,8,1 + bdnz .L21 +.L19: + li 0,-144 + ld 31,-8(1) + addi 3,3,1 + lvx 24,1,0 + li 0,-128 + lvx 25,1,0 + li 0,-112 + lvx 26,1,0 + li 0,-96 + lvx 27,1,0 + li 0,-80 + lvx 28,1,0 + li 0,-64 + lvx 29,1,0 + li 0,-48 + lvx 30,1,0 + li 0,-32 + lvx 31,1,0 + blr + .p2align 4,,15 +.L56: + cmplw 7,6,5 + ble 7,.L7 + mr 6,5 +.L7: + rldicl 3,6,0,32 + b .L8 + .p2align 4,,15 +.L29: + li 3,1 + blr + .p2align 4,,15 +.L11: + bnl 7,.L13 + xscpsgndp 10,51,51 + mr 11,0 + b .L13 + .p2align 4,,15 +.L57: + cmpd 7,3,11 + ble 7,.L17 + mr 3,11 + b .L17 + .p2align 4,,15 +.L58: + fmr 11,10 + mr 3,11 + b .L17 +.L43: + li 9,1 + mtctr 9 + b .L44 +.L37: + li 9,1 + mtctr 9 + b .L21 + .long 0 + .byte 0,0,0,0,0,1,0,0 + .size icamax_k,.-icamax_k + .section .rodata.cst16,"aM",@progbits,16 + .align 4 +.LC2: + .byte 0 + .byte 1 + .byte 2 + .byte 3 + .byte 8 + .byte 9 + .byte 10 + .byte 11 + .byte 16 + .byte 17 + .byte 18 + .byte 19 + .byte 24 + .byte 25 + .byte 26 + .byte 27 +.LC3: + .byte 4 + .byte 5 + .byte 6 + .byte 7 + .byte 12 + .byte 13 + .byte 14 + .byte 15 + .byte 20 + .byte 21 + .byte 22 + .byte 23 + .byte 28 + .byte 29 + .byte 30 + .byte 31 +.LC4: + .long 0 + .long 1 + .long 2 + .long 3 +.LC5: + .long 4 + .long 5 + .long 6 + .long 7 +.LC6: + .long 8 + .long 9 + .long 10 + .long 11 +.LC7: + .long 12 + .long 13 + .long 14 + .long 15 +.LC8: + .long 32 + .long 32 + .long 32 + .long 32 + .ident "GCC: (SUSE Linux) 7.3.1 20180323 [gcc-7-branch revision 258812]" + .section .note.GNU-stack,"",@progbits diff --git a/kernel/power/icamax_power9.S b/kernel/power/icamax_power9.S new file mode 100644 index 000000000..bf6ab6e82 --- /dev/null +++ b/kernel/power/icamax_power9.S @@ -0,0 +1,394 @@ +/* + .file "icamax.c" + .abiversion 2 + .section ".text" + .align 2 + .p2align 4,,15 + .globl icamax_k + .type icamax_k, @function +*/ +#define ASSEMBLER +#include "common.h" + + PROLOGUE + +icamax_k: +.LCF0: +0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l + .localentry icamax_k,.-icamax_k + mr. 9,3 + ble 0,.L25 + cmpdi 7,5,0 + li 3,0 + blelr 7 + cmpdi 7,5,1 + beq 7,.L53 + lfs 11,0(4) + lfs 0,4(4) + cmpdi 7,9,1 + fabs 11,11 + fabs 0,0 + fadds 11,11,0 + beq 7,.L29 + addi 9,9,-1 + sldi 5,5,3 + li 3,0 + mtctr 9 + add 4,4,5 + li 9,1 + .p2align 4,,15 +.L24: + lfs 0,4(4) + lfs 12,0(4) + add 4,4,5 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,0,11 + bng 7,.L23 + fmr 11,0 + mr 3,9 +.L23: + addi 9,9,1 + bdnz .L24 +.L51: + addi 3,3,1 + blr + .p2align 4,,15 +.L25: + li 3,0 + blr + .p2align 4,,15 +.L53: + rldicr. 
8,9,0,58 + bne 0,.L54 + addi 7,8,1 + li 10,0 + subf 6,8,9 + li 3,0 + xxlxor 11,11,11 + cmpd 7,7,9 + sldi 10,10,2 + mtctr 6 + add 4,4,10 + bgt 7,.L43 + li 10,-1 + rldicr 10,10,0,0 + cmpd 7,9,10 + beq 7,.L43 + .p2align 4,,15 +.L44: + lfs 0,4(4) + lfs 12,0(4) + addi 4,4,8 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,0,11 + bng 7,.L46 + fmr 11,0 + mr 3,8 +.L46: + addi 8,8,1 + bdnz .L44 + b .L51 + .p2align 4,,15 +.L54: + addis 11,2,.LC2@toc@ha + addis 3,2,.LC3@toc@ha + addis 5,2,.LC6@toc@ha + addis 6,2,.LC7@toc@ha + xxspltib 47,0 + addis 7,2,.LC4@toc@ha + addis 10,2,.LC5@toc@ha + stxv 58,-96(1) + stxv 59,-80(1) + addi 11,11,.LC2@toc@l + addi 3,3,.LC3@toc@l + addi 5,5,.LC6@toc@l + addi 6,6,.LC7@toc@l + stxv 62,-32(1) + stxv 63,-16(1) + xxspltib 58,16 + addi 7,7,.LC4@toc@l + addi 10,10,.LC5@toc@l + xxspltib 59,32 + lxv 44,0(11) + lxv 45,0(3) + xxspltib 48,0 + lxv 62,0(5) + xxlor 46,47,47 + lxv 63,0(6) + stxv 60,-64(1) + stxv 61,-48(1) + lxv 60,0(7) + lxv 61,0(10) + li 7,0 + mr 10,4 + vextsb2w 26,26 + vextsb2w 27,27 + stxv 56,-128(1) + stxv 57,-112(1) + .p2align 4,,15 +.L5: + lxv 0,0(10) + addi 7,7,32 + addi 10,10,256 + cmpd 7,8,7 + xvabssp 34,0 + lxv 0,-240(10) + xvabssp 42,0 + lxv 0,-224(10) + xvabssp 49,0 + lxv 0,-208(10) + vpermr 25,10,2,12 + vpermr 2,10,2,13 + xvabssp 35,0 + lxv 0,-192(10) + xvaddsp 34,57,34 + xvabssp 36,0 + lxv 0,-176(10) + vpermr 10,3,17,12 + vpermr 3,3,17,13 + xvabssp 33,0 + lxv 0,-160(10) + xvaddsp 10,42,35 + xvabssp 50,0 + lxv 0,-144(10) + vpermr 17,1,4,12 + vpermr 4,1,4,13 + xvabssp 37,0 + lxv 0,-128(10) + xvaddsp 36,49,36 + xvabssp 38,0 + lxv 0,-112(10) + vpermr 1,5,18,12 + vpermr 5,5,18,13 + xvabssp 43,0 + lxv 0,-96(10) + xvaddsp 12,33,37 + xvabssp 51,0 + lxv 0,-80(10) + vpermr 18,11,6,12 + vpermr 6,11,6,13 + xvabssp 39,0 + lxv 0,-64(10) + xvaddsp 38,50,38 + xvabssp 40,0 + lxv 0,-48(10) + vpermr 11,7,19,12 + vpermr 7,7,19,13 + xvabssp 32,0 + lxv 0,-32(10) + xvaddsp 11,43,39 + xvcmpgtsp 39,10,34 + xvcmpgtsp 43,12,36 + xvabssp 56,0 + lxv 0,-16(10) + vpermr 19,0,8,12 + vpermr 8,0,8,13 + xxsel 10,34,10,39 + xxsel 12,36,12,43 + xxsel 39,60,61,39 + xxsel 43,62,63,43 + xvabssp 41,0 + xvaddsp 40,51,40 + vpermr 0,9,24,12 + vpermr 9,9,24,13 + xvaddsp 0,32,41 + xvcmpgtsp 41,11,38 + xvcmpgtsp 32,12,10 + xvcmpgtsp 42,0,40 + xxsel 11,38,11,41 + xxsel 12,10,12,32 + xxsel 43,39,43,32 + xxsel 41,60,61,41 + xxsel 0,40,0,42 + xxsel 42,62,63,42 + xvcmpgtsp 33,0,11 + xxsel 0,11,0,33 + xxsel 33,41,42,33 + xvcmpgtsp 32,0,12 + vadduwm 1,1,26 + xxsel 0,12,0,32 + xxsel 32,43,33,32 + xvcmpgtsp 33,0,48 + vadduwm 0,14,0 + vadduwm 14,14,27 + xxsel 47,47,32,33 + xxsel 48,48,0,33 + bgt 7,.L5 + xxsldwi 11,48,48,3 + xxsldwi 12,48,48,2 + li 10,0 + li 3,12 + xxsldwi 0,48,48,1 + xscvspdp 48,48 + vextuwrx 6,10,15 + li 10,4 + xscvspdp 11,11 + xscvspdp 12,12 + xscvspdp 0,0 + vextuwrx 5,10,15 + li 10,8 + vextuwrx 7,10,15 + vextuwrx 10,3,15 + rldicl 12,5,0,32 + rldicl 3,6,0,32 + rldicl 11,7,0,32 + rldicl 0,10,0,32 + fcmpu 7,11,12 + fmr 10,0 + beq 7,.L55 + bnl 7,.L8 + mr 3,12 + fmr 11,12 +.L8: + xscmpudp 7,0,48 + bne 7,.L11 + cmplw 7,7,10 + ble 7,.L12 + mr 7,10 +.L12: + rldicl 11,7,0,32 +.L13: + fcmpu 7,11,10 + beq 7,.L56 + bnl 7,.L17 + mr 3,11 + fmr 11,10 +.L17: + cmpd 7,9,8 + ble 7,.L19 + addi 7,8,1 + sldi 10,8,1 + subf 6,8,9 + cmpd 7,7,9 + sldi 10,10,2 + mtctr 6 + add 4,4,10 + bgt 7,.L37 + li 10,-1 + rldicr 10,10,0,0 + cmpd 7,9,10 + beq 7,.L37 + .p2align 4,,15 +.L21: + lfs 0,4(4) + lfs 12,0(4) + addi 4,4,8 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,0,11 + bng 7,.L20 + fmr 11,0 + mr 3,8 +.L20: + 
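/*
 * The scalar paths of these icamax/icamin kernels (the .L24/.L44/.L21
 * loops) all do the same job: compute the complex 1-norm |re| + |im|
 * of each element and keep the first index that strictly improves on
 * the running value.  A C sketch of the icamax tail loop; the names
 * are illustrative, the real routine is the generated icamax_k code
 * around this comment:
 *
 *   static long icamax_tail(long n, const float *x, long i,
 *                           float max, long best) {
 *       for (; i < n; i++) {
 *           float t = fabsf(x[2*i]) + fabsf(x[2*i+1]);
 *           if (t > max) {      // strict '>' keeps the earliest index
 *               max  = t;
 *               best = i;
 *           }
 *       }
 *       return best + 1;        // BLAS i*amax results are 1-based
 *   }
 */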
addi 8,8,1 + bdnz .L21 +.L19: + lxv 56,-128(1) + lxv 57,-112(1) + addi 3,3,1 + lxv 58,-96(1) + lxv 59,-80(1) + lxv 60,-64(1) + lxv 61,-48(1) + lxv 62,-32(1) + lxv 63,-16(1) + blr + .p2align 4,,15 +.L55: + cmplw 7,6,5 + ble 7,.L7 + mr 6,5 +.L7: + rldicl 3,6,0,32 + b .L8 + .p2align 4,,15 +.L29: + li 3,1 + blr + .p2align 4,,15 +.L11: + bnl 7,.L13 + mr 11,0 + xscpsgndp 10,48,48 + b .L13 + .p2align 4,,15 +.L56: + cmpd 7,3,11 + ble 7,.L17 + mr 3,11 + b .L17 +.L37: + li 9,1 + mtctr 9 + b .L21 +.L43: + li 9,1 + mtctr 9 + b .L44 + .long 0 + .byte 0,0,0,0,0,0,0,0 + .size icamax_k,.-icamax_k + .section .rodata.cst16,"aM",@progbits,16 + .align 4 +.LC2: + .byte 0 + .byte 1 + .byte 2 + .byte 3 + .byte 8 + .byte 9 + .byte 10 + .byte 11 + .byte 16 + .byte 17 + .byte 18 + .byte 19 + .byte 24 + .byte 25 + .byte 26 + .byte 27 +.LC3: + .byte 4 + .byte 5 + .byte 6 + .byte 7 + .byte 12 + .byte 13 + .byte 14 + .byte 15 + .byte 20 + .byte 21 + .byte 22 + .byte 23 + .byte 28 + .byte 29 + .byte 30 + .byte 31 +.LC4: + .long 0 + .long 1 + .long 2 + .long 3 +.LC5: + .long 4 + .long 5 + .long 6 + .long 7 +.LC6: + .long 8 + .long 9 + .long 10 + .long 11 +.LC7: + .long 12 + .long 13 + .long 14 + .long 15 + .ident "GCC: (SUSE Linux) 7.3.1 20180323 [gcc-7-branch revision 258812]" + .section .note.GNU-stack,"",@progbits diff --git a/kernel/power/icamin_power8.S b/kernel/power/icamin_power8.S new file mode 100644 index 000000000..e4469eb64 --- /dev/null +++ b/kernel/power/icamin_power8.S @@ -0,0 +1,459 @@ +/* .file "icamin.c" + .abiversion 2 + .section ".text" + .align 2 + .p2align 4,,15 + .globl icamin_k + .type icamin_k, @function +*/ +#define ASSEMBLER +#include "common.h" + + PROLOGUE +#if _CALL_ELF ==2 +icamin_k: +#endif +.LCF0: +0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l +#if _CALL_ELF ==2 + .localentry icamin_k,.-icamin_k +#endif + mr. 9,3 + ble 0,.L25 + cmpdi 7,5,0 + li 3,0 + blelr 7 + lfs 11,0(4) + lfs 0,4(4) + cmpdi 7,5,1 + fabs 11,11 + fabs 0,0 + fadds 11,11,0 + beq 7,.L54 + cmpdi 7,9,1 + beq 7,.L29 + addi 9,9,-1 + sldi 5,5,3 + mtctr 9 + add 4,4,5 + li 3,0 + li 9,1 + .p2align 4,,15 +.L24: + lfs 0,4(4) + lfs 12,0(4) + add 4,4,5 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,0,11 + bnl 7,.L23 + fmr 11,0 + mr 3,9 +.L23: + addi 9,9,1 + bdnz .L24 +.L52: + addi 3,3,1 + blr + .p2align 4,,15 +.L25: + li 3,0 + blr + .p2align 4,,15 +.L54: + rldicr. 
8,9,0,58 + bne 0,.L55 + addi 7,8,1 + li 10,0 + cmpd 7,7,9 + sldi 10,10,2 + add 4,4,10 + subf 10,8,9 + mtctr 10 + li 3,0 + bgt 7,.L43 + li 10,-1 + rldicr 10,10,0,0 + cmpd 7,9,10 + beq 7,.L43 + .p2align 4,,15 +.L44: + lfs 0,0(4) + lfs 12,4(4) + addi 4,4,8 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,11,0 + bng 7,.L46 + fmr 11,0 + mr 3,8 +.L46: + addi 8,8,1 + bdnz .L44 + b .L52 + .p2align 4,,15 +.L55: + li 0,-128 + std 31,-8(1) + addis 5,2,.LC2@toc@ha + xscvdpspn 11,11 + vspltisw 19,0 + addis 6,2,.LC3@toc@ha + addi 5,5,.LC2@toc@l + stvx 25,1,0 + li 0,-112 + addi 6,6,.LC3@toc@l + xxlor 50,51,51 + addis 7,2,.LC4@toc@ha + lxvd2x 44,0,5 + addis 10,2,.LC5@toc@ha + stvx 26,1,0 + li 0,-96 + addi 7,7,.LC4@toc@l + lxvd2x 45,0,6 + addis 5,2,.LC6@toc@ha + addis 6,2,.LC7@toc@ha + stvx 27,1,0 + li 0,-80 + addi 10,10,.LC5@toc@l + xxspltw 5,11,0 + addi 6,6,.LC7@toc@l + addi 5,5,.LC6@toc@l + stvx 28,1,0 + li 0,-64 + lxvd2x 47,0,10 + xxpermdi 44,44,44,2 + mr 10,4 + lxvd2x 49,0,6 + lxvd2x 48,0,5 + xxpermdi 45,45,45,2 + li 6,0 + stvx 29,1,0 + li 0,-48 + xxlnand 44,44,44 + xxlnand 45,45,45 + stvx 30,1,0 + lxvd2x 62,0,7 + addis 7,2,.LC8@toc@ha + li 0,-32 + addi 7,7,.LC8@toc@l + xxpermdi 47,47,47,2 + stvx 31,1,0 + vspltisw 31,8 + xxpermdi 48,48,48,2 + lxvd2x 46,0,7 + vadduwm 31,31,31 + xxpermdi 49,49,49,2 + xxpermdi 62,62,62,2 + .p2align 4,,15 +.L5: + addi 3,10,16 + addi 5,10,32 + lxvd2x 34,0,10 + addi 7,10,64 + addi 31,10,48 + addi 12,10,80 + addi 11,10,96 + lxvd2x 36,0,3 + lxvd2x 37,0,5 + addi 3,10,112 + addi 5,10,128 + lxvd2x 38,0,7 + lxvd2x 6,0,31 + addi 7,10,160 + addi 31,10,144 + lxvd2x 33,0,12 + lxvd2x 39,0,11 + addi 12,10,176 + addi 11,10,192 + lxvd2x 7,0,3 + lxvd2x 40,0,5 + xxpermdi 34,34,34,2 + addi 3,10,208 + addi 5,10,224 + lxvd2x 41,0,7 + lxvd2x 8,0,31 + addi 7,10,240 + lxvd2x 9,0,12 + lxvd2x 42,0,11 + xxpermdi 37,37,37,2 + xxpermdi 36,36,36,2 + addi 6,6,32 + lxvd2x 32,0,3 + lxvd2x 43,0,5 + xxpermdi 6,6,6,2 + xxpermdi 38,38,38,2 + cmpd 7,8,6 + addi 10,10,256 + lxvd2x 10,0,7 + xxpermdi 39,39,39,2 + xxpermdi 33,33,33,2 + xxpermdi 40,40,40,2 + xxpermdi 7,7,7,2 + xxpermdi 41,41,41,2 + xxpermdi 8,8,8,2 + xxpermdi 9,9,9,2 + xxpermdi 42,42,42,2 + xxpermdi 43,43,43,2 + xxpermdi 32,32,32,2 + xxpermdi 10,10,10,2 + xvabssp 58,37 + xvabssp 59,39 + xvabssp 35,40 + xvabssp 60,41 + xvabssp 34,34 + xvabssp 33,33 + xvabssp 32,32 + xvabssp 61,43 + xvabssp 36,36 + xvabssp 37,6 + xvabssp 38,38 + xvabssp 39,7 + xvabssp 40,8 + xvabssp 41,9 + xvabssp 42,42 + xvabssp 43,10 + vperm 25,4,2,12 + vperm 4,4,2,13 + vperm 2,5,26,12 + vperm 5,5,26,13 + vperm 26,1,6,12 + vperm 6,1,6,13 + vperm 1,7,27,12 + vperm 7,7,27,13 + vperm 27,8,3,12 + vperm 8,8,3,13 + vperm 3,9,28,12 + vperm 9,9,28,13 + vperm 28,0,10,12 + vperm 10,0,10,13 + vperm 0,11,29,12 + vperm 11,11,29,13 + xvaddsp 12,33,39 + xvaddsp 38,58,38 + xvaddsp 0,32,43 + xvaddsp 42,60,42 + xvaddsp 36,57,36 + xvaddsp 37,34,37 + xvaddsp 40,59,40 + xvaddsp 41,35,41 + xvcmpgtsp 32,38,12 + xvcmpgtsp 33,42,0 + xvcmpgtsp 43,36,37 + xvcmpgtsp 39,40,41 + xxsel 12,38,12,32 + xxsel 38,48,49,32 + xxsel 0,42,0,33 + xxsel 42,48,49,33 + xxsel 37,36,37,43 + xxsel 43,62,47,43 + xxsel 41,40,41,39 + xxsel 39,62,47,39 + xvcmpgtsp 32,37,12 + xvcmpgtsp 33,41,0 + xxsel 12,37,12,32 + xxsel 43,43,38,32 + xxsel 0,41,0,33 + xxsel 33,39,42,33 + xvcmpgtsp 32,12,0 + vadduwm 1,1,31 + xxsel 0,12,0,32 + xxsel 32,43,33,32 + xvcmpgtsp 33,5,0 + vadduwm 0,0,18 + vadduwm 18,18,14 + xxsel 51,51,32,33 + xxsel 5,5,0,33 + bgt 7,.L5 + xxsldwi 11,5,5,3 + xxsldwi 12,5,5,2 + vspltw 0,19,3 + xxsldwi 0,5,5,1 + xscvspdp 11,11 
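/*
 * End-of-loop reduction: the four float lanes of the running-min
 * vector are rotated out with xxsldwi, widened to double by the
 * xscvspdp converts here, and folded with scalar compares; when two
 * lanes tie on the value, the cmplw/ble ladder keeps the smaller
 * index.  The same logic restated in C (illustrative only):
 *
 *   static void reduce4(const float v[4], const unsigned idx[4],
 *                       float *min, unsigned *best) {
 *       *min = v[0]; *best = idx[0];
 *       for (int k = 1; k < 4; k++)
 *           if (v[k] < *min || (v[k] == *min && idx[k] < *best)) {
 *               *min  = v[k];
 *               *best = idx[k];
 *           }
 *   }
 */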
+ xscvspdp 12,12 + mfvsrwz 6,32 + vspltw 0,19,2 + xscvspdp 0,0 + mfvsrwz 7,51 + mfvsrwz 5,32 + vspltw 0,19,0 + xscvspdp 5,5 + mfvsrwz 10,32 + fcmpu 7,11,12 + rldicl 3,6,0,32 + fmr 10,0 + rldicl 11,7,0,32 + rldicl 31,5,0,32 + rldicl 0,10,0,32 + beq 7,.L56 + bng 7,.L8 + fmr 11,12 + mr 3,31 +.L8: + fcmpu 7,0,5 + bne 7,.L11 + cmplw 7,7,10 + ble 7,.L12 + mr 7,10 +.L12: + rldicl 11,7,0,32 +.L13: + fcmpu 7,11,10 + beq 7,.L57 + bgt 7,.L58 +.L17: + cmpd 7,9,8 + ble 7,.L19 + addi 7,8,1 + sldi 10,8,1 + cmpd 7,7,9 + sldi 10,10,2 + add 4,4,10 + subf 10,8,9 + mtctr 10 + bgt 7,.L37 + li 10,-1 + rldicr 10,10,0,0 + cmpd 7,9,10 + beq 7,.L37 + .p2align 4,,15 +.L21: + lfs 0,0(4) + lfs 12,4(4) + addi 4,4,8 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,11,0 + bng 7,.L20 + fmr 11,0 + mr 3,8 +.L20: + addi 8,8,1 + bdnz .L21 +.L19: + li 0,-128 + ld 31,-8(1) + addi 3,3,1 + lvx 25,1,0 + li 0,-112 + lvx 26,1,0 + li 0,-96 + lvx 27,1,0 + li 0,-80 + lvx 28,1,0 + li 0,-64 + lvx 29,1,0 + li 0,-48 + lvx 30,1,0 + li 0,-32 + lvx 31,1,0 + blr + .p2align 4,,15 +.L56: + cmplw 7,6,5 + ble 7,.L7 + mr 6,5 +.L7: + rldicl 3,6,0,32 + b .L8 + .p2align 4,,15 +.L29: + li 3,1 + blr + .p2align 4,,15 +.L11: + bng 7,.L13 + fmr 10,5 + mr 11,0 + b .L13 + .p2align 4,,15 +.L57: + cmpd 7,3,11 + ble 7,.L17 + mr 3,11 + b .L17 + .p2align 4,,15 +.L58: + fmr 11,10 + mr 3,11 + b .L17 +.L43: + li 9,1 + mtctr 9 + b .L44 +.L37: + li 9,1 + mtctr 9 + b .L21 + .long 0 + .byte 0,0,0,0,0,1,0,0 +#if _CALL_ELF ==2 + .size icamin_k,.-icamin_k +#endif + .section .rodata.cst16,"aM",@progbits,16 + .align 4 +.LC2: + .byte 0 + .byte 1 + .byte 2 + .byte 3 + .byte 8 + .byte 9 + .byte 10 + .byte 11 + .byte 16 + .byte 17 + .byte 18 + .byte 19 + .byte 24 + .byte 25 + .byte 26 + .byte 27 +.LC3: + .byte 4 + .byte 5 + .byte 6 + .byte 7 + .byte 12 + .byte 13 + .byte 14 + .byte 15 + .byte 20 + .byte 21 + .byte 22 + .byte 23 + .byte 28 + .byte 29 + .byte 30 + .byte 31 +.LC4: + .long 0 + .long 1 + .long 2 + .long 3 +.LC5: + .long 4 + .long 5 + .long 6 + .long 7 +.LC6: + .long 8 + .long 9 + .long 10 + .long 11 +.LC7: + .long 12 + .long 13 + .long 14 + .long 15 +.LC8: + .long 32 + .long 32 + .long 32 + .long 32 + .ident "GCC: (SUSE Linux) 7.3.1 20180323 [gcc-7-branch revision 258812]" + .section .note.GNU-stack,"",@progbits diff --git a/kernel/power/icamin_power9.S b/kernel/power/icamin_power9.S new file mode 100644 index 000000000..58a3c53a3 --- /dev/null +++ b/kernel/power/icamin_power9.S @@ -0,0 +1,392 @@ +/* + .file "icamin.c" + .abiversion 2 + .section ".text" + .align 2 + .p2align 4,,15 + .globl icamin_k + .type icamin_k, @function +*/ +#define ASSEMBLER +#include "common.h" + + PROLOGUE + +icamin_k: +.LCF0: +0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l + .localentry icamin_k,.-icamin_k + mr. 9,3 + ble 0,.L25 + cmpdi 7,5,0 + li 3,0 + blelr 7 + lfs 11,0(4) + lfs 0,4(4) + cmpdi 7,5,1 + fabs 11,11 + fabs 0,0 + fadds 11,11,0 + beq 7,.L53 + cmpdi 7,9,1 + beq 7,.L29 + addi 9,9,-1 + sldi 5,5,3 + li 3,0 + mtctr 9 + add 4,4,5 + li 9,1 + .p2align 4,,15 +.L24: + lfs 0,4(4) + lfs 12,0(4) + add 4,4,5 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,0,11 + bnl 7,.L23 + fmr 11,0 + mr 3,9 +.L23: + addi 9,9,1 + bdnz .L24 +.L51: + addi 3,3,1 + blr + .p2align 4,,15 +.L25: + li 3,0 + blr + .p2align 4,,15 +.L53: + rldicr. 
8,9,0,58 + bne 0,.L54 + addi 7,8,1 + li 10,0 + subf 6,8,9 + li 3,0 + cmpd 7,7,9 + sldi 10,10,2 + mtctr 6 + add 4,4,10 + bgt 7,.L43 + li 10,-1 + rldicr 10,10,0,0 + cmpd 7,9,10 + beq 7,.L43 + .p2align 4,,15 +.L44: + lfs 0,0(4) + lfs 12,4(4) + addi 4,4,8 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,11,0 + bng 7,.L46 + fmr 11,0 + mr 3,8 +.L46: + addi 8,8,1 + bdnz .L44 + b .L51 + .p2align 4,,15 +.L54: + xscvdpspn 9,11 + addis 11,2,.LC2@toc@ha + addis 3,2,.LC3@toc@ha + addis 5,2,.LC6@toc@ha + addis 6,2,.LC7@toc@ha + addis 7,2,.LC4@toc@ha + addis 10,2,.LC5@toc@ha + xxspltib 48,0 + addi 11,11,.LC2@toc@l + addi 3,3,.LC3@toc@l + addi 5,5,.LC6@toc@l + stxv 59,-80(1) + addi 6,6,.LC7@toc@l + stxv 60,-64(1) + stxv 63,-16(1) + addi 7,7,.LC4@toc@l + xxspltib 59,16 + lxv 44,0(11) + xxspltib 60,32 + lxv 45,0(3) + lxv 63,0(5) + xxlor 47,48,48 + lxv 46,0(6) + addi 10,10,.LC5@toc@l + stxv 61,-48(1) + stxv 62,-32(1) + xxspltw 9,9,0 + lxv 61,0(7) + lxv 62,0(10) + li 7,0 + mr 10,4 + vextsb2w 27,27 + vextsb2w 28,28 + stxv 57,-112(1) + stxv 58,-96(1) + .p2align 4,,15 +.L5: + lxv 0,0(10) + addi 7,7,32 + addi 10,10,256 + cmpd 7,8,7 + xvabssp 34,0 + lxv 0,-240(10) + xvabssp 42,0 + lxv 0,-224(10) + xvabssp 49,0 + lxv 0,-208(10) + vpermr 26,10,2,12 + vpermr 2,10,2,13 + xvabssp 35,0 + lxv 0,-192(10) + xvaddsp 34,58,34 + xvabssp 36,0 + lxv 0,-176(10) + vpermr 10,3,17,12 + vpermr 3,3,17,13 + xvabssp 33,0 + lxv 0,-160(10) + xvaddsp 10,42,35 + xvabssp 50,0 + lxv 0,-144(10) + vpermr 17,1,4,12 + vpermr 4,1,4,13 + xvabssp 37,0 + lxv 0,-128(10) + xvaddsp 36,49,36 + xvabssp 38,0 + lxv 0,-112(10) + vpermr 1,5,18,12 + vpermr 5,5,18,13 + xvabssp 43,0 + lxv 0,-96(10) + xvaddsp 12,33,37 + xvabssp 51,0 + lxv 0,-80(10) + vpermr 18,11,6,12 + vpermr 6,11,6,13 + xvabssp 39,0 + lxv 0,-64(10) + xvaddsp 38,50,38 + xvabssp 40,0 + lxv 0,-48(10) + vpermr 11,7,19,12 + vpermr 7,7,19,13 + xvabssp 32,0 + lxv 0,-32(10) + xvaddsp 11,43,39 + xvcmpgtsp 39,34,10 + xvcmpgtsp 43,36,12 + xvabssp 57,0 + lxv 0,-16(10) + vpermr 19,0,8,12 + vpermr 8,0,8,13 + xxsel 10,34,10,39 + xxsel 12,36,12,43 + xxsel 39,61,62,39 + xxsel 43,63,46,43 + xvabssp 41,0 + xvaddsp 40,51,40 + vpermr 0,9,25,12 + vpermr 9,9,25,13 + xvaddsp 0,32,41 + xvcmpgtsp 41,38,11 + xvcmpgtsp 32,10,12 + xvcmpgtsp 42,40,0 + xxsel 11,38,11,41 + xxsel 12,10,12,32 + xxsel 43,39,43,32 + xxsel 41,61,62,41 + xxsel 0,40,0,42 + xxsel 42,63,46,42 + xvcmpgtsp 33,11,0 + xxsel 0,11,0,33 + xxsel 33,41,42,33 + xvcmpgtsp 32,12,0 + vadduwm 1,1,27 + xxsel 0,12,0,32 + xxsel 32,43,33,32 + xvcmpgtsp 33,9,0 + vadduwm 0,0,15 + vadduwm 15,15,28 + xxsel 48,48,32,33 + xxsel 9,9,0,33 + bgt 7,.L5 + xxsldwi 11,9,9,3 + xxsldwi 12,9,9,2 + li 10,0 + li 3,12 + xxsldwi 0,9,9,1 + xscvspdp 9,9 + vextuwrx 6,10,16 + li 10,4 + xscvspdp 11,11 + xscvspdp 12,12 + xscvspdp 0,0 + vextuwrx 5,10,16 + li 10,8 + vextuwrx 7,10,16 + vextuwrx 10,3,16 + rldicl 12,5,0,32 + rldicl 3,6,0,32 + rldicl 11,7,0,32 + rldicl 0,10,0,32 + fcmpu 7,11,12 + fmr 10,0 + beq 7,.L55 + bng 7,.L8 + mr 3,12 + fmr 11,12 +.L8: + fcmpu 7,0,9 + bne 7,.L11 + cmplw 7,7,10 + ble 7,.L12 + mr 7,10 +.L12: + rldicl 11,7,0,32 +.L13: + fcmpu 7,11,10 + beq 7,.L56 + bng 7,.L17 + mr 3,11 + fmr 11,10 +.L17: + cmpd 7,9,8 + ble 7,.L19 + addi 7,8,1 + sldi 10,8,1 + subf 6,8,9 + cmpd 7,7,9 + sldi 10,10,2 + mtctr 6 + add 4,4,10 + bgt 7,.L37 + li 10,-1 + rldicr 10,10,0,0 + cmpd 7,9,10 + beq 7,.L37 + .p2align 4,,15 +.L21: + lfs 0,0(4) + lfs 12,4(4) + addi 4,4,8 + fabs 0,0 + fabs 12,12 + fadds 0,0,12 + fcmpu 7,11,0 + bng 7,.L20 + fmr 11,0 + mr 3,8 +.L20: + addi 8,8,1 + bdnz .L21 +.L19: + lxv 
57,-112(1) + lxv 58,-96(1) + addi 3,3,1 + lxv 59,-80(1) + lxv 60,-64(1) + lxv 61,-48(1) + lxv 62,-32(1) + lxv 63,-16(1) + blr + .p2align 4,,15 +.L55: + cmplw 7,6,5 + ble 7,.L7 + mr 6,5 +.L7: + rldicl 3,6,0,32 + b .L8 + .p2align 4,,15 +.L29: + li 3,1 + blr + .p2align 4,,15 +.L11: + bng 7,.L13 + mr 11,0 + fmr 10,9 + b .L13 + .p2align 4,,15 +.L56: + cmpd 7,3,11 + ble 7,.L17 + mr 3,11 + b .L17 +.L37: + li 9,1 + mtctr 9 + b .L21 +.L43: + li 9,1 + mtctr 9 + b .L44 + .long 0 + .byte 0,0,0,0,0,0,0,0 + .size icamin_k,.-icamin_k + .section .rodata.cst16,"aM",@progbits,16 + .align 4 +.LC2: + .byte 0 + .byte 1 + .byte 2 + .byte 3 + .byte 8 + .byte 9 + .byte 10 + .byte 11 + .byte 16 + .byte 17 + .byte 18 + .byte 19 + .byte 24 + .byte 25 + .byte 26 + .byte 27 +.LC3: + .byte 4 + .byte 5 + .byte 6 + .byte 7 + .byte 12 + .byte 13 + .byte 14 + .byte 15 + .byte 20 + .byte 21 + .byte 22 + .byte 23 + .byte 28 + .byte 29 + .byte 30 + .byte 31 +.LC4: + .long 0 + .long 1 + .long 2 + .long 3 +.LC5: + .long 4 + .long 5 + .long 6 + .long 7 +.LC6: + .long 8 + .long 9 + .long 10 + .long 11 +.LC7: + .long 12 + .long 13 + .long 14 + .long 15 + .ident "GCC: (SUSE Linux) 7.3.1 20180323 [gcc-7-branch revision 258812]" + .section .note.GNU-stack,"",@progbits diff --git a/kernel/power/idamax.c b/kernel/power/idamax.c index 5bdc0a13c..5016f67dd 100644 --- a/kernel/power/idamax.c +++ b/kernel/power/idamax.c @@ -26,7 +26,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ #include "common.h" #include +#if defined(__VEC__) || defined(__ALTIVEC__) #include +#endif + #if defined(DOUBLE) #define ABS fabs @@ -37,6 +40,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
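/*
 * Two layers of guards are introduced in idamax.c (and mirrored in
 * idamin.c below).  __VEC__/__ALTIVEC__ make the altivec.h include and
 * the hand-written VSX kernel conditional on the compiler actually
 * targeting AltiVec/VSX, and _CALL_ELF == 2 further restricts the
 * inline-asm fast path to the ELFv2 ABI; everywhere else the plain C
 * loop performs the whole scan.  The resulting shape, sketched with
 * the file's own names (vector-kernel body elided):
 *
 *   #if defined(__VEC__) || defined(__ALTIVEC__)
 *   static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf);
 *   #endif
 *
 *   BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) {
 *       BLASLONG i = 0, max = 0;
 *       FLOAT maxf = 0.0;
 *       if (n <= 0 || inc_x <= 0) return 0;
 *       if (inc_x == 1) {
 *   #if defined(_CALL_ELF) && (_CALL_ELF == 2)
 *   #if defined(__VEC__) || defined(__ALTIVEC__)
 *           BLASLONG n1 = n & -32;   // vector kernel eats 32 at a time
 *           if (n1 > 0) { max = diamax_kernel_32(n1, x, &maxf); i = n1; }
 *   #endif
 *   #endif
 *           while (i < n) {          // scalar tail, or the full scan
 *               if (ABS(x[i]) > maxf) { max = i; maxf = ABS(x[i]); }
 *               i++;
 *           }
 *       }
 *       return max + 1;              // result is 1-based
 *   }
 *
 * (The strided inc_x != 1 case is handled by a similar scalar loop in
 * the actual file.)
 */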
#endif +#if defined(__VEC__) || defined(__ALTIVEC__) + /** * Find maximum index * Warning: requirements n>0 and n % 32 == 0 @@ -58,8 +63,8 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "lxvd2x 47, %[i48],%[ptr_tmp] \n\t" "lxvd2x 48, %[i64],%[ptr_tmp] \n\t" "lxvd2x 49, %[i80],%[ptr_tmp] \n\t" - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" "xxlor 40,%x[start],%x[start] \n\t" //{ 1,0} vs40 | v8 "vaddudm 9,8,%[adder] \n\t" //{3,2} vs41 @@ -69,7 +74,7 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "vaddudm 11,10,%[adder] \n\t" //{7,6} vs43 "xxlxor 39,39,39 \n\t" // vs39 vec_max_value "vaddudm 4,11, %[adder] \n\t" // {9,8} -{8;8} vs36 | v4 - "xxspltd 36,36,0 \n\t" + XXSPLTD_S(36,36,0) "xvabsdp 44, 44 \n\t" "xvabsdp 45, 45 \n\t" @@ -77,21 +82,21 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xvabsdp 47, 47 \n\t" "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //jump first half forward - "b 2f \n\t" + "b two%= \n\t" //=================================================================== - ".p2align 5 \n\t" + ".align 5 \n\t" - "1: \n\t" + "one%=: \n\t" "xvcmpgtdp 2,45,44 \n\t " "xvcmpgtdp 3,47,46 \n\t " "xvcmpgtdp 4,49,48 \n\t " - "xvcmpgtdp 5,51,50 \n\t" + "xvcmpgtdp 5,7,6 \n\t" "xxsel 32,40,41,2 \n\t" "xxsel 0,44,45,2 \n\t" @@ -100,7 +105,7 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xxsel 34,40,41,4 \n\t" "xxsel 45,48,49,4 \n\t" "xxsel 35,42,43,5 \n\t" - "xxsel 47,50,51,5 \n\t" + "xxsel 47,6,7,5 \n\t" "xvcmpgtdp 2, 1,0 \n\t" "xvcmpgtdp 3,47, 45 \n\t" @@ -134,8 +139,8 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "vaddudm 1,1,5 \n\t" // get real index for first bigger - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" //compare with previous to get vec_max_index(v6 | vs38 ) and vec_max_value (vs39) "xvcmpgtdp 2, 3,39 \n\t" @@ -155,16 +160,16 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //<-----------jump here from first load - "2: \n\t" + "two%=: \n\t" "xvcmpgtdp 2,45,44 \n\t " "xvcmpgtdp 3,47,46 \n\t " "xvcmpgtdp 4,49,48 \n\t " - "xvcmpgtdp 5,51,50 \n\t" + "xvcmpgtdp 5,7,6 \n\t" "xxsel 32,40,41,2 \n\t" "xxsel 0,44,45,2 \n\t" @@ -173,7 +178,7 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xxsel 34,40,41,4 \n\t" "xxsel 45,48,49,4 \n\t" "xxsel 35,42,43,5 \n\t" - "xxsel 47,50,51,5 \n\t" + "xxsel 47,6,7,5 \n\t" "xvcmpgtdp 2, 1,0 \n\t" "xvcmpgtdp 3,47, 45 \n\t" @@ -203,8 +208,8 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "vaddudm 1,1,5 \n\t" // get real index for first bigger - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" @@ -226,21 +231,21 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //decrement n "addic. 
%[n], %[n], -32 \n\t" //Loop back if >0 - "bgt+ 1b \n\t" + "bgt+ one%= \n\t" //============================================================================== "xvcmpgtdp 2,45,44 \n\t " "xvcmpgtdp 3,47,46 \n\t " "xvcmpgtdp 4,49,48 \n\t " - "xvcmpgtdp 5,51,50 \n\t" + "xvcmpgtdp 5,7,6 \n\t" "xxsel 32,40,41,2 \n\t" "xxsel 0,44,45,2 \n\t" @@ -249,7 +254,7 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xxsel 34,40,41,4 \n\t" "xxsel 45,48,49,4 \n\t" "xxsel 35,42,43,5 \n\t" - "xxsel 47,50,51,5 \n\t" + "xxsel 47,6,7,5 \n\t" "xvcmpgtdp 2, 1,0 \n\t" "xvcmpgtdp 3,47, 45 \n\t" @@ -276,28 +281,28 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { ///////extract max value and max index from vector - "xxspltd 32,38,1 \n\t" - "xxspltd 40,39,1 \n\t" + XXSPLTD_S(32,38,1) + XXSPLTD_S(40,39,1) "xvcmpeqdp. 2, 40,39 \n\t" //cr6 0 bit set if all true, cr6=4*6+bit_ind=24,0011at CR(BI)==1, at=10 hint that it occurs rarely //0b001110=14 - "bc 14,24, 3f \n\t" + "bc 14,24, three%= \n\t" "xvcmpgtdp 4, 40,39 \n\t" "xxsel 0,39,40,4 \n\t" "xxsel 1,38,32,4 \n\t" "stxsdx 0,0,%[ptr_maxf] \n\t" - "b 4f \n\t" + "b four%= \n\t" - "3: \n\t" + "three%=: \n\t" //if elements value are equal then choose minimum index - "xxspltd 0,40,0 \n\t" + XXSPLTD_S(0,40,0) "vminud 0,0,6 \n\t" //vs32 vs38 "xxlor 1,32,32 \n\t" "stxsdx 0,0,%[ptr_maxf] \n\t" - "4: \n\t" + "four%=: \n\t" "mfvsrd %[index],1 \n\t" : [maxf] "=m"(*maxf),[ptr_tmp] "+&b"(x),[index] "=r"(index), [n] "+&r"(n) @@ -306,13 +311,14 @@ static BLASLONG diamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) { [i64] "b"(64), [i80] "b"(80), [i96] "b"(96), [i112] "b"(112), [start] "v"(start), [adder] "v"(temp_add_index) : "cc", "vs0", "vs1","vs2","vs3", "vs4","vs5","vs32", "vs33", "vs34", "vs35", "vs36", - "vs37", "vs38", "vs39", "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs50", "vs51" + "vs37", "vs38", "vs39", "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs6", "vs7" ); return index; } +#endif BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) { BLASLONG i = 0; @@ -325,13 +331,17 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) { if (inc_x == 1) { BLASLONG n1 = n & -32; - if (n1 > 0) { +#if defined(_CALL_ELF) && (_CALL_ELF == 2) +#if defined(__VEC__) || defined(__ALTIVEC__) + + if (n1 > 0) { max = diamax_kernel_32(n1, x, &maxf); i = n1; } - +#endif +#endif while (i < n) { if (ABS(x[i]) > maxf) { max = i; diff --git a/kernel/power/idamin.c b/kernel/power/idamin.c index 7fe0f8a33..e37718c48 100644 --- a/kernel/power/idamin.c +++ b/kernel/power/idamin.c @@ -37,6 +37,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
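/*
 * The remaining edits to the inline-asm files (idamax.c above,
 * idamin.c here) are mechanical but worth spelling out.  Numeric local
 * labels ("1:" ... "bgt+ 1b") become %=-suffixed names ("one%=:" ...
 * "bgt+ one%="): %= expands to a number unique to each instance of the
 * asm statement, so the labels stay unique even if the surrounding
 * function is inlined or cloned, and the rename also sidesteps
 * assemblers without numeric local labels.  In the same spirit,
 * ".p2align 5" becomes ".align 5" (the same 32-byte alignment on
 * PowerPC, where .align takes a power of two), extended mnemonics such
 * as xxspltd go through the XXSPLTD_S(...) wrapper, and the scratch
 * registers vs50/vs51 are renamed to vs6/vs7, with the clobber lists
 * updated to match.  A minimal, self-contained illustration of the %=
 * idiom (a hypothetical countdown loop, not code from this file):
 *
 *   static inline void spin4(void) {
 *       unsigned long c = 4;
 *       __asm__ volatile(
 *           "one%=:                \n\t"
 *           "addic. %0, %0, -1     \n\t"   // decrement, set cr0
 *           "bgt+   one%=          \n\t"   // %= keeps the label unique
 *           : "+r"(c) : : "cc");
 *   }
 */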
#endif +#if defined(__VEC__) || defined(__ALTIVEC__) + /** * Find minimum index * Warning: requirements n>0 and n % 32 == 0 @@ -58,8 +60,8 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "lxvd2x 47, %[i48],%[ptr_tmp] \n\t" "lxvd2x 48, %[i64],%[ptr_tmp] \n\t" "lxvd2x 49, %[i80],%[ptr_tmp] \n\t" - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" "xxlor 40,%x[start],%x[start] \n\t" //{ 1,0} vs40 | v8 "vaddudm 9,8, %[adder] \n\t" //{3,2} vs41 @@ -69,7 +71,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "vaddudm 11,10,%[adder] \n\t" //{7,6} vs43 "lxvdsx 39,0,%[ptr_minf] \n\t" // vs39 vec_min_value "vaddudm 4,11, %[adder] \n\t" // {9,8} -{8;8} vs36 | v4 - "xxspltd 36,36,0 \n\t" + XXSPLTD_S(36,36,0) "xvabsdp 39, 39 \n\t" "xvabsdp 44, 44 \n\t" @@ -78,21 +80,21 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "xvabsdp 47, 47 \n\t" "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //jump first half forward - "b 2f \n\t" + "b two%= \n\t" //=================================================================== - ".p2align 5 \n\t" + ".align 5 \n\t" - "1: \n\t" + "one%=: \n\t" "xvcmpgtdp 2,44,45 \n\t " "xvcmpgtdp 3,46,47 \n\t " "xvcmpgtdp 4,48,49 \n\t " - "xvcmpgtdp 5,50,51 \n\t" + "xvcmpgtdp 5,6,7 \n\t" "xxsel 32,40,41,2 \n\t" "xxsel 0,44,45,2 \n\t" @@ -101,7 +103,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "xxsel 34,40,41,4 \n\t" "xxsel 45,48,49,4 \n\t" "xxsel 35,42,43,5 \n\t" - "xxsel 47,50,51,5 \n\t" + "xxsel 47,6,7,5 \n\t" "xvcmpgtdp 2,0, 1 \n\t" "xvcmpgtdp 3, 45,47 \n\t" @@ -135,8 +137,8 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "vaddudm 1,1,5 \n\t" // get real index for first smaller - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" //compare with previous to get vec_min_index(v6 | vs38 ) and vec_min_value (vs39) "xvcmpgtdp 2,39, 3 \n\t" @@ -156,16 +158,16 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //<-----------jump here from first load - "2: \n\t" + "two%=: \n\t" "xvcmpgtdp 2,44,45 \n\t " "xvcmpgtdp 3,46,47 \n\t " "xvcmpgtdp 4,48,49 \n\t " - "xvcmpgtdp 5,50,51 \n\t" + "xvcmpgtdp 5,6,7 \n\t" "xxsel 32,40,41,2 \n\t" "xxsel 0,44,45,2 \n\t" @@ -174,7 +176,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "xxsel 34,40,41,4 \n\t" "xxsel 45,48,49,4 \n\t" "xxsel 35,42,43,5 \n\t" - "xxsel 47,50,51,5 \n\t" + "xxsel 47,6,7,5 \n\t" "xvcmpgtdp 2,0, 1 \n\t" "xvcmpgtdp 3, 45,47 \n\t" @@ -204,8 +206,8 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "vaddudm 1,1,5 \n\t" // get real index for first smaller - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" @@ -227,21 +229,21 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //decrement n "addic. 
%[n], %[n], -32 \n\t" //Loop back if >0 - "bgt+ 1b \n\t" + "bgt+ one%= \n\t" //============================================================================== "xvcmpgtdp 2,44,45 \n\t " "xvcmpgtdp 3,46,47 \n\t " "xvcmpgtdp 4,48,49 \n\t " - "xvcmpgtdp 5,50,51 \n\t" + "xvcmpgtdp 5,6,7 \n\t" "xxsel 32,40,41,2 \n\t" "xxsel 0,44,45,2 \n\t" @@ -250,7 +252,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { "xxsel 34,40,41,4 \n\t" "xxsel 45,48,49,4 \n\t" "xxsel 35,42,43,5 \n\t" - "xxsel 47,50,51,5 \n\t" + "xxsel 47,6,7,5 \n\t" "xvcmpgtdp 2,0, 1 \n\t" "xvcmpgtdp 3, 45,47 \n\t" @@ -277,28 +279,28 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { ///////extract min value and min index from vector - "xxspltd 32,38,1 \n\t" - "xxspltd 40,39,1 \n\t" + XXSPLTD_S(32,38,1) + XXSPLTD_S(40,39,1) "xvcmpeqdp. 2, 40,39 \n\t" //cr6 0 bit set if all true, cr6=4*6+bit_ind=24,0011at CR(BI)==1, at=10 hint that it occurs rarely //0b001110=14 - "bc 14,24, 3f \n\t" + "bc 14,24, three%= \n\t" "xvcmpgtdp 4,39, 40 \n\t" "xxsel 0,39,40,4 \n\t" "xxsel 1,38,32,4 \n\t" "stxsdx 0,0,%[ptr_minf] \n\t" - "b 4f \n\t" + "b four%= \n\t" - "3: \n\t" + "three%=: \n\t" //if elements value are equal then choose minimum index - "xxspltd 0,40,0 \n\t" + XXSPLTD_S(0,40,0) "vminud 0,0,6 \n\t" //vs32 vs38 "xxlor 1,32,32 \n\t" "stxsdx 0,0,%[ptr_minf] \n\t" - "4: \n\t" + "four%=: \n\t" "mfvsrd %[index],1 \n\t" : [minf] "=m"(*minf),[ptr_tmp] "+&b"(x),[index] "=r"(index), [n] "+&r"(n) @@ -307,13 +309,13 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) { [i64] "b"(64), [i80] "b"(80), [i96] "b"(96), [i112] "b"(112), [start] "v"(start), [adder] "v"(temp_add_index) : "cc", "vs0", "vs1","vs2","vs3", "vs4","vs5","vs32", "vs33", "vs34", "vs35", "vs36", - "vs37", "vs38", "vs39", "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs50", "vs51" + "vs37", "vs38", "vs39", "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs6", "vs7" ); return index; } - +#endif BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) { @@ -326,13 +328,17 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) { minf = ABS(x[0]); //index's not incremented if (inc_x == 1) { +#if defined(_CALL_ELF) && (_CALL_ELF == 2) +#if defined(__VEC__) || defined(__ALTIVEC__) + BLASLONG n1 = n & -32; - if (n1 > 0) { + if (n1 > 0) { min = diamin_kernel_32(n1, x, &minf); i = n1; } - +#endif +#endif while (i < n) { if (ABS(x[i]) < minf) { min = i; diff --git a/kernel/power/isamax_power8.S b/kernel/power/isamax_power8.S new file mode 100644 index 000000000..ec1c283f3 --- /dev/null +++ b/kernel/power/isamax_power8.S @@ -0,0 +1,440 @@ +/* .file "isamax.c" + .abiversion 2 + .section ".text" + .align 2 + .p2align 4,,15 + .globl isamax_k + .type isamax_k, @function +*/ + +#define ASSEMBLER +#include "common.h" + + PROLOGUE + +#if _CALL_ELF == 2 +isamax_k: +#endif +.LCF0: +0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l +#if _CALL_ELF ==2 + .localentry isamax_k,.-isamax_k +#endif + mr. 11,3 + ble 0,.L36 + cmpdi 7,5,0 + li 3,0 + blelr 7 + cmpdi 7,5,1 + beq 7,.L69 + rldicr. 
7,11,0,61 + beq 0,.L40 + sldi 3,5,1 + xxlxor 0,0,0 + sldi 6,5,2 + add 3,3,5 + sldi 0,5,4 + sldi 3,3,2 + sldi 5,5,3 + mr 9,4 + li 8,0 + li 10,0 + .p2align 4,,15 +.L31: + lfs 12,0(9) + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L23 + fmr 0,12 + mr 8,10 +.L23: + lfsx 12,9,6 + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L25 + fmr 0,12 + addi 8,10,1 +.L25: + lfsx 12,9,5 + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L27 + fmr 0,12 + addi 8,10,2 +.L27: + lfsx 12,9,3 + add 9,9,0 + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L29 + fmr 0,12 + addi 8,10,3 +.L29: + addi 10,10,4 + cmpd 7,7,10 + bgt 7,.L31 + addi 7,7,-1 + srdi 7,7,2 + addi 7,7,1 + sldi 9,7,2 + mulld 7,6,7 + cmpd 7,11,9 + ble 7,.L67 +.L22: + addi 10,9,1 + sldi 7,7,2 + cmpd 7,10,11 + subf 10,9,11 + mtctr 10 + add 4,4,7 + bgt 7,.L54 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L54 + .p2align 4,,15 +.L35: + lfs 12,0(4) + add 4,4,6 + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L33 + fmr 0,12 + mr 8,9 +.L33: + addi 9,9,1 + bdnz .L35 +.L67: + addi 3,8,1 + blr + .p2align 4,,15 +.L36: + li 3,0 + blr + .p2align 4,,15 +.L69: + rldicr. 10,11,0,57 + bne 0,.L70 + addi 7,10,1 + sldi 9,10,2 + xxlxor 12,12,12 + cmpd 7,7,11 + add 4,4,9 + subf 9,10,11 + li 8,0 + mtctr 9 + bgt 7,.L60 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L60 + .p2align 4,,15 +.L61: + lfs 0,0(4) + addi 4,4,4 + fabs 0,0 + fcmpu 7,0,12 + bng 7,.L63 + fmr 12,0 + mr 8,10 +.L63: + addi 10,10,1 + bdnz .L61 + b .L67 + .p2align 4,,15 +.L70: + li 0,-64 + std 31,-8(1) + addis 3,2,.LC2@toc@ha + vspltisw 18,0 + vspltisw 12,0 + addis 5,2,.LC3@toc@ha + addis 6,2,.LC6@toc@ha + stvx 29,1,0 + li 0,-48 + addis 8,2,.LC7@toc@ha + xxlor 35,50,50 + addi 3,3,.LC2@toc@l + addi 5,5,.LC3@toc@l + stvx 30,1,0 + addi 6,6,.LC6@toc@l + li 0,-32 + addi 8,8,.LC7@toc@l + lxvd2x 51,0,3 + lxvd2x 34,0,5 + addis 7,2,.LC4@toc@ha + stvx 31,1,0 + lxvd2x 47,0,6 + addis 9,2,.LC5@toc@ha + addi 7,7,.LC4@toc@l + lxvd2x 48,0,8 + addi 9,9,.LC5@toc@l + vspltisw 17,8 + vadduwm 17,17,17 + lxvd2x 36,0,7 + li 7,0 + lxvd2x 37,0,9 + mr 9,4 + .p2align 4,,15 +.L5: + addi 5,9,16 + addi 6,9,32 + lxvd2x 41,0,9 + vadduwm 31,3,15 + addi 8,9,64 + addi 31,9,48 + addi 12,9,80 + addi 3,9,96 + lxvd2x 5,0,5 + lxvd2x 43,0,6 + addi 5,9,112 + addi 6,9,128 + lxvd2x 1,0,8 + lxvd2x 9,0,31 + addi 8,9,160 + addi 31,9,144 + lxvd2x 6,0,12 + lxvd2x 13,0,3 + addi 12,9,176 + addi 3,9,192 + lxvd2x 11,0,5 + lxvd2x 2,0,6 + xvabssp 41,41 + addi 5,9,208 + addi 6,9,224 + lxvd2x 3,0,8 + lxvd2x 7,0,31 + addi 8,9,240 + lxvd2x 10,0,12 + lxvd2x 4,0,3 + xvabssp 43,43 + xvabssp 5,5 + addi 7,7,64 + lxvd2x 8,0,5 + lxvd2x 0,0,6 + xvabssp 9,9 + xvabssp 1,1 + cmpd 7,10,7 + addi 9,9,256 + lxvd2x 12,0,8 + xvabssp 6,6 + xvabssp 13,13 + xvabssp 11,11 + xvabssp 2,2 + xvabssp 7,7 + xvabssp 3,3 + xvabssp 10,10 + xvabssp 4,4 + xvabssp 8,8 + xvabssp 0,0 + xvabssp 12,12 + xvcmpgtsp 32,5,41 + xvcmpgtsp 61,9,43 + xvcmpgtsp 45,6,1 + xvcmpgtsp 62,11,13 + xvcmpgtsp 38,7,2 + xvcmpgtsp 46,10,3 + xvcmpgtsp 40,8,4 + xvcmpgtsp 39,12,0 + xxsel 5,41,5,32 + xxsel 32,51,34,32 + xxsel 9,43,9,61 + xxsel 6,1,6,45 + xxsel 11,13,11,62 + xxsel 43,51,34,45 + xxsel 7,2,7,38 + xvcmpgtsp 41,9,5 + xxsel 10,3,10,46 + xvcmpgtsp 45,11,6 + xxsel 8,4,8,40 + xxsel 62,36,37,62 + xxsel 0,0,12,39 + xvcmpgtsp 42,10,7 + xxsel 61,36,37,61 + xxsel 40,51,34,40 + xvcmpgtsp 33,0,8 + xxsel 39,36,37,39 + xxsel 38,51,34,38 + xxsel 46,36,37,46 + xxsel 9,5,9,41 + xxsel 41,32,61,41 + xxsel 12,6,11,45 + xxsel 45,43,62,45 + xxsel 11,7,10,42 + xvcmpgtsp 32,12,9 + vadduwm 13,13,17 + xxsel 42,38,46,42 + xxsel 0,8,0,33 + xxsel 33,40,39,33 + xvcmpgtsp 43,0,11 + vadduwm 1,1,17 
+ xxsel 12,9,12,32 + xxsel 32,41,45,32 + vadduwm 0,3,0 + vadduwm 3,3,16 + xxsel 0,11,0,43 + xxsel 33,42,33,43 + xvcmpgtsp 45,0,12 + vadduwm 1,31,1 + xxsel 0,12,0,45 + xxsel 32,32,33,45 + xvcmpgtsp 33,0,44 + xxsel 50,50,32,33 + xxsel 44,44,0,33 + bgt 7,.L5 + xxsldwi 12,44,44,1 + xscvspdp 10,44 + vspltw 0,18,0 + xxsldwi 0,44,44,3 + xscvspdp 12,12 + mfvsrwz 3,50 + mfvsrwz 6,32 + vspltw 0,18,3 + xscvspdp 0,0 + xxsldwi 44,44,44,2 + mfvsrwz 7,32 + vspltw 0,18,2 + xscvspdp 44,44 + mfvsrwz 9,32 + fcmpu 7,12,10 + rldicl 8,3,0,32 + rldicl 31,6,0,32 + fmr 11,0 + rldicl 0,7,0,32 + rldicl 5,9,0,32 + beq 7,.L71 + bnl 7,.L8 + fmr 12,10 + mr 8,31 +.L8: + xscmpudp 7,0,44 + bne 7,.L11 + cmplw 7,7,9 + ble 7,.L12 + mr 7,9 +.L12: + rldicl 5,7,0,32 +.L13: + fcmpu 7,12,11 + beq 7,.L72 + bnl 7,.L17 + fmr 12,11 + mr 8,5 +.L17: + cmpd 7,11,10 + ble 7,.L16 + addi 7,10,1 + sldi 9,10,2 + cmpd 7,7,11 + add 4,4,9 + subf 9,10,11 + mtctr 9 + bgt 7,.L53 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L53 + .p2align 4,,15 +.L21: + lfs 0,0(4) + addi 4,4,4 + fabs 0,0 + fcmpu 7,0,12 + bng 7,.L19 + fmr 12,0 + mr 8,10 +.L19: + addi 10,10,1 + bdnz .L21 +.L16: + li 0,-64 + ld 31,-8(1) + addi 3,8,1 + lvx 29,1,0 + li 0,-48 + lvx 30,1,0 + li 0,-32 + lvx 31,1,0 + blr + .p2align 4,,15 +.L71: + cmplw 7,3,6 + ble 7,.L7 + mr 3,6 +.L7: + rldicl 8,3,0,32 + b .L8 + .p2align 4,,15 +.L40: + xxlxor 0,0,0 + sldi 6,5,2 + li 8,0 + li 9,0 + b .L22 + .p2align 4,,15 +.L11: + blt 7,.L39 + mr 5,0 + b .L13 + .p2align 4,,15 +.L72: + cmpd 7,8,5 + ble 7,.L17 + mr 8,5 + b .L17 + .p2align 4,,15 +.L39: + xscpsgndp 11,44,44 + b .L13 +.L53: + li 9,1 + mtctr 9 + b .L21 +.L54: + li 10,1 + mtctr 10 + b .L35 +.L60: + li 9,1 + mtctr 9 + b .L61 + .long 0 + .byte 0,0,0,0,0,1,0,0 +#if _CALL_ELF ==2 + .size isamax_k,.-isamax_k +#endif + .section .rodata.cst16,"aM",@progbits,16 + .align 4 +.LC2: + .long 0 + .long 1 + .long 2 + .long 3 +.LC3: + .long 4 + .long 5 + .long 6 + .long 7 +.LC4: + .long 8 + .long 9 + .long 10 + .long 11 +.LC5: + .long 12 + .long 13 + .long 14 + .long 15 +.LC6: + .long 32 + .long 32 + .long 32 + .long 32 +.LC7: + .long 64 + .long 64 + .long 64 + .long 64 + .ident "GCC: (SUSE Linux) 7.3.1 20180323 [gcc-7-branch revision 258812]" + .section .note.GNU-stack,"",@progbits diff --git a/kernel/power/isamax_power9.S b/kernel/power/isamax_power9.S new file mode 100644 index 000000000..259c996fc --- /dev/null +++ b/kernel/power/isamax_power9.S @@ -0,0 +1,404 @@ +/* + .file "isamax.c" + .abiversion 2 + .section ".text" + .align 2 + .p2align 4,,15 + .globl isamax_k + .type isamax_k, @function +*/ +#define ASSEMBLER +#include "common.h" + + PROLOGUE + +isamax_k: +.LCF0: +0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l + .localentry isamax_k,.-isamax_k + mr. 11,3 + ble 0,.L36 + cmpdi 7,5,0 + li 3,0 + blelr 7 + cmpdi 7,5,1 + beq 7,.L69 + rldicr. 
7,11,0,61 + beq 0,.L40 + sldi 10,5,1 + sldi 6,5,2 + sldi 0,5,4 + sldi 3,5,3 + mr 9,4 + xxlxor 0,0,0 + li 8,0 + add 5,10,5 + li 10,0 + sldi 5,5,2 + .p2align 4,,15 +.L31: + lfs 12,0(9) + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L23 + fmr 0,12 + mr 8,10 +.L23: + lfsx 12,9,6 + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L25 + fmr 0,12 + addi 8,10,1 +.L25: + lfsx 12,9,3 + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L27 + fmr 0,12 + addi 8,10,2 +.L27: + lfsx 12,9,5 + add 9,9,0 + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L29 + fmr 0,12 + addi 8,10,3 +.L29: + addi 10,10,4 + cmpd 7,7,10 + bgt 7,.L31 + addi 7,7,-1 + srdi 7,7,2 + addi 7,7,1 + sldi 9,7,2 + mulld 7,6,7 + cmpd 7,11,9 + ble 7,.L67 +.L22: + addi 10,9,1 + sldi 7,7,2 + subf 5,9,11 + cmpd 7,10,11 + mtctr 5 + add 4,4,7 + bgt 7,.L54 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L54 + .p2align 4,,15 +.L35: + lfs 12,0(4) + add 4,4,6 + fabs 12,12 + fcmpu 7,12,0 + bng 7,.L33 + fmr 0,12 + mr 8,9 +.L33: + addi 9,9,1 + bdnz .L35 +.L67: + addi 3,8,1 + blr + .p2align 4,,15 +.L36: + li 3,0 + blr + .p2align 4,,15 +.L69: + rldicr. 10,11,0,57 + bne 0,.L70 + addi 7,10,1 + sldi 9,10,2 + subf 6,10,11 + li 8,0 + xxlxor 12,12,12 + cmpd 7,7,11 + mtctr 6 + add 4,4,9 + bgt 7,.L60 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L60 + .p2align 4,,15 +.L61: + lfs 0,0(4) + addi 4,4,4 + fabs 0,0 + fcmpu 7,0,12 + bng 7,.L63 + fmr 12,0 + mr 8,10 +.L63: + addi 10,10,1 + bdnz .L61 + b .L67 + .p2align 4,,15 +.L70: + addis 6,2,.LC2@toc@ha + addis 7,2,.LC3@toc@ha + addis 8,2,.LC4@toc@ha + addis 9,2,.LC5@toc@ha + xxspltib 46,0 + stxv 61,-48(1) + stxv 62,-32(1) + addi 6,6,.LC2@toc@l + addi 7,7,.LC3@toc@l + stxv 63,-16(1) + xxspltib 61,32 + xxspltib 63,16 + xxspltib 62,64 + addi 8,8,.LC4@toc@l + addi 9,9,.LC5@toc@l + lxv 47,0(6) + xxspltib 34,0 + lxv 48,0(7) + xxlor 51,46,46 + lxv 49,0(8) + lxv 50,0(9) + li 8,0 + mr 9,4 + vextsb2w 29,29 + vextsb2w 31,31 + vextsb2w 30,30 + stxv 59,-80(1) + stxv 60,-64(1) + .p2align 4,,15 +.L5: + lxv 0,0(9) + vadduwm 27,19,29 + lxv 12,240(9) + addi 8,8,64 + addi 9,9,256 + cmpd 7,10,8 + xvabssp 44,0 + lxv 0,-240(9) + xvabssp 12,12 + xvabssp 5,0 + lxv 0,-224(9) + xvabssp 32,0 + lxv 0,-208(9) + xvcmpgtsp 35,5,44 + xvabssp 9,0 + lxv 0,-192(9) + xxsel 5,44,5,35 + xxsel 35,47,48,35 + xvabssp 1,0 + lxv 0,-176(9) + xvcmpgtsp 60,9,32 + xvabssp 6,0 + lxv 0,-160(9) + xxsel 9,32,9,60 + xxsel 60,49,50,60 + xvabssp 13,0 + lxv 0,-144(9) + xvcmpgtsp 42,9,5 + xvcmpgtsp 37,6,1 + xvabssp 11,0 + lxv 0,-128(9) + xxsel 9,5,9,42 + xxsel 42,35,60,42 + xxsel 6,1,6,37 + xxsel 37,47,48,37 + xvabssp 2,0 + lxv 0,-112(9) + xvcmpgtsp 36,11,13 + xvabssp 7,0 + lxv 0,-96(9) + xxsel 11,13,11,36 + xxsel 36,49,50,36 + xvabssp 3,0 + lxv 0,-80(9) + xvcmpgtsp 45,11,6 + xvcmpgtsp 39,7,2 + xvabssp 10,0 + lxv 0,-64(9) + xxsel 7,2,7,39 + xxsel 39,47,48,39 + xvabssp 4,0 + lxv 0,-48(9) + xvcmpgtsp 38,10,3 + xvabssp 8,0 + lxv 0,-32(9) + xxsel 10,3,10,38 + xxsel 38,49,50,38 + xvabssp 0,0 + xvcmpgtsp 43,10,7 + xvcmpgtsp 41,8,4 + xvcmpgtsp 40,12,0 + xxsel 8,4,8,41 + xxsel 41,47,48,41 + xxsel 0,0,12,40 + xxsel 12,6,11,45 + xxsel 11,7,10,43 + xxsel 45,37,36,45 + xvcmpgtsp 33,0,8 + xvcmpgtsp 32,12,9 + vadduwm 13,13,31 + xxsel 40,49,50,40 + xxsel 43,39,38,43 + xxsel 0,8,0,33 + xxsel 12,9,12,32 + xxsel 33,41,40,33 + xxsel 32,42,45,32 + xvcmpgtsp 44,0,11 + vadduwm 1,1,31 + vadduwm 0,19,0 + vadduwm 19,19,30 + xxsel 0,11,0,44 + xxsel 33,43,33,44 + xvcmpgtsp 45,0,12 + vadduwm 1,27,1 + xxsel 0,12,0,45 + xxsel 32,32,33,45 + xvcmpgtsp 33,0,34 + xxsel 46,46,32,33 + xxsel 34,34,0,33 + bgt 7,.L5 + xxsldwi 12,34,34,3 + xxsldwi 
11,34,34,2 + li 9,0 + li 8,12 + xxsldwi 0,34,34,1 + xscvspdp 34,34 + vextuwrx 3,9,14 + li 9,4 + xscvspdp 12,12 + xscvspdp 11,11 + xscvspdp 0,0 + vextuwrx 6,9,14 + li 9,8 + vextuwrx 7,9,14 + vextuwrx 9,8,14 + rldicl 12,6,0,32 + rldicl 8,3,0,32 + rldicl 0,7,0,32 + rldicl 5,9,0,32 + fcmpu 7,12,11 + fmr 10,0 + beq 7,.L71 + bnl 7,.L8 + mr 8,12 + fmr 12,11 +.L8: + xscmpudp 7,0,34 + bne 7,.L11 + cmplw 7,7,9 + ble 7,.L12 + mr 7,9 +.L12: + rldicl 5,7,0,32 +.L13: + fcmpu 7,12,10 + beq 7,.L72 + bnl 7,.L17 + mr 8,5 + fmr 12,10 +.L17: + cmpd 7,11,10 + ble 7,.L16 + addi 7,10,1 + sldi 9,10,2 + subf 6,10,11 + cmpd 7,7,11 + mtctr 6 + add 4,4,9 + bgt 7,.L53 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L53 + .p2align 4,,15 +.L21: + lfs 0,0(4) + addi 4,4,4 + fabs 0,0 + fcmpu 7,0,12 + bng 7,.L19 + fmr 12,0 + mr 8,10 +.L19: + addi 10,10,1 + bdnz .L21 +.L16: + lxv 59,-80(1) + lxv 60,-64(1) + addi 3,8,1 + lxv 61,-48(1) + lxv 62,-32(1) + lxv 63,-16(1) + blr + .p2align 4,,15 +.L71: + cmplw 7,3,6 + ble 7,.L7 + mr 3,6 +.L7: + rldicl 8,3,0,32 + b .L8 + .p2align 4,,15 +.L40: + sldi 6,5,2 + li 8,0 + li 9,0 + xxlxor 0,0,0 + b .L22 + .p2align 4,,15 +.L11: + blt 7,.L39 + mr 5,0 + b .L13 + .p2align 4,,15 +.L72: + cmpd 7,8,5 + ble 7,.L17 + mr 8,5 + b .L17 + .p2align 4,,15 +.L39: + xscpsgndp 10,34,34 + b .L13 +.L53: + li 9,1 + mtctr 9 + b .L21 +.L54: + li 10,1 + mtctr 10 + b .L35 +.L60: + li 9,1 + mtctr 9 + b .L61 + .long 0 + .byte 0,0,0,0,0,0,0,0 + .size isamax_k,.-isamax_k + .section .rodata.cst16,"aM",@progbits,16 + .align 4 +.LC2: + .long 0 + .long 1 + .long 2 + .long 3 +.LC3: + .long 4 + .long 5 + .long 6 + .long 7 +.LC4: + .long 8 + .long 9 + .long 10 + .long 11 +.LC5: + .long 12 + .long 13 + .long 14 + .long 15 + .ident "GCC: (SUSE Linux) 7.3.1 20180323 [gcc-7-branch revision 258812]" + .section .note.GNU-stack,"",@progbits diff --git a/kernel/power/isamin_power8.S b/kernel/power/isamin_power8.S new file mode 100644 index 000000000..1978af880 --- /dev/null +++ b/kernel/power/isamin_power8.S @@ -0,0 +1,423 @@ +/* .file "isamin.c" + .abiversion 2 + .section ".text" + .align 2 + .p2align 4,,15 + .globl isamin_k + .type isamin_k, @function +*/ +#define ASSEMBLER +#include "common.h" + + PROLOGUE + +#if _CALL_ELF ==2 +isamin_k: +#endif +.LCF0: +0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l +#if _CALL_ELF ==2 + .localentry isamin_k,.-isamin_k +#endif + mr. 11,3 + ble 0,.L36 + cmpdi 7,5,0 + li 3,0 + blelr 7 + lfs 0,0(4) + li 0,-48 + cmpdi 7,5,1 + stvx 30,1,0 + li 0,-32 + stvx 31,1,0 + fabs 0,0 + beq 7,.L62 + rldicr. 
6,11,0,61 + beq 0,.L40 + sldi 0,5,1 + sldi 12,5,2 + std 31,-8(1) + add 0,0,5 + neg 31,5 + sldi 3,5,4 + sldi 0,0,2 + add 7,4,12 + sldi 31,31,2 + sldi 5,5,3 + li 9,0 + li 10,0 + b .L24 + .p2align 4,,15 +.L41: + mr 10,9 +.L25: + fmr 0,12 + add 7,7,3 +.L24: + lfs 12,0(7) + fabs 12,12 + fcmpu 7,12,0 + bnl 7,.L26 + fmr 0,12 + addi 10,9,1 +.L26: + add 8,31,7 + lfsx 12,8,5 + fabs 12,12 + fcmpu 7,12,0 + bnl 7,.L28 + fmr 0,12 + addi 10,9,2 +.L28: + lfsx 12,8,0 + fabs 12,12 + fcmpu 7,12,0 + bnl 7,.L30 + fmr 0,12 + addi 10,9,3 +.L30: + addi 9,9,4 + cmpd 7,6,9 + ble 7,.L63 + lfsx 12,8,3 + fabs 12,12 + fcmpu 7,12,0 + blt 7,.L41 + fmr 12,0 + b .L25 + .p2align 4,,15 +.L36: + li 3,0 + blr + .p2align 4,,15 +.L63: + addi 6,6,-1 + ld 31,-8(1) + srdi 6,6,2 + addi 6,6,1 + sldi 9,6,2 + mulld 6,12,6 + cmpd 7,11,9 + ble 7,.L33 +.L23: + addi 8,9,1 + sldi 6,6,2 + cmpd 7,8,11 + subf 8,9,11 + mtctr 8 + add 4,4,6 + bgt 7,.L52 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L52 + .p2align 4,,15 +.L35: + lfs 12,0(4) + add 4,4,12 + fabs 12,12 + fcmpu 7,12,0 + bnl 7,.L34 + fmr 0,12 + mr 10,9 +.L34: + addi 9,9,1 + bdnz .L35 +.L33: + li 0,-48 + addi 3,10,1 + lvx 30,1,0 + li 0,-32 + lvx 31,1,0 + blr + .p2align 4,,15 +.L62: + rldicr. 8,11,0,57 + li 10,0 + bne 0,.L64 +.L4: + addi 7,8,1 + sldi 9,8,2 + cmpd 7,7,11 + add 4,4,9 + subf 9,8,11 + mtctr 9 + bgt 7,.L51 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L51 + .p2align 4,,15 +.L22: + lfs 12,0(4) + addi 4,4,4 + fabs 12,12 + fcmpu 7,0,12 + bng 7,.L21 + fmr 0,12 + mr 10,8 +.L21: + addi 8,8,1 + bdnz .L22 + li 0,-48 + addi 3,10,1 + lvx 30,1,0 + li 0,-32 + lvx 31,1,0 + blr + .p2align 4,,15 +.L64: + lxvd2x 4,0,4 + addis 10,2,.LC2@toc@ha + addis 5,2,.LC3@toc@ha + std 31,-8(1) + vspltisw 2,0 + addi 10,10,.LC2@toc@l + addis 7,2,.LC4@toc@ha + addis 9,2,.LC5@toc@ha + addis 6,2,.LC6@toc@ha + lxvd2x 51,0,10 + addis 10,2,.LC7@toc@ha + addi 7,7,.LC4@toc@l + addi 9,9,.LC5@toc@l + addi 5,5,.LC3@toc@l + xvabssp 4,4 + addi 6,6,.LC6@toc@l + addi 10,10,.LC7@toc@l + lxvd2x 36,0,7 + vspltisw 18,8 + lxvd2x 37,0,9 + lxvd2x 35,0,5 + mr 9,4 + li 7,0 + lxvd2x 48,0,6 + lxvd2x 49,0,10 + vadduwm 18,18,18 + xxlor 38,51,51 + xxlor 40,4,4 + b .L6 + .p2align 4,,15 +.L65: + lxvd2x 5,0,9 + xvabssp 40,5 +.L6: + addi 5,9,16 + addi 6,9,32 + vadduwm 14,2,16 + addi 10,9,64 + addi 12,9,48 + addi 31,9,80 + addi 3,9,96 + lxvd2x 5,0,5 + lxvd2x 42,0,6 + addi 5,9,112 + addi 6,9,128 + lxvd2x 44,0,10 + lxvd2x 9,0,12 + addi 10,9,160 + addi 12,9,144 + lxvd2x 6,0,31 + lxvd2x 1,0,3 + addi 31,9,176 + addi 3,9,192 + lxvd2x 11,0,5 + lxvd2x 13,0,6 + addi 5,9,208 + addi 6,9,224 + lxvd2x 2,0,10 + lxvd2x 7,0,12 + addi 10,9,240 + lxvd2x 10,0,31 + lxvd2x 3,0,3 + xvabssp 42,42 + xvabssp 5,5 + addi 7,7,64 + lxvd2x 8,0,5 + lxvd2x 0,0,6 + xvabssp 44,44 + xvabssp 9,9 + cmpd 7,8,7 + addi 9,9,256 + lxvd2x 12,0,10 + xvabssp 6,6 + xvabssp 1,1 + xvabssp 11,11 + xvabssp 13,13 + xvabssp 7,7 + xvabssp 2,2 + xvabssp 10,10 + xvabssp 3,3 + xvabssp 8,8 + xvabssp 0,0 + xvabssp 12,12 + xvcmpgtsp 32,40,5 + xvcmpgtsp 62,42,9 + xvcmpgtsp 45,44,6 + xvcmpgtsp 63,1,11 + xvcmpgtsp 39,13,7 + xvcmpgtsp 47,2,10 + xvcmpgtsp 41,3,8 + xvcmpgtsp 33,0,12 + xxsel 5,40,5,32 + xxsel 32,38,35,32 + xxsel 9,42,9,62 + xxsel 6,44,6,45 + xxsel 11,1,11,63 + xxsel 44,38,35,45 + xxsel 7,13,7,39 + xvcmpgtsp 42,5,9 + xxsel 10,2,10,47 + xvcmpgtsp 45,6,11 + xxsel 8,3,8,41 + xxsel 63,36,37,63 + xxsel 0,0,12,33 + xvcmpgtsp 43,7,10 + xxsel 40,36,37,33 + xxsel 62,36,37,62 + xvcmpgtsp 33,8,0 + xxsel 41,38,35,41 + xxsel 39,38,35,39 + xxsel 47,36,37,47 + xxsel 9,5,9,42 + xxsel 42,32,62,42 
+ xxsel 12,6,11,45 + xxsel 45,44,63,45 + xxsel 11,7,10,43 + xvcmpgtsp 32,9,12 + vadduwm 13,13,18 + xxsel 43,39,47,43 + xxsel 0,8,0,33 + xxsel 33,41,40,33 + xvcmpgtsp 44,11,0 + vadduwm 1,1,18 + xxsel 12,9,12,32 + xxsel 32,42,45,32 + vadduwm 0,2,0 + vadduwm 2,2,17 + xxsel 0,11,0,44 + xxsel 33,43,33,44 + xvcmpgtsp 45,12,0 + vadduwm 1,14,1 + xxsel 0,12,0,45 + xxsel 32,32,33,45 + xvcmpgtsp 33,4,0 + xxsel 51,51,32,33 + xxsel 4,4,0,33 + bgt 7,.L65 + xxsldwi 0,4,4,1 + xscvspdp 10,4 + vspltw 0,19,0 + xxsldwi 12,4,4,3 + xscvspdp 0,0 + mfvsrwz 3,51 + mfvsrwz 6,32 + vspltw 0,19,3 + xscvspdp 12,12 + xxsldwi 4,4,4,2 + mfvsrwz 7,32 + vspltw 0,19,2 + xscvspdp 4,4 + mfvsrwz 9,32 + fcmpu 7,0,10 + rldicl 10,3,0,32 + rldicl 31,6,0,32 + fmr 11,12 + rldicl 5,7,0,32 + rldicl 0,9,0,32 + beq 7,.L66 + bng 7,.L9 + fmr 0,10 + mr 10,31 +.L9: + fcmpu 7,12,4 + bne 7,.L12 + cmplw 7,7,9 + ble 7,.L13 + mr 7,9 +.L13: + rldicl 5,7,0,32 +.L14: + fcmpu 7,0,11 + beq 7,.L67 + bng 7,.L19 + fmr 0,11 + mr 10,5 +.L19: + cmpd 7,11,8 + ld 31,-8(1) + bgt 7,.L4 + b .L33 + .p2align 4,,15 +.L66: + cmplw 7,3,6 + ble 7,.L8 + mr 3,6 +.L8: + rldicl 10,3,0,32 + b .L9 + .p2align 4,,15 +.L40: + sldi 12,5,2 + li 10,0 + li 9,0 + b .L23 + .p2align 4,,15 +.L12: + bng 7,.L14 + fmr 11,4 + mr 5,0 + b .L14 + .p2align 4,,15 +.L67: + cmpd 7,10,5 + ble 7,.L19 + mr 10,5 + b .L19 +.L51: + li 9,1 + mtctr 9 + b .L22 +.L52: + li 8,1 + mtctr 8 + b .L35 + .long 0 + .byte 0,0,0,0,0,1,0,0 +#if _CALL_ELF ==2 + .size isamin_k,.-isamin_k +#endif + .section .rodata.cst16,"aM",@progbits,16 + .align 4 +.LC2: + .long 0 + .long 1 + .long 2 + .long 3 +.LC3: + .long 4 + .long 5 + .long 6 + .long 7 +.LC4: + .long 8 + .long 9 + .long 10 + .long 11 +.LC5: + .long 12 + .long 13 + .long 14 + .long 15 +.LC6: + .long 32 + .long 32 + .long 32 + .long 32 +.LC7: + .long 64 + .long 64 + .long 64 + .long 64 + .ident "GCC: (SUSE Linux) 7.3.1 20180323 [gcc-7-branch revision 258812]" + .section .note.GNU-stack,"",@progbits diff --git a/kernel/power/isamin_power9.S b/kernel/power/isamin_power9.S new file mode 100644 index 000000000..36486ff02 --- /dev/null +++ b/kernel/power/isamin_power9.S @@ -0,0 +1,389 @@ +/* + .file "isamin.c" + .abiversion 2 + .section ".text" + .align 2 + .p2align 4,,15 + .globl isamin_k + .type isamin_k, @function +*/ +#define ASSEMBLER +#include "common.h" + + PROLOGUE + +isamin_k: +.LCF0: +0: addis 2,12,.TOC.-.LCF0@ha + addi 2,2,.TOC.-.LCF0@l + .localentry isamin_k,.-isamin_k + mr. 11,3 + ble 0,.L36 + cmpdi 7,5,0 + li 3,0 + blelr 7 + lfs 0,0(4) + cmpdi 7,5,1 + stxv 61,-64(1) + stxv 62,-48(1) + stxv 63,-32(1) + fabs 0,0 + beq 7,.L62 + rldicr. 
6,11,0,61 + beq 0,.L40 + sldi 8,5,1 + sldi 0,5,2 + neg 12,5 + std 31,-8(1) + sldi 3,5,4 + sldi 31,5,3 + li 9,0 + li 10,0 + add 5,8,5 + add 7,4,0 + sldi 12,12,2 + sldi 5,5,2 + b .L24 + .p2align 4,,15 +.L41: + mr 10,9 +.L25: + add 7,7,3 + fmr 0,12 +.L24: + lfs 12,0(7) + fabs 12,12 + fcmpu 7,12,0 + bnl 7,.L26 + fmr 0,12 + addi 10,9,1 +.L26: + add 8,7,12 + lfsx 12,8,31 + fabs 12,12 + fcmpu 7,12,0 + bnl 7,.L28 + fmr 0,12 + addi 10,9,2 +.L28: + lfsx 12,8,5 + fabs 12,12 + fcmpu 7,12,0 + bnl 7,.L30 + fmr 0,12 + addi 10,9,3 +.L30: + addi 9,9,4 + cmpd 7,6,9 + ble 7,.L63 + lfsx 12,8,3 + fabs 12,12 + fcmpu 7,12,0 + blt 7,.L41 + fmr 12,0 + b .L25 + .p2align 4,,15 +.L36: + li 3,0 + blr + .p2align 4,,15 +.L63: + addi 6,6,-1 + ld 31,-8(1) + srdi 6,6,2 + addi 6,6,1 + sldi 9,6,2 + mulld 6,0,6 + cmpd 7,11,9 + ble 7,.L33 +.L23: + addi 8,9,1 + sldi 6,6,2 + subf 7,9,11 + cmpd 7,8,11 + mtctr 7 + add 4,4,6 + bgt 7,.L52 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L52 + .p2align 4,,15 +.L35: + lfs 12,0(4) + add 4,4,0 + fabs 12,12 + fcmpu 7,12,0 + bnl 7,.L34 + fmr 0,12 + mr 10,9 +.L34: + addi 9,9,1 + bdnz .L35 +.L33: + lxv 61,-64(1) + lxv 62,-48(1) + addi 3,10,1 + lxv 63,-32(1) + blr + .p2align 4,,15 +.L62: + rldicr. 8,11,0,57 + li 10,0 + bne 0,.L64 +.L4: + addi 7,8,1 + sldi 9,8,2 + subf 6,8,11 + cmpd 7,7,11 + mtctr 6 + add 4,4,9 + bgt 7,.L51 + li 3,-1 + rldicr 3,3,0,0 + cmpd 7,11,3 + beq 7,.L51 + .p2align 4,,15 +.L22: + lfs 12,0(4) + addi 4,4,4 + fabs 12,12 + fcmpu 7,0,12 + bng 7,.L21 + fmr 0,12 + mr 10,8 +.L21: + addi 8,8,1 + bdnz .L22 + lxv 61,-64(1) + lxv 62,-48(1) + addi 3,10,1 + lxv 63,-32(1) + blr + .p2align 4,,15 +.L64: + lxv 0,0(4) + xxspltib 47,16 + addis 6,2,.LC2@toc@ha + addis 7,2,.LC3@toc@ha + addis 10,2,.LC4@toc@ha + addis 9,2,.LC5@toc@ha + xxspltib 63,32 + xxspltib 46,64 + addi 6,6,.LC2@toc@l + addi 10,10,.LC4@toc@l + addi 7,7,.LC3@toc@l + std 31,-8(1) + addi 9,9,.LC5@toc@l + xxspltib 50,0 + vextsb2w 15,15 + lxv 48,0(6) + lxv 51,0(10) + vextsb2w 31,31 + vextsb2w 14,14 + xvabssp 4,0 + lxv 34,0(9) + lxv 49,0(7) + mr 9,4 + li 10,0 + xxlor 35,48,48 + xxlor 40,4,4 + b .L6 + .p2align 4,,15 +.L65: + lxv 0,0(9) + xvabssp 40,0 +.L6: + lxv 0,16(9) + vadduwm 29,18,31 + lxv 12,240(9) + addi 10,10,64 + addi 9,9,256 + cmpd 7,8,10 + xvabssp 5,0 + lxv 0,-224(9) + xvabssp 12,12 + xvabssp 32,0 + lxv 0,-208(9) + xvcmpgtsp 42,40,5 + xvabssp 9,0 + lxv 0,-192(9) + xxsel 5,40,5,42 + xvabssp 44,0 + lxv 0,-176(9) + xvcmpgtsp 62,32,9 + xvabssp 6,0 + lxv 0,-160(9) + xxsel 9,32,9,62 + xxsel 32,35,49,42 + xvabssp 1,0 + lxv 0,-144(9) + xxsel 62,51,34,62 + xvcmpgtsp 42,5,9 + xvcmpgtsp 37,44,6 + xvabssp 11,0 + lxv 0,-128(9) + xxsel 9,5,9,42 + xxsel 42,32,62,42 + xxsel 6,44,6,37 + xxsel 37,35,49,37 + xvabssp 13,0 + lxv 0,-112(9) + xvcmpgtsp 36,1,11 + xvabssp 7,0 + lxv 0,-96(9) + xxsel 11,1,11,36 + xxsel 36,51,34,36 + xvabssp 2,0 + lxv 0,-80(9) + xvcmpgtsp 45,6,11 + xvcmpgtsp 39,13,7 + xvabssp 10,0 + lxv 0,-64(9) + xxsel 7,13,7,39 + xxsel 39,35,49,39 + xvabssp 3,0 + lxv 0,-48(9) + xvcmpgtsp 38,2,10 + xvabssp 8,0 + lxv 0,-32(9) + xxsel 10,2,10,38 + xxsel 38,51,34,38 + xvabssp 0,0 + xvcmpgtsp 43,7,10 + xvcmpgtsp 41,3,8 + xvcmpgtsp 33,0,12 + xxsel 8,3,8,41 + xxsel 41,35,49,41 + xxsel 0,0,12,33 + xxsel 40,51,34,33 + xxsel 12,6,11,45 + xxsel 11,7,10,43 + xvcmpgtsp 33,8,0 + xxsel 45,37,36,45 + xvcmpgtsp 32,9,12 + xxsel 43,39,38,43 + vadduwm 13,13,15 + xxsel 0,8,0,33 + xxsel 33,41,40,33 + xxsel 12,9,12,32 + xxsel 32,42,45,32 + xvcmpgtsp 44,11,0 + vadduwm 1,1,15 + vadduwm 0,18,0 + vadduwm 18,18,14 + xxsel 0,11,0,44 + xxsel 33,43,33,44 + 
xvcmpgtsp 45,12,0 + vadduwm 1,29,1 + xxsel 0,12,0,45 + xxsel 32,32,33,45 + xvcmpgtsp 33,4,0 + xxsel 48,48,32,33 + xxsel 4,4,0,33 + bgt 7,.L65 + xxsldwi 0,4,4,3 + xxsldwi 11,4,4,2 + li 9,0 + li 10,12 + xxsldwi 12,4,4,1 + xscvspdp 4,4 + vextuwrx 3,9,16 + li 9,4 + xscvspdp 0,0 + xscvspdp 11,11 + xscvspdp 12,12 + vextuwrx 6,9,16 + li 9,8 + vextuwrx 7,9,16 + vextuwrx 9,10,16 + rldicl 31,6,0,32 + rldicl 10,3,0,32 + rldicl 5,7,0,32 + rldicl 0,9,0,32 + fcmpu 7,0,11 + fmr 10,12 + beq 7,.L66 + bng 7,.L9 + mr 10,31 + fmr 0,11 +.L9: + fcmpu 7,12,4 + bne 7,.L12 + cmplw 7,7,9 + ble 7,.L13 + mr 7,9 +.L13: + rldicl 5,7,0,32 +.L14: + fcmpu 7,0,10 + beq 7,.L67 + bng 7,.L19 + mr 10,5 + fmr 0,10 +.L19: + cmpd 7,11,8 + ld 31,-8(1) + bgt 7,.L4 + b .L33 + .p2align 4,,15 +.L66: + cmplw 7,3,6 + ble 7,.L8 + mr 3,6 +.L8: + rldicl 10,3,0,32 + b .L9 + .p2align 4,,15 +.L40: + sldi 0,5,2 + li 10,0 + li 9,0 + b .L23 + .p2align 4,,15 +.L12: + bng 7,.L14 + mr 5,0 + fmr 10,4 + b .L14 + .p2align 4,,15 +.L67: + cmpd 7,10,5 + ble 7,.L19 + mr 10,5 + b .L19 +.L51: + li 9,1 + mtctr 9 + b .L22 +.L52: + li 8,1 + mtctr 8 + b .L35 + .long 0 + .byte 0,0,0,0,0,1,0,0 + .size isamin_k,.-isamin_k + .section .rodata.cst16,"aM",@progbits,16 + .align 4 +.LC2: + .long 0 + .long 1 + .long 2 + .long 3 +.LC3: + .long 4 + .long 5 + .long 6 + .long 7 +.LC4: + .long 8 + .long 9 + .long 10 + .long 11 +.LC5: + .long 12 + .long 13 + .long 14 + .long 15 + .ident "GCC: (SUSE Linux) 7.3.1 20180323 [gcc-7-branch revision 258812]" + .section .note.GNU-stack,"",@progbits diff --git a/kernel/power/izamax.c b/kernel/power/izamax.c index cfe78c8c0..fe9d5bf95 100644 --- a/kernel/power/izamax.c +++ b/kernel/power/izamax.c @@ -34,6 +34,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#if defined(__VEC__) || defined(__ALTIVEC__) /** * Find maximum index @@ -56,8 +57,8 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { "lxvd2x 47, %[i48],%[ptr_tmp] \n\t" "lxvd2x 48, %[i64],%[ptr_tmp] \n\t" "lxvd2x 49, %[i80],%[ptr_tmp] \n\t" - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" "xxlor 40,%x[start],%x[start] \n\t" //{ 1,0} vs40 | v8 "vaddudm 9,8,%[adder] \n\t" //{3,2} vs41 @@ -67,7 +68,7 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { "vaddudm 11,10,%[adder] \n\t" //{7,6} vs43 "xxlxor 39,39,39 \n\t" // vs39 vec_max_value is zero "vaddudm 4,11, %[adder] \n\t" // {9,8} -{8;8} vs36 | v4 - "xxspltd 36,36,0 \n\t" + XXSPLTD_S(36,36,0) @@ -77,24 +78,24 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xvabsdp 47, 47 \n\t" "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //jump first half forward - "b 2f \n\t" + "b two%= \n\t" - ".p2align 5 \n\t" - "1: \n\t" + ".align 5 \n\t" + "one%=: \n\t" - "xxmrghd 0,44,45 \n\t" - "xxmrgld 1,44,45 \n\t" - "xxmrghd 2,46,47 \n\t" - "xxmrgld 3,46,47 \n\t" - "xxmrghd 4,48,49 \n\t" - "xxmrgld 5,48,49 \n\t" - "xxmrghd 44,50,51 \n\t" - "xxmrgld 45,50,51 \n\t" + XXMRGHD_S(0,44,45) + XXMRGLD_S(1,44,45) + XXMRGHD_S(2,46,47) + XXMRGLD_S(3,46,47) + XXMRGHD_S(4,48,49) + XXMRGLD_S(5,48,49) + XXMRGHD_S(44,6,7) + XXMRGLD_S(45,6,7) "xvadddp 46, 0,1 \n\t" "xvadddp 47, 2,3 \n\t" @@ -103,15 +104,15 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { - "xvcmpgtdp 50,47,46 \n\t " - "xvcmpgtdp 51,49,48 \n\t " + "xvcmpgtdp 6,47,46 \n\t " + "xvcmpgtdp 7,49,48 
\n\t " "addi %[ptr_tmp] ,%[ptr_tmp] , 128 \n\t" - "xxsel 32,40,41,50 \n\t" - "xxsel 0,46,47,50 \n\t" - "xxsel 33,42,43,51 \n\t" - "xxsel 1,48,49,51 \n\t" + "xxsel 32,40,41,6 \n\t" + "xxsel 0,46,47,6 \n\t" + "xxsel 33,42,43,7 \n\t" + "xxsel 1,48,49,7 \n\t" "lxvd2x 44, 0,%[ptr_tmp] \n\t" "lxvd2x 45, %[i16],%[ptr_tmp] \n\t" @@ -133,8 +134,8 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { "lxvd2x 48, %[i64],%[ptr_tmp] \n\t" "lxvd2x 49, %[i80],%[ptr_tmp] \n\t" - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" //select with previous "xxsel 38,38,32,4 \n\t" "xxsel 39,39,3,4 \n\t" @@ -148,35 +149,35 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xvabsdp 47, 47 \n\t" "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //>>/////////////////////////////// half start - "2: \n\t" - "xxmrghd 0,44,45 \n\t" - "xxmrgld 1,44,45 \n\t" - "xxmrghd 2,46,47 \n\t" - "xxmrgld 3,46,47 \n\t" - "xxmrghd 4,48,49 \n\t" - "xxmrgld 5,48,49 \n\t" - "xxmrghd 44,50,51 \n\t" - "xxmrgld 45,50,51 \n\t" + "two%=: \n\t" + XXMRGHD_S(0,44,45) + XXMRGLD_S(1,44,45) + XXMRGHD_S(2,46,47) + XXMRGLD_S(3,46,47) + XXMRGHD_S(4,48,49) + XXMRGLD_S(5,48,49) + XXMRGHD_S(44,6,7) + XXMRGLD_S(45,6,7) "xvadddp 46, 0,1 \n\t" "xvadddp 47, 2,3 \n\t" "xvadddp 48, 4,5 \n\t" "xvadddp 49, 44,45 \n\t" - "xvcmpgtdp 50,47,46 \n\t " - "xvcmpgtdp 51,49,48 \n\t " + "xvcmpgtdp 6,47,46 \n\t " + "xvcmpgtdp 7,49,48 \n\t " "addi %[ptr_tmp] ,%[ptr_tmp] , 128 \n\t" - "xxsel 32,40,41,50 \n\t" - "xxsel 0,46,47,50 \n\t" - "xxsel 33,42,43,51 \n\t" - "xxsel 1,48,49,51 \n\t" + "xxsel 32,40,41,6 \n\t" + "xxsel 0,46,47,6 \n\t" + "xxsel 33,42,43,7 \n\t" + "xxsel 1,48,49,7 \n\t" "lxvd2x 44, 0,%[ptr_tmp] \n\t" "lxvd2x 45, %[i16],%[ptr_tmp] \n\t" @@ -198,8 +199,8 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { "lxvd2x 48, %[i64],%[ptr_tmp] \n\t" "lxvd2x 49, %[i80],%[ptr_tmp] \n\t" - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" //select with previous "xxsel 38,38,32,4 \n\t" "xxsel 39,39,3,4 \n\t" @@ -211,24 +212,24 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { "xvabsdp 47, 47 \n\t" "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //decrement n "addic. 
%[n], %[n], -16 \n\t" //Loop back if >0 - "bgt+ 1b \n\t" + "bgt+ one%= \n\t" - "xxmrghd 0,44,45 \n\t" - "xxmrgld 1,44,45 \n\t" - "xxmrghd 2,46,47 \n\t" - "xxmrgld 3,46,47 \n\t" - "xxmrghd 4,48,49 \n\t" - "xxmrgld 5,48,49 \n\t" - "xxmrghd 44,50,51 \n\t" - "xxmrgld 45,50,51 \n\t" + XXMRGHD_S(0,44,45) + XXMRGLD_S(1,44,45) + XXMRGHD_S(2,46,47) + XXMRGLD_S(3,46,47) + XXMRGHD_S(4,48,49) + XXMRGLD_S(5,48,49) + XXMRGHD_S(44,6,7) + XXMRGLD_S(45,6,7) "xvadddp 46, 0,1 \n\t" "xvadddp 47, 2,3 \n\t" @@ -237,13 +238,13 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { - "xvcmpgtdp 50,47,46 \n\t " - "xvcmpgtdp 51,49,48 \n\t " + "xvcmpgtdp 6,47,46 \n\t " + "xvcmpgtdp 7,49,48 \n\t " - "xxsel 32,40,41,50 \n\t" - "xxsel 0,46,47,50 \n\t" - "xxsel 33,42,43,51 \n\t" - "xxsel 1,48,49,51 \n\t" + "xxsel 32,40,41,6 \n\t" + "xxsel 0,46,47,6 \n\t" + "xxsel 33,42,43,7 \n\t" + "xxsel 1,48,49,7 \n\t" "xvcmpgtdp 2,1,0 \n\t " "xxsel 32,32,33,2 \n\t" @@ -262,28 +263,28 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { ///////extract max value and max index from vector - "xxspltd 32,38,1 \n\t" - "xxspltd 40,39,1 \n\t" + XXSPLTD_S(32,38,1) + XXSPLTD_S(40,39,1) "xvcmpeqdp. 2, 40,39 \n\t" //cr6 0 bit set if all true, cr6=4*6+bit_ind=24,0011at CR(BI)==1, at=10 hint that it occurs rarely //0b001110=14 - "bc 14,24, 3f \n\t" + "bc 14,24, three%= \n\t" "xvcmpgtdp 4, 40,39 \n\t" "xxsel 0,39,40,4 \n\t" "xxsel 1,38,32,4 \n\t" "stxsdx 0,0,%[ptr_maxf] \n\t" - "b 4f \n\t" + "b four%= \n\t" - "3: \n\t" + "three%=: \n\t" //if elements value are equal then choose minimum index - "xxspltd 0,40,0 \n\t" + XXSPLTD_S(0,40,0) "vminud 0,0,6 \n\t" //vs32 vs38 "xxlor 1,32,32 \n\t" "stxsdx 0,0,%[ptr_maxf] \n\t" - "4: \n\t" + "four%=: \n\t" "mfvsrd %[index],1 \n\t" : [maxf] "=m"(*maxf),[ptr_tmp] "+&b"(x),[index] "=r"(index), [n] "+&r"(n) @@ -292,14 +293,14 @@ static BLASLONG ziamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *maxf) { [i64] "b"(64), [i80] "b"(80), [i96] "b"(96), [i112] "b"(112), [start] "v"(start), [adder] "v"(temp_add_index) : "cc", "vs0", "vs1","vs2","vs3", "vs4","vs5","vs32", "vs33", "vs34", "vs35", "vs36", - "vs37", "vs38", "vs39", "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs50", "vs51" + "vs37", "vs38", "vs39", "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs6", "vs7" ); return index; } - +#endif @@ -316,6 +317,9 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) if (inc_x == 1) { +#if defined(_CALL_ELF) && (_CALL_ELF == 2) +#if defined(__VEC__) || defined(__ALTIVEC__) + BLASLONG n1 = n & -16; if (n1 > 0) { @@ -323,6 +327,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) i = n1; ix = n1 << 1; } +#endif +#endif while(i < n) { diff --git a/kernel/power/izamin.c b/kernel/power/izamin.c index 1ffa3ba8b..94f2383e0 100644 --- a/kernel/power/izamin.c +++ b/kernel/power/izamin.c @@ -24,7 +24,6 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ - #include "common.h" #include @@ -32,6 +31,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
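The izamax.c hunk above additionally swaps the xxspltd/xxmrghd/xxmrgld extended mnemonics for the XXSPLTD_S/XXMRGHD_S/XXMRGLD_S macros. Those mnemonics are aliases of xxpermdi that not every assembler accepts (clang's integrated assembler is the usual offender), so a macro that can fall back to the raw instruction keeps the kernels portable. The real definitions live in the shared Power header, which this patch does not show; a plausible expansion is:

    /* Hypothetical fallback expansions -- the real ones are in the
       common Power header.  xxpermdi XT,XA,XB,DM picks one doubleword
       from each source: DM=0 gives high:high (merge-high), DM=3 gives
       low:low (merge-low); with XA==XB that splats doubleword 0 or 1. */
    #define XXSPLTD_S(T,A,z)  "xxpermdi " #T ", " #A ", " #A ", " #z "*3 \n\t"
    #define XXMRGHD_S(T,A,B)  "xxpermdi " #T ", " #A ", " #B ", 0 \n\t"
    #define XXMRGLD_S(T,A,B)  "xxpermdi " #T ", " #A ", " #B ", 3 \n\t"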
#define ABS fabs #define CABS1(x,i) ABS(x[i])+ABS(x[i+1]) +#if defined(__VEC__) || defined(__ALTIVEC__) /** * Find minimum index @@ -54,8 +54,8 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { "lxvd2x 47, %[i48],%[ptr_tmp] \n\t" "lxvd2x 48, %[i64],%[ptr_tmp] \n\t" "lxvd2x 49, %[i80],%[ptr_tmp] \n\t" - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" "xxlor 40,%x[start],%x[start] \n\t" //{ 1,0} vs40 | v8 "vaddudm 9,8,%[adder] \n\t" //{3,2} vs41 @@ -65,7 +65,7 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { "vaddudm 11,10,%[adder] \n\t" //{7,6} vs43 "lxvdsx 39,0,%[ptr_minf] \n\t" // vs39 vec_min_value "vaddudm 4,11, %[adder] \n\t" // {9,8} -{8;8} vs36 | v4 - "xxspltd 36,36,0 \n\t" + XXSPLTD_S(36,36,0) @@ -75,24 +75,24 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { "xvabsdp 47, 47 \n\t" "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //jump first half forward - "b 2f \n\t" + "b two%= \n\t" - ".p2align 5 \n\t" - "1: \n\t" + ".align 5 \n\t" + "one%=: \n\t" - "xxmrghd 0,44,45 \n\t" - "xxmrgld 1,44,45 \n\t" - "xxmrghd 2,46,47 \n\t" - "xxmrgld 3,46,47 \n\t" - "xxmrghd 4,48,49 \n\t" - "xxmrgld 5,48,49 \n\t" - "xxmrghd 44,50,51 \n\t" - "xxmrgld 45,50,51 \n\t" + XXMRGHD_S(0,44,45) + XXMRGLD_S(1,44,45) + XXMRGHD_S(2,46,47) + XXMRGLD_S(3,46,47) + XXMRGHD_S(4,48,49) + XXMRGLD_S(5,48,49) + XXMRGHD_S(44,6,7) + XXMRGLD_S(45,6,7) "xvadddp 46, 0,1 \n\t" "xvadddp 47, 2,3 \n\t" @@ -101,15 +101,15 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { - "xvcmpgtdp 50,46,47 \n\t " - "xvcmpgtdp 51,48,49 \n\t " + "xvcmpgtdp 6,46,47 \n\t " + "xvcmpgtdp 7,48,49 \n\t " "addi %[ptr_tmp] ,%[ptr_tmp] , 128 \n\t" - "xxsel 32,40,41,50 \n\t" - "xxsel 0,46,47,50 \n\t" - "xxsel 33,42,43,51 \n\t" - "xxsel 1,48,49,51 \n\t" + "xxsel 32,40,41,6 \n\t" + "xxsel 0,46,47,6 \n\t" + "xxsel 33,42,43,7 \n\t" + "xxsel 1,48,49,7 \n\t" "lxvd2x 44, 0,%[ptr_tmp] \n\t" "lxvd2x 45, %[i16],%[ptr_tmp] \n\t" @@ -131,8 +131,8 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { "lxvd2x 48, %[i64],%[ptr_tmp] \n\t" "lxvd2x 49, %[i80],%[ptr_tmp] \n\t" - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" //select with previous "xxsel 38,38,32,4 \n\t" "xxsel 39,39,3,4 \n\t" @@ -146,35 +146,35 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { "xvabsdp 47, 47 \n\t" "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //>>/////////////////////////////// half start - "2: \n\t" - "xxmrghd 0,44,45 \n\t" - "xxmrgld 1,44,45 \n\t" - "xxmrghd 2,46,47 \n\t" - "xxmrgld 3,46,47 \n\t" - "xxmrghd 4,48,49 \n\t" - "xxmrgld 5,48,49 \n\t" - "xxmrghd 44,50,51 \n\t" - "xxmrgld 45,50,51 \n\t" + "two%=: \n\t" + XXMRGHD_S(0,44,45) + XXMRGLD_S(1,44,45) + XXMRGHD_S(2,46,47) + XXMRGLD_S(3,46,47) + XXMRGHD_S(4,48,49) + XXMRGLD_S(5,48,49) + XXMRGHD_S(44,6,7) + XXMRGLD_S(45,6,7) "xvadddp 46, 0,1 \n\t" "xvadddp 47, 2,3 \n\t" "xvadddp 48, 4,5 \n\t" "xvadddp 49, 44,45 \n\t" - "xvcmpgtdp 50,46,47 \n\t " - "xvcmpgtdp 51,48,49 \n\t " + "xvcmpgtdp 6,46,47 \n\t " + "xvcmpgtdp 7,48,49 \n\t " "addi %[ptr_tmp] ,%[ptr_tmp] , 128 \n\t" - "xxsel 32,40,41,50 \n\t" - 
"xxsel 0,46,47,50 \n\t" - "xxsel 33,42,43,51 \n\t" - "xxsel 1,48,49,51 \n\t" + "xxsel 32,40,41,6 \n\t" + "xxsel 0,46,47,6 \n\t" + "xxsel 33,42,43,7 \n\t" + "xxsel 1,48,49,7 \n\t" "lxvd2x 44, 0,%[ptr_tmp] \n\t" "lxvd2x 45, %[i16],%[ptr_tmp] \n\t" @@ -196,8 +196,8 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { "lxvd2x 48, %[i64],%[ptr_tmp] \n\t" "lxvd2x 49, %[i80],%[ptr_tmp] \n\t" - "lxvd2x 50, %[i96],%[ptr_tmp] \n\t" - "lxvd2x 51,%[i112],%[ptr_tmp] \n\t" + "lxvd2x 6, %[i96],%[ptr_tmp] \n\t" + "lxvd2x 7,%[i112],%[ptr_tmp] \n\t" //select with previous "xxsel 38,38,32,4 \n\t" "xxsel 39,39,3,4 \n\t" @@ -209,24 +209,24 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { "xvabsdp 47, 47 \n\t" "xvabsdp 48, 48 \n\t" "xvabsdp 49, 49 \n\t" - "xvabsdp 50, 50 \n\t" - "xvabsdp 51, 51 \n\t" + "xvabsdp 6, 6 \n\t" + "xvabsdp 7, 7 \n\t" //decrement n "addic. %[n], %[n], -16 \n\t" //Loop back if >0 - "bgt+ 1b \n\t" + "bgt+ one%= \n\t" - "xxmrghd 0,44,45 \n\t" - "xxmrgld 1,44,45 \n\t" - "xxmrghd 2,46,47 \n\t" - "xxmrgld 3,46,47 \n\t" - "xxmrghd 4,48,49 \n\t" - "xxmrgld 5,48,49 \n\t" - "xxmrghd 44,50,51 \n\t" - "xxmrgld 45,50,51 \n\t" + XXMRGHD_S(0,44,45) + XXMRGLD_S(1,44,45) + XXMRGHD_S(2,46,47) + XXMRGLD_S(3,46,47) + XXMRGHD_S(4,48,49) + XXMRGLD_S(5,48,49) + XXMRGHD_S(44,6,7) + XXMRGLD_S(45,6,7) "xvadddp 46, 0,1 \n\t" "xvadddp 47, 2,3 \n\t" @@ -235,13 +235,13 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { - "xvcmpgtdp 50,46,47 \n\t " - "xvcmpgtdp 51,48,49 \n\t " + "xvcmpgtdp 6,46,47 \n\t " + "xvcmpgtdp 7,48,49 \n\t " - "xxsel 32,40,41,50 \n\t" - "xxsel 0,46,47,50 \n\t" - "xxsel 33,42,43,51 \n\t" - "xxsel 1,48,49,51 \n\t" + "xxsel 32,40,41,6 \n\t" + "xxsel 0,46,47,6 \n\t" + "xxsel 33,42,43,7 \n\t" + "xxsel 1,48,49,7 \n\t" "xvcmpgtdp 2,0,1 \n\t " "xxsel 32,32,33,2 \n\t" @@ -260,28 +260,28 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { ///////extract min value and min index from vector - "xxspltd 32,38,1 \n\t" - "xxspltd 40,39,1 \n\t" + XXSPLTD_S(32,38,1) + XXSPLTD_S(40,39,1) "xvcmpeqdp. 
2, 40,39 \n\t" //cr6 0 bit set if all true, cr6=4*6+bit_ind=24,0011at CR(BI)==1, at=10 hint that it occurs rarely //0b001110=14 - "bc 14,24, 3f \n\t" + "bc 14,24, three%= \n\t" "xvcmpgtdp 4,39, 40 \n\t" "xxsel 0,39,40,4 \n\t" "xxsel 1,38,32,4 \n\t" "stxsdx 0,0,%[ptr_minf] \n\t" - "b 4f \n\t" + "b four%= \n\t" - "3: \n\t" + "three%=: \n\t" //if elements value are equal then choose minimum index - "xxspltd 0,40,0 \n\t" + XXSPLTD_S(0,40,0) "vminud 0,0,6 \n\t" //vs32 vs38 "xxlor 1,32,32 \n\t" "stxsdx 0,0,%[ptr_minf] \n\t" - "4: \n\t" + "four%=: \n\t" "mfvsrd %[index],1 \n\t" : [minf] "=m"(*minf),[ptr_tmp] "+&b"(x),[index] "=r"(index), [n] "+&r"(n) @@ -290,12 +290,13 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) { [i64] "b"(64), [i80] "b"(80), [i96] "b"(96), [i112] "b"(112), [start] "v"(start), [adder] "v"(temp_add_index) : "cc", "vs0", "vs1","vs2","vs3", "vs4","vs5","vs32", "vs33", "vs34", "vs35", "vs36", - "vs37", "vs38", "vs39", "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs50", "vs51" + "vs37", "vs38", "vs39", "vs40", "vs41", "vs42", "vs43", "vs44", "vs45", "vs46", "vs47", "vs48", "vs49", "vs6", "vs7" ); return index; } +#endif @@ -314,14 +315,19 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) if (inc_x == 1) { minf = CABS1(x,0); //index will not be incremented - BLASLONG n1 = n & -16; + +#if defined(_CALL_ELF) && (_CALL_ELF == 2) +#if defined(__VEC__) || defined(__ALTIVEC__) + + BLASLONG n1 = n & -16; if (n1 > 0) { min = ziamin_kernel_16_TUNED(n1, x, &minf); i = n1; ix = n1 << 1; } - +#endif +#endif while(i < n) { @@ -357,5 +363,3 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) } } - - diff --git a/kernel/power/lock.c b/kernel/power/lock.c index 51348d63c..1c1b006b0 100644 --- a/kernel/power/lock.c +++ b/kernel/power/lock.c @@ -46,10 +46,10 @@ static void __inline blas_lock(volatile BLASULONG *address){ " .machine \"any\" ;" "0: lwarx %0,0, %1 ;" " cmpwi 0,%0,0;" - " bne 1f;" + " bne one%=;" " stwcx. %2,0, %1 ;" " bne- 0b;" - "1: " + "one%=: " : "=&r"(ret) : "r"(address), "r" (val) : "cr0", "memory"); diff --git a/kernel/power/sasum.c b/kernel/power/sasum.c index 5908347d3..733137012 100644 --- a/kernel/power/sasum.c +++ b/kernel/power/sasum.c @@ -46,9 +46,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "sasum_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_32 diff --git a/kernel/power/sasum_microk_power8.c b/kernel/power/sasum_microk_power8.c index 4bb515de8..aa465c38e 100644 --- a/kernel/power/sasum_microk_power8.c +++ b/kernel/power/sasum_microk_power8.c @@ -68,10 +68,10 @@ static float sasum_kernel_32 (long n, float *x) "addi %2, %2, 128 \n\t" "addic. %1, %1, -32 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvabssp 48, 40 \n\t" "xvabssp 49, 41 \n\t" @@ -108,9 +108,9 @@ static float sasum_kernel_32 (long n, float *x) "xvaddsp 38, 38, %x5 \n\t" "xvaddsp 39, 39, %x6 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvabssp 48, 40 \n\t" "xvabssp 49, 41 \n\t" diff --git a/kernel/power/saxpy.c b/kernel/power/saxpy.c index 393cdfadc..3d3b1613c 100644 --- a/kernel/power/saxpy.c +++ b/kernel/power/saxpy.c @@ -28,8 +28,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
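The saxpy.c hunk that begins here adds a scalar prologue (visible a few lines below) that peels elements until y reaches a 32-byte boundary before handing the rest to the vector kernel. The expression ((32 - ((uintptr_t)y & 0x1F)) >> 2) & 0x7 takes the byte offset of y inside its 32-byte block, converts the distance to the next boundary into float elements, and wraps 8 back to 0 when y is already aligned. A worked check, standalone and not part of the patch:

    /* Worked check of the alignment-peel arithmetic in the new saxpy
       prologue; illustration only. */
    #include <stdint.h>
    static long floats_until_32B(const float *y)
    {
        long off  = (uintptr_t)y & 0x1F;      /* byte offset in 32B block    */
        long peel = ((32 - off) >> 2) & 0x7;  /* bytes -> floats, 8 wraps 0  */
        return peel;   /* off = 0 -> 0, off = 8 -> 6, off = 28 -> 1 */
    }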
#include "common.h" +#define offset_0 0 +#define offset_1 16 +#define offset_2 32 +#define offset_3 48 +#define offset_4 64 +#define offset_5 80 +#define offset_6 96 +#define offset_7 112 +#define offset_8 128 +#define offset_9 144 +#define offset_10 160 +#define offset_11 176 +#define offset_12 192 +#define offset_13 208 +#define offset_14 224 +#define offset_15 240 +#if defined(__VEC__) || defined(__ALTIVEC__) #ifndef HAVE_KERNEL_8 #include @@ -37,12 +54,85 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. static void saxpy_kernel_64(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT alpha) { BLASLONG i = 0; - __vector float v_a = {alpha,alpha,alpha,alpha}; - __vector float * v_y=(__vector float *)y; - __vector float * v_x=(__vector float *)x; + __vector float v_a __attribute((aligned(16))) = {alpha,alpha,alpha,alpha}; + __vector float * vptr_y =(__vector float *)y; + __vector float * vptr_x =(__vector float *)x; for(; i= 64 ) + { + BLASLONG align = ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 2) & 0x7; + for (i = 0; i < align; i++) { + y[i] += da * x[i] ; + } + } + BLASLONG n1 = (n-i) & -64; + if ( n1 ) + saxpy_kernel_64(n1, &x[i], &y[i], da); + + i += n1; + while(i < n) + { + + y[i] += da * x[i] ; + i++ ; + + } + return(0); + + + } + + BLASLONG n1 = n & -4; + + while(i < n1) + { + + FLOAT m1 = da * x[ix] ; + FLOAT m2 = da * x[ix+inc_x] ; + FLOAT m3 = da * x[ix+2*inc_x] ; + FLOAT m4 = da * x[ix+3*inc_x] ; + + y[iy] += m1 ; + y[iy+inc_y] += m2 ; + y[iy+2*inc_y] += m3 ; + y[iy+3*inc_y] += m4 ; + + ix += inc_x*4 ; + iy += inc_y*4 ; + i+=4 ; + + } + + while(i < n) + { + + y[iy] += da * x[ix] ; + ix += inc_x ; + iy += inc_y ; + i++ ; + + } + return(0); + +} + + diff --git a/kernel/power/sbgemm_kernel_power10.c b/kernel/power/sbgemm_kernel_power10.c new file mode 100644 index 000000000..d15586703 --- /dev/null +++ b/kernel/power/sbgemm_kernel_power10.c @@ -0,0 +1,959 @@ +/********************************************************************************* +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+**********************************************************************************/ +#include "common.h" +#include +#if defined(BFLOAT16) && defined(BFLOAT16CONVERSION) +static float +bfloat16tof32 (bfloat16 f16) +{ + float result = 0; + unsigned short *q = (unsigned short *) (&result); +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + q[0] = f16; +#else + q[1] = f16; +#endif + return result; +} + +#define BF16TOF32(x) (bfloat16tof32(x)) +#else +#define BF16TOF32(x) x +#endif + +typedef __vector unsigned char vec_t; +typedef FLOAT v4sf_t __attribute__ ((vector_size (16))); +typedef FLOAT v2sf_t __attribute__ ((vector_size (8))); + +vector char mask = + { 0x0, 0x1, 0x8, 0x9, 0x2, 0x3, 0xa, 0xb, 0x4, 0x5, 0xc, 0xd, 0x6, 0x7, 0xe, + 0xf +}; + +/* + * BFLOAT16 xvbf16ger2pp instruction needs 4×2 matrix of + * bfloat16 floating-point values as input. Hence this + * merging is needed on A and B matrices. + */ +#define MERGE_ROW(x) vec_perm(x, x, mask) +#define MERGE_HIGH(x, y) (vec_t) vec_mergeh ((vector short)x, (vector short)y) +#define MERGE_LOW(x, y) (vec_t) vec_mergel ((vector short)x, (vector short)y) + +#define SAVE_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[0* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v4sf_t *) &CO[1*ldc+J]; \ + rowC[0] += result[1] * alpha; \ + rowC = (v4sf_t *) &CO[2*ldc+J]; \ + rowC[0] += result[2] * alpha; \ + rowC = (v4sf_t *) &CO[3*ldc+J]; \ + rowC[0] += result[3] * alpha; +#define SAVE_ACC1(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[4* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v4sf_t *) &CO[5*ldc+J]; \ + rowC[0] += result[1] * alpha; \ + rowC = (v4sf_t *) &CO[6*ldc+J]; \ + rowC[0] += result[2] * alpha; \ + rowC = (v4sf_t *) &CO[7*ldc+J]; \ + rowC[0] += result[3] * alpha; +#define SAVE4x2_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v2sf_t *) &CO[0* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v2sf_t *) &CO[1* ldc+J]; \ + rowC[0] += result[2] * alpha; \ + rowC = (v2sf_t *) &CO[2* ldc+J]; \ + rowC[0] += result[4] * alpha; \ + rowC = (v2sf_t *) &CO[3* ldc+J]; \ + rowC[0] += result[6] * alpha; +#define SAVE4x2_ACC1(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v2sf_t *) &CO[4* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v2sf_t *) &CO[5* ldc+J]; \ + rowC[0] += result[2] * alpha; \ + rowC = (v2sf_t *) &CO[6* ldc+J]; \ + rowC[0] += result[4] * alpha; \ + rowC = (v2sf_t *) &CO[7* ldc+J]; \ + rowC[0] += result[6] * alpha; + +#define MMA __builtin_mma_xvbf16ger2pp + +#define SAVE2x4_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[0* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v4sf_t *) &CO[1* ldc+J]; \ + rowC[0] += result[1] * alpha; + +#define SET_ACC_ZERO4() \ + __builtin_mma_xxsetaccz (&acc0); \ + __builtin_mma_xxsetaccz (&acc1); \ + __builtin_mma_xxsetaccz (&acc2); \ + __builtin_mma_xxsetaccz (&acc3); + +#define SET_ACC_ZERO8() \ + __builtin_mma_xxsetaccz (&acc0); \ + __builtin_mma_xxsetaccz (&acc1); \ + __builtin_mma_xxsetaccz (&acc2); \ + __builtin_mma_xxsetaccz (&acc3); \ + __builtin_mma_xxsetaccz (&acc4); \ + __builtin_mma_xxsetaccz (&acc5); \ + __builtin_mma_xxsetaccz (&acc6); \ + __builtin_mma_xxsetaccz (&acc7); + +#define PREFETCH1(x, y) asm volatile ("dcbt %0, %1" : : "r" (x), "b" (y) : "memory"); +/************************************************************************************* +* SBGEMM Kernel 
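Every tile size in the kernel below repeats one MMA pattern: zero a __vector_quad accumulator with __builtin_mma_xxsetaccz, feed it bfloat16 rank-2 outer-product updates with __builtin_mma_xvbf16ger2pp, then spill the 4x4 float result with __builtin_mma_disassemble_acc (wrapped above as SET_ACC_ZERO*/MMA/SAVE_ACC). A minimal sketch of one tile, assuming a Power10 compiler with MMA support; the helper name and the even-k assumption are illustrative, not part of the patch:

    /* One 4x4 tile of the MMA pattern; assumes k is covered by 'pairs'
       full bf16 pairs (the kernel handles odd k separately).  The
       typedefs mirror the ones in the kernel above. */
    typedef __vector unsigned char vec_t;
    typedef float v4sf_t __attribute__ ((vector_size (16)));
    static void tile4x4(const vec_t *rowA, const vec_t *rowB,
                        long pairs, v4sf_t out[4])
    {
        __vector_quad acc;
        __builtin_mma_xxsetaccz(&acc);                   /* acc = 0        */
        for (long l = 0; l < pairs; l++)                 /* rank-2 updates */
            __builtin_mma_xvbf16ger2pp(&acc, rowB[l], rowA[l]);
        __builtin_mma_disassemble_acc((void *)out, &acc); /* 4 float rows  */
    }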
+*************************************************************************************/ +int +CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, + IFLOAT * B, FLOAT * C, BLASLONG ldc) +{ + BLASLONG i1; + v4sf_t valpha = { alpha, alpha, alpha, alpha }; + vector short vzero = { 0, 0, 0, 0, 0, 0, 0, 0 }; + /* Loop for n >= 8. */ + for (i1 = 0; i1 < (n >> 3); i1++) + { + BLASLONG j; + FLOAT *CO; + IFLOAT *AO; + CO = C; + C += ldc << 3; + AO = A; + PREFETCH1 (A, 128); + PREFETCH1 (A, 256); + /* Loop for m >= 16. */ + for (j = 0; j < (m >> 4); j++) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7; + SET_ACC_ZERO8 (); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vec_t *rowA = (vec_t *) & (AO[l << 5]); + vec_t *rowB = (vec_t *) & (BO[l << 4]); + MMA (&acc0, rowB[0], rowA[0]); + MMA (&acc1, rowB[1], rowA[0]); + MMA (&acc2, rowB[0], rowA[1]); + MMA (&acc3, rowB[1], rowA[1]); + MMA (&acc4, rowB[0], rowA[2]); + MMA (&acc5, rowB[1], rowA[2]); + MMA (&acc6, rowB[0], rowA[3]); + MMA (&acc7, rowB[1], rowA[3]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 4; + vec_t *rowA = (vec_t *) & (AO[l << 1]); + vec_t *rowB = (vec_t *) & (BO[l]); + vec_t rowB_h = MERGE_HIGH (rowB[0], rowB[1]); + vec_t rowB_l = MERGE_LOW (rowB[0], rowB[1]); + vec_t rowA_h = MERGE_HIGH (rowA[0], vzero); + vec_t rowA_l = MERGE_LOW (rowA[0], vzero); + vec_t rowA2_h = MERGE_HIGH (rowA[1], vzero); + vec_t rowA2_l = MERGE_LOW (rowA[1], vzero); + MMA (&acc0, rowB_h, rowA_h); + MMA (&acc1, rowB_l, rowA_h); + MMA (&acc2, rowB_h, rowA_l); + MMA (&acc3, rowB_l, rowA_l); + MMA (&acc4, rowB_h, rowA2_h); + MMA (&acc5, rowB_l, rowA2_h); + MMA (&acc6, rowB_h, rowA2_l); + MMA (&acc7, rowB_l, rowA2_l); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc2, 4); + SAVE_ACC1 (&acc1, 0); + SAVE_ACC1 (&acc3, 4); + SAVE_ACC (&acc4, 8); + SAVE_ACC (&acc6, 12); + SAVE_ACC1 (&acc5, 8); + SAVE_ACC1 (&acc7, 12); + CO += 16; + + AO += (k << 4); + BO += (k << 3); + } + if (m & 8) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + SET_ACC_ZERO4 (); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vec_t *rowA = (vec_t *) & (AO[l << 4]); + vec_t *rowB = (vec_t *) & (BO[l << 4]); + + MMA (&acc0, rowB[0], rowA[0]); + MMA (&acc1, rowB[1], rowA[0]); + MMA (&acc2, rowB[0], rowA[1]); + MMA (&acc3, rowB[1], rowA[1]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 4; + vec_t *rowA = (vec_t *) & (AO[l]); + vec_t *rowB = (vec_t *) & (BO[l]); + vec_t rowB_h = MERGE_HIGH (rowB[0], rowB[1]); + vec_t rowB_l = MERGE_LOW (rowB[0], rowB[1]); + vec_t rowA_h = MERGE_HIGH (rowA[0], vzero); + vec_t rowA_l = MERGE_LOW (rowA[0], vzero); + MMA (&acc0, rowB_h, rowA_h); + MMA (&acc1, rowB_l, rowA_h); + MMA (&acc2, rowB_h, rowA_l); + MMA (&acc3, rowB_l, rowA_l); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc2, 4); + SAVE_ACC1 (&acc1, 0); + SAVE_ACC1 (&acc3, 4); + CO += 8; + AO += (k << 3); + BO += (k << 3); + } + if (m & 4) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; + __builtin_mma_xxsetaccz (&acc0); + __builtin_mma_xxsetaccz (&acc1); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vec_t *rowA = (vec_t *) & (AO[l << 3]); + vec_t *rowB = (vec_t *) & (BO[l << 4]); + MMA (&acc0, rowB[0], rowA[0]); + MMA (&acc1, rowB[1], rowA[0]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 3; + vector short rowA = + { AO[l + 0], 0, AO[l + 1], 0, AO[l + 2], 0, AO[l + 3], 0 }; + vec_t *rowB = (vec_t 
*) & (BO[l << 1]); + MMA (&acc0, MERGE_HIGH (rowB[0], rowB[1]), (vec_t) rowA); + MMA (&acc1, MERGE_LOW (rowB[0], rowB[1]), (vec_t) rowA); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC1 (&acc1, 0); + CO += 4; + AO += (k << 2); + BO += (k << 3); + } + if (m & 2) + { + IFLOAT *BO = B; + v2sf_t *rowC; + v2sf_t result[8]; + __vector_quad acc0, acc1; + __builtin_mma_xxsetaccz (&acc0); + __builtin_mma_xxsetaccz (&acc1); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowA = + { AO[(l << 2) + 0], AO[(l << 2) + 2], AO[(l << 2) + 1], + AO[(l << 2) + 3], + 0, 0, 0, 0 + }; + vec_t *rowB = (vec_t *) & (BO[l << 4]); + MMA (&acc0, rowB[0], (vec_t) rowA); + MMA (&acc1, rowB[1], (vec_t) rowA); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 2; + vector short rowA = { AO[l + 0], 0, AO[l + 1], 0, 0, 0, 0, 0 }; + vec_t *rowB = (vec_t *) & (BO[(l << 2)]); + MMA (&acc0, MERGE_HIGH (rowB[0], rowB[1]), (vec_t) rowA); + MMA (&acc1, MERGE_LOW (rowB[0], rowB[1]), (vec_t) rowA); + } + SAVE4x2_ACC (&acc0, 0); + SAVE4x2_ACC1 (&acc1, 0); + CO += 2; + AO += (k << 1); + BO += (k << 3); + } + if (m & 1) + { + IFLOAT *BO = B; + v2sf_t *rowC; + v2sf_t result[8]; + __vector_quad acc0, acc1; + __builtin_mma_xxsetaccz (&acc0); + __builtin_mma_xxsetaccz (&acc1); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowA = + { AO[(l << 1) + 0], AO[(l << 1) + 1], 0, 0, 0, 0, 0, 0}; + vec_t *rowB = (vec_t *) & (BO[l << 4]); + MMA (&acc0, rowB[0], (vec_t) rowA); + MMA (&acc1, rowB[1], (vec_t) rowA); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 1; + vector short rowA = { AO[l], 0, 0, 0, 0, 0, 0, 0 }; + vec_t *rowB = (vec_t *) & (BO[(l << 3)]); + MMA (&acc0, MERGE_HIGH (rowB[0], rowB[1]), (vec_t) rowA); + MMA (&acc1, MERGE_LOW (rowB[0], rowB[1]), (vec_t) rowA); + } + SAVE4x2_ACC (&acc0, 0); + SAVE4x2_ACC1 (&acc1, 0); + CO += 1; + AO += k; + BO += (k << 3); + } + B += k << 3; + } + if (n & 4) + { + BLASLONG j; + FLOAT *CO; + IFLOAT *AO; + CO = C; + C += ldc << 2; + AO = A; + /* Loop for m >= 32. 
*/ + for (j = 0; j < (m >> 5); j++) + { + IFLOAT *BO = B; + IFLOAT *A1 = AO + (16 * k); + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7; + SET_ACC_ZERO8 (); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vec_t *rowA = (vec_t *) & (AO[l << 5]); + vec_t *rowA1 = (vec_t *) & (A1[l << 5]); + vec_t *rowB = (vec_t *) & (BO[l << 3]); + MMA (&acc0, rowB[0], rowA[0]); + MMA (&acc1, rowB[0], rowA[1]); + MMA (&acc2, rowB[0], rowA[2]); + MMA (&acc3, rowB[0], rowA[3]); + MMA (&acc4, rowB[0], rowA1[0]); + MMA (&acc5, rowB[0], rowA1[1]); + MMA (&acc6, rowB[0], rowA1[2]); + MMA (&acc7, rowB[0], rowA1[3]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 3; + vec_t *rowA = (vec_t *) & (AO[(l << 2)]); + vec_t *rowA1 = (vec_t *) & (A1[(l << 2)]); + vec_t *rowB = (vec_t *) & (BO[l]); + vec_t rowB_mrg = MERGE_ROW (rowB[0]); + MMA (&acc0, rowB_mrg, MERGE_HIGH (rowA[0], vzero)); + MMA (&acc1, rowB_mrg, MERGE_LOW (rowA[0], vzero)); + MMA (&acc2, rowB_mrg, MERGE_HIGH (rowA[1], vzero)); + MMA (&acc3, rowB_mrg, MERGE_LOW (rowA[1], vzero)); + MMA (&acc4, rowB_mrg, MERGE_HIGH (rowA1[0], vzero)); + MMA (&acc5, rowB_mrg, MERGE_LOW (rowA1[0], vzero)); + MMA (&acc6, rowB_mrg, MERGE_HIGH (rowA1[1], vzero)); + MMA (&acc7, rowB_mrg, MERGE_LOW (rowA1[1], vzero)); + } + + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc1, 4); + CO += 8; + SAVE_ACC (&acc2, 0); + SAVE_ACC (&acc3, 4); + CO += 8; + SAVE_ACC (&acc4, 0); + SAVE_ACC (&acc5, 4); + CO += 8; + SAVE_ACC (&acc6, 0); + SAVE_ACC (&acc7, 4); + CO += 8; + AO += k << 5; + BO += k << 2; + } + if (m & 16) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + SET_ACC_ZERO4 (); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vec_t *rowA = (vec_t *) & (AO[l << 5]); + vec_t *rowB = (vec_t *) & (BO[l << 3]); + MMA (&acc0, rowB[0], rowA[0]); + MMA (&acc1, rowB[0], rowA[1]); + MMA (&acc2, rowB[0], rowA[2]); + MMA (&acc3, rowB[0], rowA[3]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 3; + vec_t *rowA = (vec_t *) & (AO[(l << 2)]); + vec_t *rowB = (vec_t *) & (BO[l]); + vec_t rowB_mrg = MERGE_ROW (rowB[0]); + MMA (&acc0, rowB_mrg, MERGE_HIGH (rowA[0], vzero)); + MMA (&acc1, rowB_mrg, MERGE_LOW (rowA[0], vzero)); + MMA (&acc2, rowB_mrg, MERGE_HIGH (rowA[1], vzero)); + MMA (&acc3, rowB_mrg, MERGE_LOW (rowA[1], vzero)); + } + + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc1, 4); + CO += 8; + SAVE_ACC (&acc2, 0); + SAVE_ACC (&acc3, 4); + CO += 8; + AO += k << 4; + BO += k << 2; + } + if (m & 8) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; + __builtin_mma_xxsetaccz (&acc0); + __builtin_mma_xxsetaccz (&acc1); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vec_t *rowA = (vec_t *) & (AO[l << 4]); + vec_t *rowB = (vec_t *) & (BO[l << 3]); + MMA (&acc0, rowB[0], rowA[0]); + MMA (&acc1, rowB[0], rowA[1]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 3; + vec_t *rowA = (vec_t *) & (AO[l << 1]); + vec_t *rowB = (vec_t *) & (BO[l]); + vec_t rowB_mrg = MERGE_ROW (rowB[0]); + MMA (&acc0, rowB_mrg, MERGE_HIGH (rowA[0], vzero)); + MMA (&acc1, rowB_mrg, MERGE_LOW (rowA[0], vzero)); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc1, 4); + CO += 8; + AO += k << 3; + BO += k << 2; + } + if (m & 4) + { + IFLOAT *BO = B; + v4sf_t *rowC; + __vector_quad acc0; + v4sf_t result[4]; + BLASLONG l = 0; + __builtin_mma_xxsetaccz (&acc0); + for (l = 0; l < k / 2; l++) + { + vec_t *rowA = (vec_t *) & (AO[l << 3]); + vec_t *rowB = (vec_t *) & (BO[l << 3]); + MMA 
(&acc0, rowB[0], rowA[0]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 3; + vector short rowA = + { AO[l], 0, AO[l + 1], 0, AO[l + 2], 0, AO[l + 3], 0 }; + vec_t *rowB = (vec_t *) & (BO[l]); + MMA (&acc0, MERGE_ROW (rowB[0]), (vec_t) rowA); + } + SAVE_ACC (&acc0, 0); + CO += 4; + AO += k << 2; + BO += k << 2; + } + if (m & 2) + { + IFLOAT *BO = B; + v2sf_t *rowC; + v2sf_t result[8]; + __vector_quad acc0; + BLASLONG l = 0; + __builtin_mma_xxsetaccz (&acc0); + for (l = 0; l < k / 2; l++) + { + vector short rowA = + { AO[(l << 2) + 0], AO[(l << 2) + 2], AO[(l << 2) + 1], + AO[(l << 2) + 3], + 0, 0, 0, 0 + }; + vec_t *rowB = (vec_t *) & (BO[l << 3]); + MMA (&acc0, rowB[0], (vec_t) rowA); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 2; + vector short rowA = { AO[l], 0, AO[l + 1], 0, 0, 0, 0, 0 }; + vec_t *rowB = (vec_t *) & (BO[l << 1]); + MMA (&acc0, MERGE_ROW (rowB[0]), (vec_t) rowA); + } + SAVE4x2_ACC (&acc0, 0); + CO += 2; + AO += k << 1; + BO += k << 2; + } + if (m & 1) + { + IFLOAT *BO = B; + v2sf_t *rowC; + v2sf_t result[8]; + __vector_quad acc0; + BLASLONG l = 0; + __builtin_mma_xxsetaccz (&acc0); + for (l = 0; l < k / 2; l++) + { + vector short rowA = + { AO[(l << 1) + 0], AO[(l << 1) + 1], 0, + 0, 0, 0, 0 + }; + vec_t *rowB = (vec_t *) & (BO[l << 3]); + MMA (&acc0, rowB[0], (vec_t) rowA); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 1; + vector short rowA = { AO[l], 0, 0, 0, 0, 0, 0, 0 }; + vec_t *rowB = (vec_t *) & (BO[l << 2]); + MMA (&acc0, MERGE_ROW (rowB[0]), (vec_t) rowA); + } + SAVE4x2_ACC (&acc0, 0); + AO += k; + BO += (k << 2); + CO += 1; + } + + B += k << 2; + } + if (n & 2) + { + BLASLONG j; + FLOAT *CO; + IFLOAT *AO; + CO = C; + C += ldc << 1; + AO = A; + /* Loop for m >= 32. */ + for (j = 0; j < (m >> 5); j++) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + IFLOAT *A1 = AO + (16 * k); + __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7; + SET_ACC_ZERO8 (); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowB = + { BO[(l << 2) + 0], BO[(l << 2) + 2], BO[(l << 2) + 1], + BO[(l << 2) + 3], + 0, 0, 0, 0 + }; + vec_t *rowA = (vec_t *) & (AO[l << 5]); + vec_t *rowA1 = (vec_t *) & (A1[l << 5]); + MMA (&acc0, (vec_t) rowB, rowA[0]); + MMA (&acc1, (vec_t) rowB, rowA[1]); + MMA (&acc2, (vec_t) rowB, rowA[2]); + MMA (&acc3, (vec_t) rowB, rowA[3]); + MMA (&acc4, (vec_t) rowB, rowA1[0]); + MMA (&acc5, (vec_t) rowB, rowA1[1]); + MMA (&acc6, (vec_t) rowB, rowA1[2]); + MMA (&acc7, (vec_t) rowB, rowA1[3]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 2; + vector short rowB = { BO[l + 0], 0, BO[l + 1], 0, 0, 0, 0, 0 }; + vec_t *rowA = (vec_t *) & (AO[l << 3]); + vec_t *rowA1 = (vec_t *) & (A1[l << 3]); + MMA (&acc0, (vec_t) rowB, MERGE_HIGH (rowA[0], rowA[2])); + MMA (&acc1, (vec_t) rowB, MERGE_LOW (rowA[0], rowA[2])); + MMA (&acc2, (vec_t) rowB, MERGE_HIGH (rowA[1], rowA[3])); + MMA (&acc3, (vec_t) rowB, MERGE_LOW (rowA[1], rowA[3])); + MMA (&acc4, (vec_t) rowB, MERGE_HIGH (rowA1[0], rowA1[2])); + MMA (&acc5, (vec_t) rowB, MERGE_LOW (rowA1[0], rowA1[2])); + MMA (&acc6, (vec_t) rowB, MERGE_HIGH (rowA1[1], rowA1[3])); + MMA (&acc7, (vec_t) rowB, MERGE_LOW (rowA1[1], rowA1[3])); + } + SAVE2x4_ACC (&acc0, 0); + SAVE2x4_ACC (&acc1, 4); + SAVE2x4_ACC (&acc2, 8); + SAVE2x4_ACC (&acc3, 12); + CO += 16; + SAVE2x4_ACC (&acc4, 0); + SAVE2x4_ACC (&acc5, 4); + SAVE2x4_ACC (&acc6, 8); + SAVE2x4_ACC (&acc7, 12); + CO += 16; + AO += k << 5; + BO += k << 1; + } + if (m & 16) + { + IFLOAT *BO = B; + v4sf_t *rowC; + 
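/* 16x2 tile: two columns of B are packed into one halfword vector and four MMA accumulators cover the sixteen rows of A. */ +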
v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + SET_ACC_ZERO4 (); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowB = + { BO[(l << 2) + 0], BO[(l << 2) + 2], BO[(l << 2) + 1], + BO[(l << 2) + 3], + 0, 0, 0, 0 + }; + vec_t *rowA = (vec_t *) & (AO[l << 5]); + MMA (&acc0, (vec_t) rowB, rowA[0]); + MMA (&acc1, (vec_t) rowB, rowA[1]); + MMA (&acc2, (vec_t) rowB, rowA[2]); + MMA (&acc3, (vec_t) rowB, rowA[3]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 2; + vector short rowB = { BO[l + 0], 0, BO[l + 1], 0, 0, 0, 0, 0 }; + vec_t *rowA = (vec_t *) & (AO[l << 3]); + MMA (&acc0, (vec_t) rowB, MERGE_HIGH (rowA[0], rowA[2])); + MMA (&acc1, (vec_t) rowB, MERGE_LOW (rowA[0], rowA[2])); + MMA (&acc2, (vec_t) rowB, MERGE_HIGH (rowA[1], rowA[3])); + MMA (&acc3, (vec_t) rowB, MERGE_LOW (rowA[1], rowA[3])); + } + SAVE2x4_ACC (&acc0, 0); + SAVE2x4_ACC (&acc1, 4); + SAVE2x4_ACC (&acc2, 8); + SAVE2x4_ACC (&acc3, 12); + CO += 16; + AO += k << 4; + BO += k << 1; + } + if (m & 8) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; + __builtin_mma_xxsetaccz (&acc0); + __builtin_mma_xxsetaccz (&acc1); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowB = + { BO[(l << 2) + 0], BO[(l << 2) + 2], BO[(l << 2) + 1], + BO[(l << 2) + 3], + 0, 0, 0, 0 + }; + vec_t *rowA = (vec_t *) & (AO[l << 4]); + MMA (&acc0, (vec_t) rowB, rowA[0]); + MMA (&acc1, (vec_t) rowB, rowA[1]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 2; + vector short rowB = { BO[l + 0], 0, BO[l + 1], 0, 0, 0, 0, 0 }; + vec_t *rowA = (vec_t *) & (AO[(l << 2)]); + MMA (&acc0, (vec_t) rowB, MERGE_HIGH (rowA[0], rowA[1])); + MMA (&acc1, (vec_t) rowB, MERGE_LOW (rowA[0], rowA[1])); + } + SAVE2x4_ACC (&acc0, 0); + SAVE2x4_ACC (&acc1, 4); + CO += 8; + AO += k << 3; + BO += k << 1; + } + if (m & 4) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0; + __builtin_mma_xxsetaccz (&acc0); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowB = + { BO[(l << 2) + 0], BO[(l << 2) + 2], BO[(l << 2) + 1], + BO[(l << 2) + 3], + 0, 0, 0, 0 + }; + vec_t *rowA = (vec_t *) & (AO[l << 3]); + MMA (&acc0, (vec_t) rowB, rowA[0]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 2; + vector short rowB = { BO[l + 0], 0, BO[l + 1], 0, 0, 0, 0, 0 }; + vec_t *rowA = (vec_t *) & (AO[l << 1]); + MMA (&acc0, (vec_t) rowB, MERGE_ROW (rowA[0])); + } + SAVE2x4_ACC (&acc0, 0); + CO += 4; + AO += k << 2; + BO += k << 1; + } + if (m & 2) + { + IFLOAT *BO = B; + BLASLONG l = 0; + v4sf_t t = { 0, 0, 0, 0 }; + for (l = 0; l < (k << 1); l += 2) + { + v4sf_t rowA = + { BF16TOF32 (AO[l]), BF16TOF32 (AO[l]), BF16TOF32 (AO[l + 1]), + BF16TOF32 (AO[l + 1]) + }; + v4sf_t rowB = + { BF16TOF32 (BO[l]), BF16TOF32 (BO[l + 1]), BF16TOF32 (BO[l]), + BF16TOF32 (BO[l + 1]) + }; + t += rowA * rowB; + } + t = t * valpha; + CO[0 * ldc] += t[0]; + CO[1 * ldc] += t[1]; + CO[0 * ldc + 1] += t[2]; + CO[1 * ldc + 1] += t[3]; + CO += 2; + AO += k << 1; + BO += k << 1; + } + if (m & 1) + { + IFLOAT *BO = B; + BLASLONG l = 0; + v4sf_t t = { 0, 0, 0, 0 }; + for (l = 0; l < k; l++) + { + v4sf_t rowA = { BF16TOF32 (AO[l]), BF16TOF32 (AO[l]), 0, 0 }; + v4sf_t rowB = + { BF16TOF32 (BO[l << 1]), BF16TOF32 (BO[(l << 1) + 1]), 0, + 0 + }; + t += rowA * rowB; + } + CO[0 * ldc] += t[0] * alpha; + CO[1 * ldc] += t[1] * alpha; + CO += 1; + AO += k; + BO += k << 1; + } + B += k << 1; + } + if (n & 1) + { + BLASLONG j; + FLOAT *CO; + IFLOAT *AO; + CO = C; + C += ldc; + AO = A; 
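+ /* Tail case: a single column of C remains; only row 0 of each accumulator is written back. */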
+ /* Loop for m >= 16. */ + for (j = 0; j < (m >> 4); j++) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + SET_ACC_ZERO4 (); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowB = + { BO[l << 1], BO[(l << 1) + 1], 0, 0, 0, 0, 0, 0}; + vec_t *rowA = (vec_t *) & (AO[l << 5]); + MMA (&acc0, (vec_t) rowB, rowA[0]); + MMA (&acc1, (vec_t) rowB, rowA[1]); + MMA (&acc2, (vec_t) rowB, rowA[2]); + MMA (&acc3, (vec_t) rowB, rowA[3]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 1; + vector short rowB = { BO[l], 0, 0, 0, 0, 0, 0, 0 }; + vec_t *rowA = (vec_t *) & (AO[(l << 4)]); + MMA (&acc0, (vec_t) rowB, MERGE_HIGH (rowA[0], rowA[2])); + MMA (&acc1, (vec_t) rowB, MERGE_LOW (rowA[0], rowA[2])); + MMA (&acc2, (vec_t) rowB, MERGE_HIGH (rowA[1], rowA[3])); + MMA (&acc3, (vec_t) rowB, MERGE_LOW (rowA[1], rowA[3])); + } + rowC = (v4sf_t *) &CO[0]; + __builtin_mma_disassemble_acc ((void *)result, &acc0); + rowC[0] += result[0] * alpha; + __builtin_mma_disassemble_acc ((void *)result, &acc1); + rowC[1] += result[0] * alpha; + __builtin_mma_disassemble_acc ((void *)result, &acc2); + rowC[2] += result[0] * alpha; + __builtin_mma_disassemble_acc ((void *)result, &acc3); + rowC[3] += result[0] * alpha; + AO += k << 4; + BO += k; + CO += 16; + } + /* Loop for m >= 8. */ + if (m & 8) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; + __builtin_mma_xxsetaccz (&acc0); + __builtin_mma_xxsetaccz (&acc1); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowB = + { BO[l << 1], BO[(l << 1) + 1], 0, 0, 0, 0, 0, 0}; + vec_t *rowA = (vec_t *) & (AO[l << 4]); + MMA (&acc0, (vec_t) rowB, rowA[0]); + MMA (&acc1, (vec_t) rowB, rowA[1]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 1; + vector short rowB = { BO[l], 0, 0, 0, 0, 0, 0, 0 }; + vec_t *rowA = (vec_t *) & (AO[(l << 3)]); + MMA (&acc0, (vec_t) rowB, MERGE_HIGH (rowA[0], rowA[1])); + MMA (&acc1, (vec_t) rowB, MERGE_LOW (rowA[0], rowA[1])); + } + rowC = (v4sf_t *) &CO[0]; + __builtin_mma_disassemble_acc ((void *)result, &acc0); + rowC[0] += result[0] * alpha; + __builtin_mma_disassemble_acc ((void *)result, &acc1); + rowC[1] += result[0] * alpha; + AO += k << 3; + BO += k; + CO += 8; + } + /* Loop for m >= 4. */ + if (m & 4) + { + IFLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0; + __builtin_mma_xxsetaccz (&acc0); + BLASLONG l = 0; + for (l = 0; l < k / 2; l++) + { + vector short rowB = + { BO[l << 1], BO[(l << 1) + 1], 0, 0, 0, 0, 0, 0}; + vec_t *rowA = (vec_t *) & (AO[l << 3]); + MMA (&acc0, (vec_t) rowB, rowA[0]); + } + if (k % 2 == 1) + { + if (k > 1) + l = (k / 2) << 1; + vector short rowB = { BO[l], 0, 0, 0, 0, 0, 0, 0 }; + vec_t *rowA = (vec_t *) & (AO[(l << 2)]); + MMA (&acc0, (vec_t) rowB, MERGE_ROW (rowA[0])); + } + rowC = (v4sf_t *) &CO[0]; + __builtin_mma_disassemble_acc ((void *)result, &acc0); + rowC[0] += result[0] * alpha; + AO += k << 2; + BO += k; + CO += 4; + } + /* Loop for m >= 2. */ + if (m & 2) + { + IFLOAT *BO = B; + BLASLONG l = 0; + v4sf_t t = { 0, 0, 0, 0 }; + for (l = 0; l < k; l++) + { + v4sf_t rowB = { BF16TOF32 (BO[l]), BF16TOF32 (BO[l]), 0, 0 }; + v4sf_t rowA = + { BF16TOF32 (AO[l << 1]), BF16TOF32 (AO[(l << 1) + 1]), 0, + 0 + }; + t += rowA * rowB; + } + t = t * valpha; + CO[0] += t[0]; + CO[1] += t[1]; + AO += k << 1; + BO += k; + CO += 2; + } + /* Loop for m = 1. 
*/ + if (m & 1) + { + IFLOAT *BO = B; + BLASLONG l = 0; + FLOAT t = 0; + for (l = 0; l < k; l++) + { + t += BF16TOF32 (AO[l]) * BF16TOF32 (BO[l]); + } + AO += k; + BO += k; + CO[0] += t * alpha; + CO += 1; + } + + B += k; + } + + return 0; +} diff --git a/kernel/power/sbgemm_ncopy_16_power10.c b/kernel/power/sbgemm_ncopy_16_power10.c new file mode 100644 index 000000000..c6b633011 --- /dev/null +++ b/kernel/power/sbgemm_ncopy_16_power10.c @@ -0,0 +1,437 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ + BLASLONG i, j; + + IFLOAT *aoffset; + IFLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + IFLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + IFLOAT *aoffset9, *aoffset10, *aoffset11, *aoffset12; + IFLOAT *aoffset13, *aoffset14, *aoffset15, *aoffset16; + + IFLOAT *boffset; + IFLOAT ctemp01, ctemp02, ctemp03, ctemp04; + IFLOAT ctemp05, ctemp06, ctemp07, ctemp08; + IFLOAT ctemp09, ctemp10, ctemp11, ctemp12; + IFLOAT ctemp13, ctemp14, ctemp15, ctemp16; + IFLOAT ctemp17, ctemp18, ctemp19, ctemp20; + IFLOAT ctemp21, ctemp22, ctemp23, ctemp24; + IFLOAT ctemp25, ctemp26, ctemp27, ctemp28; + IFLOAT ctemp29, ctemp30, ctemp31, ctemp32; + + aoffset = a; + boffset = b; + + j = (n >> 4); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset7 = aoffset6 + lda; + aoffset8 = aoffset7 + lda; + aoffset9 = aoffset8 + lda; + aoffset10 = aoffset9 + lda; + aoffset11 = aoffset10 + lda; + aoffset12 = aoffset11 + lda; + aoffset13 = aoffset12 + lda; + aoffset14 = aoffset13 + lda; + aoffset15 = aoffset14 + lda; + aoffset16 = aoffset15 + lda; + aoffset += 16 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + ctemp05 = *(aoffset3 + 0); + ctemp06 = *(aoffset3 + 1); + ctemp07 = *(aoffset4 + 0); + ctemp08 = *(aoffset4 + 1); + + ctemp09 = *(aoffset5 + 0); + ctemp10 = *(aoffset5 + 1); + ctemp11 = *(aoffset6 + 0); + ctemp12 = *(aoffset6 + 1); + + ctemp13 = *(aoffset7 + 0); + ctemp14 = *(aoffset7 + 1); + ctemp15 = *(aoffset8 + 0); + ctemp16 = *(aoffset8 + 1); + + ctemp17 = *(aoffset9 + 0); + ctemp18 = *(aoffset9 + 1); + ctemp19 = *(aoffset10 + 0); + ctemp20 = *(aoffset10 + 1); + + ctemp21 = *(aoffset11 + 0); + ctemp22 = *(aoffset11 + 1); + ctemp23 = *(aoffset12 + 0); + ctemp24 = *(aoffset12 + 1); + + ctemp25 = *(aoffset13 + 0); + ctemp26 = *(aoffset13 + 1); + ctemp27 = *(aoffset14 + 0); + ctemp28 = *(aoffset14 + 1); + + ctemp29 = *(aoffset15 + 0); + ctemp30 = *(aoffset15 + 1); + ctemp31 = *(aoffset16 + 0); + ctemp32 = *(aoffset16 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + *(boffset + 4) = ctemp05; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp07; + *(boffset + 7) = ctemp08; + + *(boffset + 8) = ctemp09; + *(boffset + 9) = ctemp10; + *(boffset + 10) = ctemp11; + *(boffset + 11) = ctemp12; + *(boffset + 12) = ctemp13; + *(boffset + 13) = ctemp14; + *(boffset + 14) = ctemp15; + *(boffset + 15) = ctemp16; + + *(boffset + 16) = ctemp17; + *(boffset + 17) = ctemp18; + *(boffset + 18) = ctemp19; + *(boffset + 19) = ctemp20; + *(boffset + 20) = ctemp21; + *(boffset + 21) = ctemp22; + *(boffset + 22) = ctemp23; + *(boffset + 23) = ctemp24; + + *(boffset + 24) = ctemp25; + *(boffset + 25) = ctemp26; + *(boffset + 26) = ctemp27; + *(boffset + 27) = ctemp28; + *(boffset + 28) = ctemp29; + *(boffset + 29) = ctemp30; + *(boffset + 30) = ctemp31; + *(boffset + 31) = ctemp32; + + aoffset1 += 2; + aoffset2 += 2; + aoffset3 += 2; + aoffset4 += 2; + aoffset5 += 2; + aoffset6 += 2; + aoffset7 += 2; + aoffset8 += 2; + + aoffset9 += 2; + aoffset10 += 2; + aoffset11 += 2; + aoffset12 += 2; + aoffset13 += 2; + aoffset14 += 2; +
aoffset15 += 2; + aoffset16 += 2; + boffset += 32; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp03 = *(aoffset2 + 0); + ctemp05 = *(aoffset3 + 0); + ctemp07 = *(aoffset4 + 0); + ctemp09 = *(aoffset5 + 0); + ctemp11 = *(aoffset6 + 0); + ctemp13 = *(aoffset7 + 0); + ctemp15 = *(aoffset8 + 0); + + ctemp17 = *(aoffset9 + 0); + ctemp19 = *(aoffset10 + 0); + ctemp21 = *(aoffset11 + 0); + ctemp23 = *(aoffset12 + 0); + ctemp25 = *(aoffset13 + 0); + ctemp27 = *(aoffset14 + 0); + ctemp29 = *(aoffset15 + 0); + ctemp31 = *(aoffset16 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp05; + *(boffset + 3) = ctemp07; + *(boffset + 4) = ctemp09; + *(boffset + 5) = ctemp11; + *(boffset + 6) = ctemp13; + *(boffset + 7) = ctemp15; + + *(boffset + 8) = ctemp17; + *(boffset + 9) = ctemp19; + *(boffset + 10) = ctemp21; + *(boffset + 11) = ctemp23; + *(boffset + 12) = ctemp25; + *(boffset + 13) = ctemp27; + *(boffset + 14) = ctemp29; + *(boffset + 15) = ctemp31; + + boffset += 16; + } + j--; + }while(j > 0); + } /* end of if(j > 0) */ + + if (n & 8){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset7 = aoffset6 + lda; + aoffset8 = aoffset7 + lda; + aoffset += 8 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + ctemp05 = *(aoffset3 + 0); + ctemp06 = *(aoffset3 + 1); + ctemp07 = *(aoffset4 + 0); + ctemp08 = *(aoffset4 + 1); + + ctemp09 = *(aoffset5 + 0); + ctemp10 = *(aoffset5 + 1); + ctemp11 = *(aoffset6 + 0); + ctemp12 = *(aoffset6 + 1); + + ctemp13 = *(aoffset7 + 0); + ctemp14 = *(aoffset7 + 1); + ctemp15 = *(aoffset8 + 0); + ctemp16 = *(aoffset8 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + *(boffset + 4) = ctemp05; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp07; + *(boffset + 7) = ctemp08; + + *(boffset + 8) = ctemp09; + *(boffset + 9) = ctemp10; + *(boffset + 10) = ctemp11; + *(boffset + 11) = ctemp12; + *(boffset + 12) = ctemp13; + *(boffset + 13) = ctemp14; + *(boffset + 14) = ctemp15; + *(boffset + 15) = ctemp16; + + aoffset1 += 2; + aoffset2 += 2; + aoffset3 += 2; + aoffset4 += 2; + aoffset5 += 2; + aoffset6 += 2; + aoffset7 += 2; + aoffset8 += 2; + + boffset += 16; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp03 = *(aoffset2 + 0); + ctemp05 = *(aoffset3 + 0); + ctemp07 = *(aoffset4 + 0); + ctemp09 = *(aoffset5 + 0); + ctemp11 = *(aoffset6 + 0); + ctemp13 = *(aoffset7 + 0); + ctemp15 = *(aoffset8 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp05; + *(boffset + 3) = ctemp07; + *(boffset + 4) = ctemp09; + *(boffset + 5) = ctemp11; + *(boffset + 6) = ctemp13; + *(boffset + 7) = ctemp15; + + boffset += 8; + } + } + + if (n & 4){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset += 4 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + ctemp05 = *(aoffset3 + 0); + ctemp06 = *(aoffset3 + 1); + ctemp07 = *(aoffset4 + 0); + ctemp08 = *(aoffset4 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + 
*(boffset + 4) = ctemp05; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp07; + *(boffset + 7) = ctemp08; + + aoffset1 += 2; + aoffset2 += 2; + aoffset3 += 2; + aoffset4 += 2; + boffset += 8; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp03 = *(aoffset2 + 0); + ctemp05 = *(aoffset3 + 0); + ctemp07 = *(aoffset4 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp05; + *(boffset + 3) = ctemp07; + boffset += 4; + } + } + + if (n & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset += 2 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp02; + *(boffset + 3) = ctemp04; + + aoffset1 += 2; + aoffset2 += 2; + boffset += 4; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp03 = *(aoffset2 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + boffset += 2; + } + } + + if (n & 1){ + aoffset1 = aoffset; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + + aoffset1 += 2; + boffset += 2; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + + *(boffset + 0) = ctemp01; + // boffset += 1; + } + } + + return 0; +} diff --git a/kernel/power/sbgemm_ncopy_8_power10.c b/kernel/power/sbgemm_ncopy_8_power10.c new file mode 100644 index 000000000..0e4a680fb --- /dev/null +++ b/kernel/power/sbgemm_ncopy_8_power10.c @@ -0,0 +1,383 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include <stdio.h> +#include <altivec.h> +#include "common.h" + +typedef IFLOAT vec_bf16 __attribute__ ((vector_size (16))); +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ + BLASLONG i, j; + + IFLOAT *aoffset; + IFLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + IFLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + + IFLOAT *boffset; + vec_bf16 vtemp01, vtemp02, vtemp03, vtemp04; + vec_bf16 vtemp05, vtemp06, vtemp07, vtemp08; + vec_bf16 vtemp09, vtemp10, vtemp11, vtemp12; + vector char mask = + { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 }; + vector char mask1 = + { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 }; + IFLOAT ctemp01, ctemp02, ctemp03, ctemp04; + IFLOAT ctemp05, ctemp06, ctemp07, ctemp08; + IFLOAT ctemp09, ctemp10, ctemp11, ctemp12; + IFLOAT ctemp13, ctemp14, ctemp15, ctemp16; + IFLOAT ctemp17; + IFLOAT ctemp25; + IFLOAT ctemp33; + IFLOAT ctemp41; + IFLOAT ctemp49; + IFLOAT ctemp57; + + + aoffset = a; + boffset = b; + + j = (n >> 3); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset7 = aoffset6 + lda; + aoffset8 = aoffset7 + lda; + aoffset += 8 * lda; + + i = (m >> 3); + if (i > 0){ + do{ + vtemp01 = *(vec_bf16 *)(aoffset1); + vtemp02 = *(vec_bf16 *)(aoffset2); + vtemp03 = *(vec_bf16 *)(aoffset3); + vtemp04 = *(vec_bf16 *)(aoffset4); + vtemp05 = *(vec_bf16 *)(aoffset5); + vtemp06 = *(vec_bf16 *)(aoffset6); + vtemp07 = *(vec_bf16 *)(aoffset7); + vtemp08 = *(vec_bf16 *)(aoffset8); + + vtemp09 = vec_perm(vtemp01, vtemp02, mask); + vtemp10 = vec_perm(vtemp03, vtemp04, mask); + vtemp11 = vec_perm(vtemp05, vtemp06, mask); + vtemp12 = vec_perm(vtemp07, vtemp08, mask); + + *(vec_bf16 *)(boffset + 0) = vec_xxpermdi(vtemp09, vtemp10, 0); + *(vec_bf16 *)(boffset + 8) = vec_xxpermdi(vtemp11, vtemp12, 0); + *(vec_bf16 *)(boffset + 16) = vec_xxpermdi(vtemp09, vtemp10, 3); + *(vec_bf16 *)(boffset + 24) = vec_xxpermdi(vtemp11, vtemp12, 3); + + vtemp09 = vec_perm(vtemp01, vtemp02, mask1); + vtemp10 = vec_perm(vtemp03, vtemp04, mask1); + vtemp11 = vec_perm(vtemp05, vtemp06, mask1); + vtemp12 = vec_perm(vtemp07, vtemp08, mask1); + + *(vec_bf16 *)(boffset + 32) = vec_xxpermdi(vtemp09, vtemp10, 0); + *(vec_bf16 *)(boffset + 40) = vec_xxpermdi(vtemp11, vtemp12, 0); + *(vec_bf16 *)(boffset + 48) = vec_xxpermdi(vtemp09, vtemp10, 3); + *(vec_bf16 *)(boffset + 56) = vec_xxpermdi(vtemp11, vtemp12, 3); + + aoffset1 += 8; + aoffset2 += 8; + aoffset3 += 8; + aoffset4 += 8; + aoffset5 += 8; + aoffset6 += 8; + aoffset7 += 8; + aoffset8 += 8; + boffset += 64; + i --; + }while(i > 0); + } + + i = (m & 7); + if (i >= 2){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp09 = *(aoffset1 + 1); + ctemp17 = *(aoffset2 + 0); + ctemp25 = *(aoffset2 + 1); + ctemp33 = *(aoffset3 + 0); + ctemp41 = *(aoffset3 + 1); + ctemp49 = *(aoffset4 + 0); + ctemp57 = *(aoffset4 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp09; + *(boffset + 2) = ctemp17; + *(boffset + 3) = ctemp25; + *(boffset + 4) = ctemp33; + *(boffset + 5) = ctemp41; + *(boffset + 6) = ctemp49; + *(boffset + 7) = ctemp57; + aoffset1 += 2; + aoffset2 += 2; + aoffset3 += 2; + aoffset4 += 2; + + ctemp01 = *(aoffset5 + 0); + ctemp09 = *(aoffset5 + 1); + ctemp17 = *(aoffset6 + 0); + ctemp25 = *(aoffset6 + 1); + ctemp33 = *(aoffset7 + 0); + ctemp41 = *(aoffset7 + 1); + ctemp49 = *(aoffset8 + 0); + ctemp57 =
*(aoffset8 + 1); + *(boffset + 8) = ctemp01; + *(boffset + 9) = ctemp09; + *(boffset + 10) = ctemp17; + *(boffset + 11) = ctemp25; + *(boffset + 12) = ctemp33; + *(boffset + 13) = ctemp41; + *(boffset + 14) = ctemp49; + *(boffset + 15) = ctemp57; + + aoffset5 += 2; + aoffset6 += 2; + aoffset7 += 2; + aoffset8 += 2; + + boffset += 16; + i -= 2; + }while(i > 1); + } + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp09 = *(aoffset2 + 0); + ctemp17 = *(aoffset3 + 0); + ctemp25 = *(aoffset4 + 0); + ctemp33 = *(aoffset5 + 0); + ctemp41 = *(aoffset6 + 0); + ctemp49 = *(aoffset7 + 0); + ctemp57 = *(aoffset8 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp09; + *(boffset + 2) = ctemp17; + *(boffset + 3) = ctemp25; + *(boffset + 4) = ctemp33; + *(boffset + 5) = ctemp41; + *(boffset + 6) = ctemp49; + *(boffset + 7) = ctemp57; + + aoffset1 ++; + aoffset2 ++; + aoffset3 ++; + aoffset4 ++; + aoffset5 ++; + aoffset6 ++; + aoffset7 ++; + aoffset8 ++; + + boffset += 8; + } + + j--; + }while(j > 0); + } /* end of if(j > 0) */ + + if (n & 4){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset += 4 * lda; + + i = (m >> 2); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + + ctemp09 = *(aoffset3 + 0); + ctemp10 = *(aoffset3 + 1); + ctemp11 = *(aoffset3 + 2); + ctemp12 = *(aoffset3 + 3); + + ctemp13 = *(aoffset4 + 0); + ctemp14 = *(aoffset4 + 1); + ctemp15 = *(aoffset4 + 2); + ctemp16 = *(aoffset4 + 3); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp05; + *(boffset + 3) = ctemp06; + + *(boffset + 4) = ctemp09; + *(boffset + 5) = ctemp10; + *(boffset + 6) = ctemp13; + *(boffset + 7) = ctemp14; + + *(boffset + 8) = ctemp03; + *(boffset + 9) = ctemp04; + *(boffset + 10) = ctemp07; + *(boffset + 11) = ctemp08; + + *(boffset + 12) = ctemp11; + *(boffset + 13) = ctemp12; + *(boffset + 14) = ctemp15; + *(boffset + 15) = ctemp16; + + aoffset1 += 4; + aoffset2 += 4; + aoffset3 += 4; + aoffset4 += 4; + boffset += 16; + i --; + }while(i > 0); + } + + i = (m & 3); + if (i >= 2){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp09 = *(aoffset1 + 1); + ctemp17 = *(aoffset2 + 0); + ctemp25 = *(aoffset2 + 1); + ctemp33 = *(aoffset3 + 0); + ctemp41 = *(aoffset3 + 1); + ctemp49 = *(aoffset4 + 0); + ctemp57 = *(aoffset4 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp09; + *(boffset + 2) = ctemp17; + *(boffset + 3) = ctemp25; + *(boffset + 4) = ctemp33; + *(boffset + 5) = ctemp41; + *(boffset + 6) = ctemp49; + *(boffset + 7) = ctemp57; + aoffset1 += 2; + aoffset2 += 2; + aoffset3 += 2; + aoffset4 += 2; + + boffset += 8; + i -= 2; + }while(i > 1); + } + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset2 + 0); + ctemp03 = *(aoffset3 + 0); + ctemp04 = *(aoffset4 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + + aoffset1 ++; + aoffset2 ++; + aoffset3 ++; + aoffset4 ++; + + boffset += 4; + } + } + + if (n & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset += 2 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp02; + *(boffset 
+ 3) = ctemp04; + + aoffset1 += 2; + aoffset2 += 2; + boffset += 4; + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset2 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + + aoffset1 ++; + aoffset2 ++; + boffset += 2; + } + } /* end of if(j > 0) */ + + if (n & 1){ + aoffset1 = aoffset; + + i = m; + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + + *(boffset + 0) = ctemp01; + + aoffset1 ++; + boffset ++; + i --; + }while(i > 0); + } + + } /* end of if(j > 0) */ + + return 0; +} diff --git a/kernel/power/sbgemm_tcopy_16_power10.c b/kernel/power/sbgemm_tcopy_16_power10.c new file mode 100644 index 000000000..120c5ab7c --- /dev/null +++ b/kernel/power/sbgemm_tcopy_16_power10.c @@ -0,0 +1,244 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include <stdio.h> +#include <altivec.h> +#include "common.h" +typedef IFLOAT vec_bf16 __attribute__ ((vector_size (16))); + +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ + + BLASLONG i, j; + + IFLOAT *aoffset; + IFLOAT *aoffset1, *aoffset2; + IFLOAT *boffset; + + vec_bf16 vtemp01, vtemp02, vtemp03, vtemp04; + IFLOAT ctemp01, ctemp02, ctemp03, ctemp04; + IFLOAT ctemp05, ctemp06, ctemp07, ctemp08; + + aoffset = a; + boffset = b; + +#if 0 + fprintf(stderr, "m = %d n = %d\n", m, n); +#endif + + j = (n >> 4); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset + lda; + aoffset += 16; + + i = (m >> 1); + if (i > 0){ + do{ + vtemp01 = *(vec_bf16 *)(aoffset1); + vtemp02 = *(vec_bf16 *)(aoffset1+8); + vtemp03 = *(vec_bf16 *)(aoffset2); + vtemp04 = *(vec_bf16 *)(aoffset2+8); + *(vec_bf16 *)(boffset + 0) = vec_mergeh(vtemp01, vtemp03); + *(vec_bf16 *)(boffset + 8) = vec_mergel(vtemp01, vtemp03); + *(vec_bf16 *)(boffset + 16) = vec_mergeh(vtemp02, vtemp04); + *(vec_bf16 *)(boffset + 24) = vec_mergel(vtemp02, vtemp04); + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 32; + + i --; + }while(i > 0); + } + + if (m & 1){ + vtemp01 = *(vec_bf16 *)(aoffset1); + vtemp02 = *(vec_bf16 *)(aoffset1+8); + *(vec_bf16 *)(boffset + 0) = vtemp01; + *(vec_bf16 *)(boffset + 8) = vtemp02; + boffset += 16; + } + + j--; + }while(j > 0); + } /* end of if(j > 0) */ + + if (n & 8){ + aoffset1 = aoffset; + aoffset2 = aoffset + lda; + aoffset += 8; + + i = (m >> 1); + if (i > 0){ + do{ + vtemp01 = *(vec_bf16 *)(aoffset1); + vtemp03 = *(vec_bf16 *)(aoffset2); + *(vec_bf16 *)(boffset + 0) = vec_mergeh(vtemp01, vtemp03); + *(vec_bf16 *)(boffset + 8) = vec_mergel(vtemp01, vtemp03); + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 16; + + i --; + }while(i > 0); + } + + if (m & 1){ + vtemp01 = *(vec_bf16 *)(aoffset1); + *(vec_bf16 *)(boffset + 0) = vtemp01; + boffset += 8; + } + } + + if (n & 4){ + aoffset1 = aoffset; + aoffset2 = aoffset + lda; + aoffset += 4; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp05; + *(boffset + 2) = ctemp02; + *(boffset + 3) = ctemp06; + *(boffset + 4) = ctemp03; + *(boffset + 5) = ctemp07; + *(boffset + 6) = ctemp04; + *(boffset + 7) = ctemp08; + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 8; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + + boffset += 4; + } + } + + if (n & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset + lda; + aoffset += 2; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 4; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + boffset += 2; + } + } + + if (n & 1){ +
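/* Last odd column: copy one element from each of two successive rows per pass. */ +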
aoffset1 = aoffset; + aoffset2 = aoffset + lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset2 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 2; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + *(boffset + 0) = ctemp01; + // boffset += 1; + } + } + + return 0; +} diff --git a/kernel/power/sbgemm_tcopy_8_power10.c b/kernel/power/sbgemm_tcopy_8_power10.c new file mode 100644 index 000000000..aceb0c9d8 --- /dev/null +++ b/kernel/power/sbgemm_tcopy_8_power10.c @@ -0,0 +1,659 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" +#include <altivec.h> + +typedef IFLOAT vec_bf16 __attribute__ ((vector_size (16))); + +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ + + BLASLONG i, j; + + IFLOAT *aoffset; + IFLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + IFLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + + IFLOAT *boffset, *boffset1, *boffset2, *boffset3, *boffset4; + vec_bf16 vtemp01, vtemp02, vtemp03, vtemp04; + vec_bf16 vtemp05, vtemp06, vtemp07, vtemp08; + IFLOAT ctemp01, ctemp02, ctemp03, ctemp04; + IFLOAT ctemp05, ctemp06, ctemp07, ctemp08; + IFLOAT ctemp09, ctemp10, ctemp11, ctemp12; + IFLOAT ctemp13, ctemp14, ctemp15, ctemp16; + IFLOAT ctemp17, ctemp18, ctemp19, ctemp20; + IFLOAT ctemp21, ctemp22, ctemp23, ctemp24; + IFLOAT ctemp25, ctemp26, ctemp27, ctemp28; + IFLOAT ctemp29, ctemp30, ctemp31, ctemp32; + + aoffset = a; + boffset = b; + +#if 0 + fprintf(stderr, "M = %d N = %d\n", m, n); +#endif + + boffset2 = b + m * (n & ~7); + boffset3 = b + m * (n & ~3); + boffset4 = b + m * (n & ~1); + + j = (m >> 3); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset7 = aoffset6 + lda; + aoffset8 = aoffset7 + lda; + aoffset += 8 * lda; + + boffset1 = boffset; + boffset += 64; + + i = (n >> 3); + if (i > 0){ + do{ + vtemp01 = *(vec_bf16 *)(aoffset1); + vtemp02 = *(vec_bf16 *)(aoffset2); + vtemp03 = *(vec_bf16 *)(aoffset3); + vtemp04 = *(vec_bf16 *)(aoffset4); + vtemp05 = *(vec_bf16 *)(aoffset5); + vtemp06 = *(vec_bf16 *)(aoffset6); + vtemp07 = *(vec_bf16 *)(aoffset7); + vtemp08 = *(vec_bf16 *)(aoffset8); + aoffset1 += 8; + aoffset2 += 8; + aoffset3 += 8; + aoffset4 += 8; + aoffset5 += 8; + aoffset6 += 8; + aoffset7 += 8; + aoffset8 += 8; + + *(vec_bf16 *)(boffset1 + 0) = vec_mergeh(vtemp01, vtemp02); + *(vec_bf16 *)(boffset1 + 8) = vec_mergel(vtemp01, vtemp02); + *(vec_bf16 *)(boffset1 + 16) = vec_mergeh(vtemp03, vtemp04); + *(vec_bf16 *)(boffset1 + 24) = vec_mergel(vtemp03, vtemp04); + *(vec_bf16 *)(boffset1 + 32) = vec_mergeh(vtemp05, vtemp06); + *(vec_bf16 *)(boffset1 + 40) = vec_mergel(vtemp05, vtemp06); + *(vec_bf16 *)(boffset1 + 48) = vec_mergeh(vtemp07, vtemp08); + *(vec_bf16 *)(boffset1 + 56) = vec_mergel(vtemp07, vtemp08); + + boffset1 += m * 8; + i --; + }while(i > 0); + } + + if (n & 4){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + aoffset1 += 4; + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + aoffset2 += 4; + + ctemp09 = *(aoffset3 + 0); + ctemp10 = *(aoffset3 + 1); + ctemp11 = *(aoffset3 + 2); + ctemp12 = *(aoffset3 + 3); + aoffset3 += 4; + + ctemp13 = *(aoffset4 + 0); + ctemp14 = *(aoffset4 + 1); + ctemp15 = *(aoffset4 + 2); + ctemp16 = *(aoffset4 + 3); + aoffset4 += 4; + + ctemp17 = *(aoffset5 + 0); + ctemp18 = *(aoffset5 + 1); + ctemp19 = *(aoffset5 + 2); + ctemp20 = *(aoffset5 + 3); + aoffset5 += 4; + + ctemp21 = *(aoffset6 + 0); + ctemp22 = *(aoffset6 + 1); + ctemp23 = *(aoffset6 + 2); + ctemp24 = *(aoffset6 + 3); + aoffset6 += 4; + + ctemp25 = *(aoffset7 + 0); + ctemp26 = *(aoffset7 + 1); + ctemp27 = *(aoffset7 + 2); + ctemp28 = *(aoffset7 + 3); + aoffset7 += 4; + + ctemp29 = *(aoffset8 + 0); + ctemp30 = *(aoffset8 + 1); + ctemp31 = *(aoffset8 + 2); + ctemp32 = *(aoffset8 + 3); + aoffset8 += 4;
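+ /* Interleave the row pairs (1,2), (3,4), (5,6) and (7,8) element-wise into the n & 4 panel at boffset2. */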
+ + *(boffset2 + 0) = ctemp01; + *(boffset2 + 1) = ctemp05; + *(boffset2 + 2) = ctemp02; + *(boffset2 + 3) = ctemp06; + *(boffset2 + 4) = ctemp03; + *(boffset2 + 5) = ctemp07; + *(boffset2 + 6) = ctemp04; + *(boffset2 + 7) = ctemp08; + + *(boffset2 + 8) = ctemp09; + *(boffset2 + 9) = ctemp13; + *(boffset2 + 10) = ctemp10; + *(boffset2 + 11) = ctemp14; + *(boffset2 + 12) = ctemp11; + *(boffset2 + 13) = ctemp15; + *(boffset2 + 14) = ctemp12; + *(boffset2 + 15) = ctemp16; + + *(boffset2 + 16) = ctemp17; + *(boffset2 + 17) = ctemp21; + *(boffset2 + 18) = ctemp18; + *(boffset2 + 19) = ctemp22; + *(boffset2 + 20) = ctemp19; + *(boffset2 + 21) = ctemp23; + *(boffset2 + 22) = ctemp20; + *(boffset2 + 23) = ctemp24; + + *(boffset2 + 24) = ctemp25; + *(boffset2 + 25) = ctemp29; + *(boffset2 + 26) = ctemp26; + *(boffset2 + 27) = ctemp30; + *(boffset2 + 28) = ctemp27; + *(boffset2 + 29) = ctemp31; + *(boffset2 + 30) = ctemp28; + *(boffset2 + 31) = ctemp32; + + boffset2 += 32; + } + + if (n & 2){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + aoffset1 += 2; + + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + aoffset2 += 2; + + ctemp05 = *(aoffset3 + 0); + ctemp06 = *(aoffset3 + 1); + aoffset3 += 2; + + ctemp07 = *(aoffset4 + 0); + ctemp08 = *(aoffset4 + 1); + aoffset4 += 2; + + ctemp09 = *(aoffset5 + 0); + ctemp10 = *(aoffset5 + 1); + aoffset5 += 2; + + ctemp11 = *(aoffset6 + 0); + ctemp12 = *(aoffset6 + 1); + aoffset6 += 2; + + ctemp13 = *(aoffset7 + 0); + ctemp14 = *(aoffset7 + 1); + aoffset7 += 2; + + ctemp15 = *(aoffset8 + 0); + ctemp16 = *(aoffset8 + 1); + aoffset8 += 2; + + *(boffset3 + 0) = ctemp01; + *(boffset3 + 1) = ctemp02; + *(boffset3 + 2) = ctemp03; + *(boffset3 + 3) = ctemp04; + *(boffset3 + 4) = ctemp05; + *(boffset3 + 5) = ctemp06; + *(boffset3 + 6) = ctemp07; + *(boffset3 + 7) = ctemp08; + *(boffset3 + 8) = ctemp09; + *(boffset3 + 9) = ctemp10; + *(boffset3 + 10) = ctemp11; + *(boffset3 + 11) = ctemp12; + *(boffset3 + 12) = ctemp13; + *(boffset3 + 13) = ctemp14; + *(boffset3 + 14) = ctemp15; + *(boffset3 + 15) = ctemp16; + boffset3 += 16; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + ctemp03 = *(aoffset3 + 0); + aoffset3 ++; + ctemp04 = *(aoffset4 + 0); + aoffset4 ++; + ctemp05 = *(aoffset5 + 0); + aoffset5 ++; + ctemp06 = *(aoffset6 + 0); + aoffset6 ++; + ctemp07 = *(aoffset7 + 0); + aoffset7 ++; + ctemp08 = *(aoffset8 + 0); + aoffset8 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + *(boffset4 + 2) = ctemp03; + *(boffset4 + 3) = ctemp04; + *(boffset4 + 4) = ctemp05; + *(boffset4 + 5) = ctemp06; + *(boffset4 + 6) = ctemp07; + *(boffset4 + 7) = ctemp08; + boffset4 += 8; + } + + j--; + }while(j > 0); + } + + if (m & 4){ + + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset += 4 * lda; + + boffset1 = boffset; + boffset += 32; + + i = (n >> 3); + if (i > 0){ + + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + ctemp05 = *(aoffset1 + 4); + ctemp06 = *(aoffset1 + 5); + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + aoffset1 += 8; + + ctemp09 = *(aoffset2 + 0); + ctemp10 = *(aoffset2 + 1); + ctemp11 = *(aoffset2 + 2); + ctemp12 = *(aoffset2 + 3); + ctemp13 = *(aoffset2 + 4); + ctemp14 = *(aoffset2 + 5); + ctemp15 = *(aoffset2 + 6); + ctemp16 = *(aoffset2 + 7); + aoffset2 += 8; + + ctemp17 = *(aoffset3 + 0); + ctemp18 = *(aoffset3 + 1); + 
ctemp19 = *(aoffset3 + 2); + ctemp20 = *(aoffset3 + 3); + ctemp21 = *(aoffset3 + 4); + ctemp22 = *(aoffset3 + 5); + ctemp23 = *(aoffset3 + 6); + ctemp24 = *(aoffset3 + 7); + aoffset3 += 8; + + ctemp25 = *(aoffset4 + 0); + ctemp26 = *(aoffset4 + 1); + ctemp27 = *(aoffset4 + 2); + ctemp28 = *(aoffset4 + 3); + ctemp29 = *(aoffset4 + 4); + ctemp30 = *(aoffset4 + 5); + ctemp31 = *(aoffset4 + 6); + ctemp32 = *(aoffset4 + 7); + aoffset4 += 8; + + *(boffset1 + 0) = ctemp01; + *(boffset1 + 1) = ctemp09; + *(boffset1 + 2) = ctemp02; + *(boffset1 + 3) = ctemp10; + *(boffset1 + 4) = ctemp03; + *(boffset1 + 5) = ctemp11; + *(boffset1 + 6) = ctemp04; + *(boffset1 + 7) = ctemp12; + + *(boffset1 + 8) = ctemp05; + *(boffset1 + 9) = ctemp13; + *(boffset1 + 10) = ctemp06; + *(boffset1 + 11) = ctemp14; + *(boffset1 + 12) = ctemp07; + *(boffset1 + 13) = ctemp15; + *(boffset1 + 14) = ctemp08; + *(boffset1 + 15) = ctemp16; + + *(boffset1 + 16) = ctemp17; + *(boffset1 + 17) = ctemp25; + *(boffset1 + 18) = ctemp18; + *(boffset1 + 19) = ctemp26; + *(boffset1 + 20) = ctemp19; + *(boffset1 + 21) = ctemp27; + *(boffset1 + 22) = ctemp20; + *(boffset1 + 23) = ctemp28; + + *(boffset1 + 24) = ctemp21; + *(boffset1 + 25) = ctemp29; + *(boffset1 + 26) = ctemp22; + *(boffset1 + 27) = ctemp30; + *(boffset1 + 28) = ctemp23; + *(boffset1 + 29) = ctemp31; + *(boffset1 + 30) = ctemp24; + *(boffset1 + 31) = ctemp32; + + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4) { + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + aoffset1 += 4; + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + aoffset2 += 4; + + ctemp09 = *(aoffset3 + 0); + ctemp10 = *(aoffset3 + 1); + ctemp11 = *(aoffset3 + 2); + ctemp12 = *(aoffset3 + 3); + aoffset3 += 4; + + ctemp13 = *(aoffset4 + 0); + ctemp14 = *(aoffset4 + 1); + ctemp15 = *(aoffset4 + 2); + ctemp16 = *(aoffset4 + 3); + aoffset4 += 4; + + *(boffset2 + 0) = ctemp01; + *(boffset2 + 1) = ctemp05; + *(boffset2 + 2) = ctemp02; + *(boffset2 + 3) = ctemp06; + *(boffset2 + 4) = ctemp03; + *(boffset2 + 5) = ctemp07; + *(boffset2 + 6) = ctemp04; + *(boffset2 + 7) = ctemp08; + + *(boffset2 + 8) = ctemp09; + *(boffset2 + 9) = ctemp13; + *(boffset2 + 10) = ctemp10; + *(boffset2 + 11) = ctemp14; + *(boffset2 + 12) = ctemp11; + *(boffset2 + 13) = ctemp15; + *(boffset2 + 14) = ctemp12; + *(boffset2 + 15) = ctemp16; + boffset2 += 16; + } + + if (n & 2){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + aoffset1 += 2; + + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + aoffset2 += 2; + + ctemp05 = *(aoffset3 + 0); + ctemp06 = *(aoffset3 + 1); + aoffset3 += 2; + + ctemp07 = *(aoffset4 + 0); + ctemp08 = *(aoffset4 + 1); + aoffset4 += 2; + + *(boffset3 + 0) = ctemp01; + *(boffset3 + 1) = ctemp02; + *(boffset3 + 2) = ctemp03; + *(boffset3 + 3) = ctemp04; + *(boffset3 + 4) = ctemp05; + *(boffset3 + 5) = ctemp06; + *(boffset3 + 6) = ctemp07; + *(boffset3 + 7) = ctemp08; + boffset3 += 8; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + ctemp03 = *(aoffset3 + 0); + aoffset3 ++; + ctemp04 = *(aoffset4 + 0); + aoffset4 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + *(boffset4 + 2) = ctemp03; + *(boffset4 + 3) = ctemp04; + boffset4 += 4; + } + } + + if (m & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset += 2 * lda; + + boffset1 = boffset; + boffset += 16; + + i = (n 
>> 3); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + ctemp05 = *(aoffset1 + 4); + ctemp06 = *(aoffset1 + 5); + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + aoffset1 += 8; + + ctemp09 = *(aoffset2 + 0); + ctemp10 = *(aoffset2 + 1); + ctemp11 = *(aoffset2 + 2); + ctemp12 = *(aoffset2 + 3); + ctemp13 = *(aoffset2 + 4); + ctemp14 = *(aoffset2 + 5); + ctemp15 = *(aoffset2 + 6); + ctemp16 = *(aoffset2 + 7); + aoffset2 += 8; + + *(boffset1 + 0) = ctemp01; + *(boffset1 + 1) = ctemp09; + *(boffset1 + 2) = ctemp02; + *(boffset1 + 3) = ctemp10; + *(boffset1 + 4) = ctemp03; + *(boffset1 + 5) = ctemp11; + *(boffset1 + 6) = ctemp04; + *(boffset1 + 7) = ctemp12; + + *(boffset1 + 8) = ctemp05; + *(boffset1 + 9) = ctemp13; + *(boffset1 + 10) = ctemp06; + *(boffset1 + 11) = ctemp14; + *(boffset1 + 12) = ctemp07; + *(boffset1 + 13) = ctemp15; + *(boffset1 + 14) = ctemp08; + *(boffset1 + 15) = ctemp16; + + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + aoffset1 += 4; + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + aoffset2 += 4; + + *(boffset2 + 0) = ctemp01; + *(boffset2 + 1) = ctemp05; + *(boffset2 + 2) = ctemp02; + *(boffset2 + 3) = ctemp06; + *(boffset2 + 4) = ctemp03; + *(boffset2 + 5) = ctemp07; + *(boffset2 + 6) = ctemp04; + *(boffset2 + 7) = ctemp08; + boffset2 += 8; + } + + if (n & 2){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + aoffset1 += 2; + + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + aoffset2 += 2; + + *(boffset3 + 0) = ctemp01; + *(boffset3 + 1) = ctemp02; + *(boffset3 + 2) = ctemp03; + *(boffset3 + 3) = ctemp04; + boffset3 += 4; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + boffset4 += 2; + } + } + + if (m & 1){ + aoffset1 = aoffset; + // aoffset += lda; + + boffset1 = boffset; + // boffset += 8; + + i = (n >> 3); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + ctemp05 = *(aoffset1 + 4); + ctemp06 = *(aoffset1 + 5); + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + aoffset1 += 8; + + *(boffset1 + 0) = ctemp01; + *(boffset1 + 1) = ctemp02; + *(boffset1 + 2) = ctemp03; + *(boffset1 + 3) = ctemp04; + *(boffset1 + 4) = ctemp05; + *(boffset1 + 5) = ctemp06; + *(boffset1 + 6) = ctemp07; + *(boffset1 + 7) = ctemp08; + + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + aoffset1 += 4; + + *(boffset2 + 0) = ctemp01; + *(boffset2 + 1) = ctemp02; + *(boffset2 + 2) = ctemp03; + *(boffset2 + 3) = ctemp04; + // boffset2 += 4; + } + + if (n & 2){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + aoffset1 += 2; + + *(boffset3 + 0) = ctemp01; + *(boffset3 + 1) = ctemp02; + // boffset3 += 2; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + *(boffset4 + 0) = ctemp01; + boffset4 ++; + } + } + + return 0; +} diff --git a/kernel/power/scal.S b/kernel/power/scal.S index 7c65d1234..19fdd32ab 100644 --- a/kernel/power/scal.S +++ b/kernel/power/scal.S @@ -43,7 +43,7 @@ #define XX r4 #define PREA r5 -#ifdef 
linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define X r6 #define INCX r7 diff --git a/kernel/power/scal_ppc440.S b/kernel/power/scal_ppc440.S index ed148834d..d977b0b59 100644 --- a/kernel/power/scal_ppc440.S +++ b/kernel/power/scal_ppc440.S @@ -43,7 +43,7 @@ #define XX r4 #define PRE r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define X r6 #define INCX r7 diff --git a/kernel/power/scopy.c b/kernel/power/scopy.c index 5e3fe45a5..8ff8cb329 100644 --- a/kernel/power/scopy.c +++ b/kernel/power/scopy.c @@ -35,9 +35,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "scopy_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_32 diff --git a/kernel/power/scopy_microk_power8.c b/kernel/power/scopy_microk_power8.c index 7a54d5e1e..da39789b1 100644 --- a/kernel/power/scopy_microk_power8.c +++ b/kernel/power/scopy_microk_power8.c @@ -51,10 +51,10 @@ static void scopy_kernel_32 (long n, float *x, float *y) "addi %2, %2, 128 \n\t" "addic. %1, %1, -32 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "stxvd2x 40, 0, %3 \n\t" "stxvd2x 41, %5, %3 \n\t" @@ -77,9 +77,9 @@ static void scopy_kernel_32 (long n, float *x, float *y) "addi %2, %2, 128 \n\t" "addic. %1, %1, -32 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "stxvd2x 40, 0, %3 \n\t" "stxvd2x 41, %5, %3 \n\t" diff --git a/kernel/power/scopy_power10.c b/kernel/power/scopy_power10.c new file mode 100644 index 000000000..3398ce827 --- /dev/null +++ b/kernel/power/scopy_power10.c @@ -0,0 +1,130 @@ +/*************************************************************************** +Copyright (c) 2013-2016, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
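(The scopy_microk_power8.c hunks above replace GNU-style numeric asm labels such as "1:" and branch targets such as "2f" with "one%=:" and "two%=", and ".p2align" with the more widely supported ".align". In GCC/Clang extended asm, "%=" expands to a number unique to each asm instance, so emitted labels cannot collide when a block is inlined more than once, and named labels of this form are also accepted by assemblers that lack local numeric labels, such as the AIX assembler. A minimal sketch of the mechanism, assuming a 64-bit POWER target and n >= 1; count_down is a hypothetical helper, not OpenBLAS code:)

#include <stdio.h>

static long count_down(long n)
{
#if defined(__powerpc64__)
    /* "loop%=" expands to e.g. "loop42", unique to this asm instance. */
    __asm__ volatile
    (
        "loop%=:            \n\t"
        "addic. %0, %0, -1  \n\t"   /* n-- and set cr0 */
        "bgt    loop%=      \n"     /* loop while n > 0 */
        : "+r" (n)
        :
        : "cr0"
    );
#else
    while (n > 0)   /* portable fallback so the sketch runs anywhere */
        n--;
#endif
    return n;
}

int main(void)
{
    printf("%ld\n", count_down(10));   /* prints 0 */
    return 0;
}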
+*****************************************************************************/ + +#include "common.h" + +#if defined(__VEC__) || defined(__ALTIVEC__) +#include "copy_microk_power10.c" +#endif + +#ifndef HAVE_KERNEL + +static void copy_kernel (BLASLONG n, FLOAT *x, FLOAT *y) +{ + + BLASLONG i=0; + FLOAT f0, f1, f2, f3, f4, f5, f6, f7; + FLOAT *x1=x; + FLOAT *y1=y; + + while ( i<n ) + { + f0 = x1[0]; + f1 = x1[1]; + f2 = x1[2]; + f3 = x1[3]; + f4 = x1[4]; + f5 = x1[5]; + f6 = x1[6]; + f7 = x1[7]; + + y1[0] = f0; + y1[1] = f1; + y1[2] = f2; + y1[3] = f3; + y1[4] = f4; + y1[5] = f5; + y1[6] = f6; + y1[7] = f7; + + x1 += 8; + y1 += 8; + i += 8; + + } + return; +} + +#endif + +int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ + BLASLONG i=0; + BLASLONG ix=0,iy=0; + + if ( n <= 0 ) return(0); + + if ( (inc_x == 1) && (inc_y == 1) ) + { + + if ( n >= 128 ) + { + BLASLONG align = ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 2) & 0x7; + for (i = 0; i < align; i++) { + y[i] = x[i] ; + } + } + BLASLONG n1 = (n-i) & -128; + if ( n1 ) + { + copy_kernel(n1, &x[i], &y[i]); + i += n1; + } + + while(i < n) + { + y[i] = x[i] ; + i++ ; + + } + + + } + else + { + + while(i < n) + { + y[iy] = x[ix] ; + ix += inc_x ; + iy += inc_y ; + i++ ; + + } + + } + return(0); + + +} + + diff --git a/kernel/power/sdot.c b/kernel/power/sdot.c index ae527dde9..ffeab6638 100644 --- a/kernel/power/sdot.c +++ b/kernel/power/sdot.c @@ -35,9 +35,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) + +#include "sdot_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_16 diff --git a/kernel/power/sdot_microk_power10.c b/kernel/power/sdot_microk_power10.c new file mode 100644 index 000000000..2f028c5a0 --- /dev/null +++ b/kernel/power/sdot_microk_power10.c @@ -0,0 +1,135 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
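(Before calling the vectorized copy_kernel, the scopy_power10.c routine above copies a few leading elements one at a time so that the bulk loop starts on a 32-byte boundary. A self-contained check of that prologue arithmetic, using only the formula copied from the source:)

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Walk every float-aligned residue within a 32-byte block. */
    for (uintptr_t addr = 0; addr < 64; addr += 4) {
        /* Formula from CNAME above: number of scalar floats to copy
           before the destination reaches 32-byte alignment. */
        uintptr_t align = ((32 - (addr & (uintptr_t)0x1F)) >> 2) & 0x7;
        assert(((addr + 4 * align) & 0x1F) == 0);
        printf("addr %% 32 = %2lu -> %lu scalar copies first\n",
               (unsigned long)(addr & 0x1F), (unsigned long)align);
    }
    return 0;
}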
+*****************************************************************************/ + +#define HAVE_KERNEL_16 1 + +static float sdot_kernel_16 (long n, float *x, float *y) +{ + float dot; + + __asm__ + ( + "dcbt 0, %2 \n\t" + "dcbt 0, %3 \n\t" + + "xxlxor 32, 32, 32 \n\t" + "xxlxor 33, 33, 33 \n\t" + "xxlxor 34, 34, 34 \n\t" + "xxlxor 35, 35, 35 \n\t" + "xxlxor 36, 36, 36 \n\t" + "xxlxor 37, 37, 37 \n\t" + "xxlxor 38, 38, 38 \n\t" + "xxlxor 39, 39, 39 \n\t" + + "lxvp 40, 0(%2) \n\t" + "lxvp 42, 32(%2) \n\t" + "lxvp 44, 64(%2) \n\t" + "lxvp 46, 96(%2) \n\t" + "lxvp 48, 0(%3) \n\t" + "lxvp 50, 32(%3) \n\t" + "lxvp 52, 64(%3) \n\t" + "lxvp 54, 96(%3) \n\t" + + "addi %2, %2, 128 \n\t" + "addi %3, %3, 128 \n\t" + + "addic. %1, %1, -32 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "xvmaddasp 32, 40, 48 \n\t" + "xvmaddasp 33, 41, 49 \n\t" + "lxvp 40, 0(%2) \n\t" + "lxvp 48, 0(%3) \n\t" + "xvmaddasp 34, 42, 50 \n\t" + "xvmaddasp 35, 43, 51 \n\t" + "lxvp 42, 32(%2) \n\t" + "lxvp 50, 32(%3) \n\t" + "xvmaddasp 36, 44, 52 \n\t" + "xvmaddasp 37, 45, 53 \n\t" + "lxvp 44, 64(%2) \n\t" + "lxvp 52, 64(%3) \n\t" + "xvmaddasp 38, 46, 54 \n\t" + "xvmaddasp 39, 47, 55 \n\t" + "lxvp 46, 96(%2) \n\t" + "lxvp 54, 96(%3) \n\t" + + "addi %2, %2, 128 \n\t" + "addi %3, %3, 128 \n\t" + + "addic. %1, %1, -32 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "xvmaddasp 32, 40, 48 \n\t" + "xvmaddasp 33, 41, 49 \n\t" + "xvmaddasp 34, 42, 50 \n\t" + "xvmaddasp 35, 43, 51 \n\t" + "xvmaddasp 36, 44, 52 \n\t" + "xvmaddasp 37, 45, 53 \n\t" + "xvmaddasp 38, 46, 54 \n\t" + "xvmaddasp 39, 47, 55 \n\t" + + "xvaddsp 32, 32, 33 \n\t" + "xvaddsp 34, 34, 35 \n\t" + "xvaddsp 36, 36, 37 \n\t" + "xvaddsp 38, 38, 39 \n\t" + + "xvaddsp 32, 32, 34 \n\t" + "xvaddsp 36, 36, 38 \n\t" + + "xvaddsp 32, 32, 36 \n\t" + + "xxsldwi 33, 32, 32, 2 \n\t" + "xvaddsp 32, 32, 33 \n\t" + + "xxsldwi 33, 32, 32, 1 \n\t" + "xvaddsp 32, 32, 33 \n\t" + + "xscvspdp %x0, 32 \n" + + "#dot=%0 n=%1 x=%4=%2 y=%5=%3\n" + : + "=f" (dot), // 0 + "+r" (n), // 1 + "+b" (x), // 2 + "+b" (y) // 3 + : + "m" (*x), + "m" (*y) + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", + "vs48","vs49","vs50","vs51","vs52","vs53","vs54","vs55" + ); + + return dot; +} diff --git a/kernel/power/sdot_microk_power8.c b/kernel/power/sdot_microk_power8.c index bfe100c8b..a8db6a8d6 100644 --- a/kernel/power/sdot_microk_power8.c +++ b/kernel/power/sdot_microk_power8.c @@ -78,10 +78,10 @@ static float sdot_kernel_16 (long n, float *x, float *y) "addi %3, %3, 128 \n\t" "addic. %1, %1, -32 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmaddasp 32, 40, 48 \n\t" "lxvd2x 40, 0, %2 \n\t" @@ -112,9 +112,9 @@ static float sdot_kernel_16 (long n, float *x, float *y) "addi %3, %3, 128 \n\t" "addic. %1, %1, -32 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmaddasp 32, 40, 48 \n\t" "xvmaddasp 33, 41, 49 \n\t" diff --git a/kernel/power/sdot_power10.c b/kernel/power/sdot_power10.c new file mode 100644 index 000000000..b61f0a90d --- /dev/null +++ b/kernel/power/sdot_power10.c @@ -0,0 +1,154 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + + +#include "common.h" + +#if defined(__VEC__) || defined(__ALTIVEC__) +#include "sdot_microk_power10.c" +#endif + + +#ifndef HAVE_KERNEL_16 + +static FLOAT sdot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y) +{ + BLASLONG register i = 0; + FLOAT dot = 0.0; + + while(i < n) + { + dot += y[i] * x[i] + + y[i+1] * x[i+1] + + y[i+2] * x[i+2] + + y[i+3] * x[i+3] + + y[i+4] * x[i+4] + + y[i+5] * x[i+5] + + y[i+6] * x[i+6] + + y[i+7] * x[i+7] ; + + i+=8 ; + + } + return dot; +} + +#endif + +#if defined (DSDOT) +double CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +#else +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +#endif +{ + BLASLONG i=0; + BLASLONG ix=0,iy=0; + double dot = 0.0 ; + +#if defined (DSDOT) + double mydot = 0.0; + FLOAT asmdot = 0.0; +#else + FLOAT mydot=0.0; +#endif + BLASLONG n1; + + if ( n <= 0 ) return(dot); + + if ( (inc_x == 1) && (inc_y == 1) ) + { + + n1 = n & (BLASLONG)(-32); + + if ( n1 ) +#if defined(DSDOT) + { + FLOAT *x1=x; + FLOAT *y1=y; + BLASLONG n2 = 32; + while (i<n1) + { + asmdot = sdot_kernel_16(n2, x1, y1); + mydot += (double)asmdot; + x1+=n2; + y1+=n2; + i+=n2; + } + } +#else + mydot = sdot_kernel_16(n1, x, y); +#endif + + i = n1; + while(i < n) + { +#if defined (DSDOT) + dot += (double)y[i] * (double)x[i] ; +#else + dot += y[i] * x[i] ; +#endif + i++ ; + + } + + dot+=mydot; + return(dot); + + } + + n1 = n & (BLASLONG)(-2); + + while(i < n1) + { +#if defined (DSDOT) + dot += (double)y[iy] * (double)x[ix] + (double)y[iy+inc_y] * (double)x[ix+inc_x] ; +#else + dot += y[iy] * x[ix] + y[iy+inc_y] * x[ix+inc_x] ; +#endif + ix += inc_x*2 ; + iy += inc_y*2 ; + i+=2 ; + + } + + while(i < n) + { +#if defined (DSDOT) + dot += (double)y[iy] * (double)x[ix] ; +#else + dot += y[iy] * x[ix] ; +#endif + ix += inc_x ; + iy += inc_y ; + i++ ; + + } + return(dot); + +} diff --git a/kernel/power/sgemm_kernel_power10.c b/kernel/power/sgemm_kernel_power10.c new file mode 100644 --- /dev/null +++ b/kernel/power/sgemm_kernel_power10.c +#include "common.h" +#include <altivec.h> + +typedef __vector unsigned char vec_t; +typedef FLOAT v4sf_t __attribute__ ((vector_size (16))); +typedef FLOAT v2sf_t __attribute__ ((vector_size (8))); +#if defined(TRMMKERNEL) +#define SAVE_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[0* ldc+J]; \ + rowC[0] = result[0] * alpha; \ + rowC = (v4sf_t *) &CO[1*ldc+J]; \ + rowC[0] = result[1] * alpha; \ + rowC = (v4sf_t *) &CO[2*ldc+J]; \ + rowC[0] = result[2] * alpha; \ + rowC = (v4sf_t *) &CO[3*ldc+J]; \ + rowC[0] = result[3] * alpha; +#define SAVE_ACC1(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[4* ldc+J]; \ + rowC[0] = result[0] * alpha; \ + rowC = (v4sf_t *) &CO[5*ldc+J]; \ + rowC[0] = result[1] * alpha; \ + rowC = (v4sf_t *) &CO[6*ldc+J]; \ + rowC[0] = result[2] * alpha; \ + rowC = (v4sf_t *) &CO[7*ldc+J]; \ + rowC[0] = result[3] * alpha; +#define SAVE4x2_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v2sf_t *) &CO[0* ldc+J]; \ + rowC[0] = result[0] * alpha; \ + rowC = (v2sf_t *) &CO[1* ldc+J]; \ + rowC[0] = result[2] * 
alpha; \ + rowC = (v2sf_t *) &CO[2* ldc+J]; \ + rowC[0] = result[4] * alpha; \ + rowC = (v2sf_t *) &CO[3* ldc+J]; \ + rowC[0] = result[6] * alpha; +#define SAVE4x2_ACC1(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v2sf_t *) &CO[4* ldc+J]; \ + rowC[0] = result[0] * alpha; \ + rowC = (v2sf_t *) &CO[5* ldc+J]; \ + rowC[0] = result[2] * alpha; \ + rowC = (v2sf_t *) &CO[6* ldc+J]; \ + rowC[0] = result[4] * alpha; \ + rowC = (v2sf_t *) &CO[7* ldc+J]; \ + rowC[0] = result[6] * alpha; +#define SAVE2x4_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[0* ldc+J]; \ + rowC[0] = result[0] * alpha; \ + rowC = (v4sf_t *) &CO[1* ldc+J]; \ + rowC[0] = result[1] * alpha; +#else +#define SAVE_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[0* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v4sf_t *) &CO[1*ldc+J]; \ + rowC[0] += result[1] * alpha; \ + rowC = (v4sf_t *) &CO[2*ldc+J]; \ + rowC[0] += result[2] * alpha; \ + rowC = (v4sf_t *) &CO[3*ldc+J]; \ + rowC[0] += result[3] * alpha; +#define SAVE_ACC1(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[4* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v4sf_t *) &CO[5*ldc+J]; \ + rowC[0] += result[1] * alpha; \ + rowC = (v4sf_t *) &CO[6*ldc+J]; \ + rowC[0] += result[2] * alpha; \ + rowC = (v4sf_t *) &CO[7*ldc+J]; \ + rowC[0] += result[3] * alpha; +#define SAVE4x2_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v2sf_t *) &CO[0* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v2sf_t *) &CO[1* ldc+J]; \ + rowC[0] += result[2] * alpha; \ + rowC = (v2sf_t *) &CO[2* ldc+J]; \ + rowC[0] += result[4] * alpha; \ + rowC = (v2sf_t *) &CO[3* ldc+J]; \ + rowC[0] += result[6] * alpha; +#define SAVE4x2_ACC1(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v2sf_t *) &CO[4* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v2sf_t *) &CO[5* ldc+J]; \ + rowC[0] += result[2] * alpha; \ + rowC = (v2sf_t *) &CO[6* ldc+J]; \ + rowC[0] += result[4] * alpha; \ + rowC = (v2sf_t *) &CO[7* ldc+J]; \ + rowC[0] += result[6] * alpha; +#define SAVE2x4_ACC(ACC, J) \ + __builtin_mma_disassemble_acc ((void *)result, ACC); \ + rowC = (v4sf_t *) &CO[0* ldc+J]; \ + rowC[0] += result[0] * alpha; \ + rowC = (v4sf_t *) &CO[1* ldc+J]; \ + rowC[0] += result[1] * alpha; +#endif +#define KERNEL(i, j) \ + __builtin_mma_xvf32gerpp (&acc0, rowB[i], rowA[j]); \ + __builtin_mma_xvf32gerpp (&acc1, rowB[i+1], rowA[j]); \ + __builtin_mma_xvf32gerpp (&acc2, rowB[i], rowA[j+1]); \ + __builtin_mma_xvf32gerpp (&acc3, rowB[i+1], rowA[j+1]); \ + __builtin_mma_xvf32gerpp (&acc4, rowB[i], rowA[j+2]); \ + __builtin_mma_xvf32gerpp (&acc5, rowB[i+1], rowA[j+2]); \ + __builtin_mma_xvf32gerpp (&acc6, rowB[i], rowA[j+3]); \ + __builtin_mma_xvf32gerpp (&acc7, rowB[i+1], rowA[j+3]); + +#define PREFETCH1(x, y) asm volatile ("dcbt %0, %1" : : "r" (x), "b" (y) : "memory"); + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) +#define REFRESH_TEMP_BK(x, y) \ + temp = k - off; +#elif defined(LEFT) +#define REFRESH_TEMP_BK(x, y) \ + temp = off + x; +#else +#define REFRESH_TEMP_BK(x, y) \ + temp = off + y; +#endif +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) +#define REFRESH_POINTERS(x, y) \ + BO = B; \ + REFRESH_TEMP_BK(x, y) +#else +#define REFRESH_POINTERS(x, y) \ + AO += off * x; \ + BO = B + off * y; \ + 
REFRESH_TEMP_BK(x, y) +#endif + +#ifdef LEFT +#define REFRESH_OFF(x) \ + off += x; +#else +#define REFRESH_OFF(x) +#endif + +#ifdef LEFT +#define UPDATE_TEMP(x, y) \ + temp -= x; +#else +#define UPDATE_TEMP(x, y) \ + temp -= y; +#endif + +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) +#define REFRESH_TMP_AFTER_SAVE(x, y) \ + temp = k - off; \ + UPDATE_TEMP(x, y) \ + AO += temp * x; \ + BO += temp * y; +#else +#define REFRESH_TMP_AFTER_SAVE(x, y) +#endif + +#define REFRESH_AFTER_SAVE(x,y) \ + REFRESH_TMP_AFTER_SAVE(x, y) \ + REFRESH_OFF(x) +/************************************************************************************* +* GEMM Kernel +*************************************************************************************/ +int +CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, FLOAT * A, FLOAT * B, + FLOAT * C, BLASLONG ldc +#ifdef TRMMKERNEL + , BLASLONG offset +#endif + ) +{ + BLASLONG i1; +#if defined(TRMMKERNEL) + BLASLONG off; +#endif +#if defined(TRMMKERNEL) && !defined(LEFT) + off = -offset; +#endif + + v4sf_t valpha = { alpha, alpha, alpha, alpha }; + for (i1 = 0; i1 < (n >> 3); i1++) + { + BLASLONG j, temp; + FLOAT *CO; + FLOAT *AO; +#if defined(TRMMKERNEL) && defined(LEFT) + off = offset; +#endif + CO = C; + C += ldc << 3; + AO = A; + PREFETCH1 (A, 128); + PREFETCH1 (A, 256); + for (j = 0; j < (m >> 4); j++) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (16, 8); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7; + BLASLONG l = 0; + vec_t *rowA1 = (vec_t *) & AO[0]; + vec_t *rowB1 = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB1[0], rowA1[0]); + __builtin_mma_xvf32ger (&acc1, rowB1[1], rowA1[0]); + __builtin_mma_xvf32ger (&acc2, rowB1[0], rowA1[1]); + __builtin_mma_xvf32ger (&acc3, rowB1[1], rowA1[1]); + __builtin_mma_xvf32ger (&acc4, rowB1[0], rowA1[2]); + __builtin_mma_xvf32ger (&acc5, rowB1[1], rowA1[2]); + __builtin_mma_xvf32ger (&acc6, rowB1[0], rowA1[3]); + __builtin_mma_xvf32ger (&acc7, rowB1[1], rowA1[3]); + AO += 16; + BO += 8; + temp--; + BLASLONG K = temp / 64; + for (l = 0; l < K; l++) + { + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + KERNEL (0, 0); + KERNEL (2, 4); + KERNEL (4, 8); + KERNEL (6, 12); + KERNEL (8, 16); + KERNEL (10, 20); + KERNEL (12, 24); + KERNEL (14, 28); + KERNEL (16, 32); + KERNEL (18, 36); + KERNEL (20, 40); + KERNEL (22, 44); + KERNEL (24, 48); + KERNEL (26, 52); + KERNEL (28, 56); + KERNEL (30, 60); + KERNEL (32, 64); + KERNEL (34, 68); + KERNEL (36, 72); + KERNEL (38, 76); + KERNEL (40, 80); + KERNEL (42, 84); + KERNEL (44, 88); + KERNEL (46, 92); + KERNEL (48, 96); + KERNEL (50, 100); + KERNEL (52, 104); + KERNEL (54, 108); + KERNEL (56, 112); + KERNEL (58, 116); + KERNEL (60, 120); + KERNEL (62, 124); + KERNEL (64, 128); + KERNEL (66, 132); + KERNEL (68, 136); + KERNEL (70, 140); + KERNEL (72, 144); + KERNEL (74, 148); + KERNEL (76, 152); + KERNEL (78, 156); + KERNEL (80, 160); + KERNEL (82, 164); + KERNEL (84, 168); + KERNEL (86, 172); + KERNEL (88, 176); + KERNEL (90, 180); + KERNEL (92, 184); + KERNEL (94, 188); + KERNEL (96, 192); + KERNEL (98, 196); + KERNEL (100, 200); + KERNEL (102, 204); + KERNEL (104, 208); + KERNEL (106, 212); + KERNEL (108, 216); + KERNEL (110, 220); + KERNEL (112, 224); + KERNEL (114, 228); + KERNEL (116, 232); + KERNEL (118, 236); + KERNEL (120, 240); + KERNEL (122, 244); + KERNEL (124, 248); + KERNEL (126, 252); + AO += 1024; + BO += 
512; + } + if ((temp & 63) >> 5) + { + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + KERNEL (0, 0); + KERNEL (2, 4); + KERNEL (4, 8); + KERNEL (6, 12); + KERNEL (8, 16); + KERNEL (10, 20); + KERNEL (12, 24); + KERNEL (14, 28); + KERNEL (16, 32); + KERNEL (18, 36); + KERNEL (20, 40); + KERNEL (22, 44); + KERNEL (24, 48); + KERNEL (26, 52); + KERNEL (28, 56); + KERNEL (30, 60); + KERNEL (32, 64); + KERNEL (34, 68); + KERNEL (36, 72); + KERNEL (38, 76); + KERNEL (40, 80); + KERNEL (42, 84); + KERNEL (44, 88); + KERNEL (46, 92); + KERNEL (48, 96); + KERNEL (50, 100); + KERNEL (52, 104); + KERNEL (54, 108); + KERNEL (56, 112); + KERNEL (58, 116); + KERNEL (60, 120); + KERNEL (62, 124); + AO += 512; + BO += 256; + } + if ((temp & 31) >> 4) + { + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + KERNEL (0, 0); + KERNEL (2, 4); + KERNEL (4, 8); + KERNEL (6, 12); + KERNEL (8, 16); + KERNEL (10, 20); + KERNEL (12, 24); + KERNEL (14, 28); + KERNEL (16, 32); + KERNEL (18, 36); + KERNEL (20, 40); + KERNEL (22, 44); + KERNEL (24, 48); + KERNEL (26, 52); + KERNEL (28, 56); + KERNEL (30, 60); + AO += 256; + BO += 128; + } + if ((temp & 15) >> 3) + { + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + KERNEL (0, 0); + KERNEL (2, 4); + KERNEL (4, 8); + KERNEL (6, 12); + KERNEL (8, 16); + KERNEL (10, 20); + KERNEL (12, 24); + KERNEL (14, 28); + AO += 128; + BO += 64; + } + if ((temp & 7) >> 2) + { + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + KERNEL (0, 0); + KERNEL (2, 4); + KERNEL (4, 8); + KERNEL (6, 12); + AO += 64; + BO += 32; + } + if ((temp & 3) >> 1) + { + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + KERNEL (0, 0); + KERNEL (2, 4); + AO += 32; + BO += 16; + } + if ((temp & 1) >> 0) + { + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + KERNEL (0, 0); + AO += 16; + BO += 8; + } + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc2, 4); + SAVE_ACC1 (&acc1, 0); + SAVE_ACC1 (&acc3, 4); + SAVE_ACC (&acc4, 8); + SAVE_ACC (&acc6, 12); + SAVE_ACC1 (&acc5, 8); + SAVE_ACC1 (&acc7, 12); +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (16, 8) +#endif + CO += 16; + } + if (m & 8) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (8, 8); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32ger (&acc1, rowB[1], rowA[0]); + __builtin_mma_xvf32ger (&acc2, rowB[0], rowA[1]); + __builtin_mma_xvf32ger (&acc3, rowB[1], rowA[1]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 3]; + rowB = (vec_t *) & BO[l << 3]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[1], rowA[0]); + __builtin_mma_xvf32gerpp (&acc2, rowB[0], rowA[1]); + __builtin_mma_xvf32gerpp (&acc3, rowB[1], rowA[1]); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc2, 4); + SAVE_ACC1 (&acc1, 0); + SAVE_ACC1 (&acc3, 4); + AO += (temp << 3); + BO += (temp << 3); + CO += 8; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (8, 8) +#endif + } + if (m & 4) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (4, 8); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + 
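(This kernel is built from the POWER10 MMA primitives: __builtin_mma_xvf32ger seeds a 4x4 FP32 accumulator tile with an outer product, __builtin_mma_xvf32gerpp accumulates further rank-1 updates, and __builtin_mma_disassemble_acc, used by the SAVE_ACC macros, spills the tile into four vector registers. A minimal self-contained sketch of the same pattern, assuming GCC 10 or later with -mcpu=power10 and 16-byte aligned inputs; sgemm_4x4 is an illustrative helper that zeroes the tile with __builtin_mma_xxsetaccz for simplicity, where the kernel above seeds it with xvf32ger instead:)

#include <altivec.h>

typedef __vector unsigned char vec_t;

/* c[4][4] += alpha * A * B for one 4x4 tile: a holds k columns of four
   floats, b holds k rows of four floats, both 16-byte aligned. */
static void sgemm_4x4(long k, float alpha,
                      const float *a, const float *b, float c[4][4])
{
    __vector_quad acc;
    __vector float result[4];

    __builtin_mma_xxsetaccz(&acc);                /* zero the 4x4 tile */
    for (long l = 0; l < k; l++) {
        vec_t ra = *(const vec_t *)(a + 4 * l);   /* column l of A */
        vec_t rb = *(const vec_t *)(b + 4 * l);   /* row l of B */
        __builtin_mma_xvf32gerpp(&acc, ra, rb);   /* rank-1 update */
    }
    __builtin_mma_disassemble_acc(result, &acc);  /* tile -> 4 VSRs */
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            c[i][j] += result[i][j] * alpha;      /* GEMM write-back */
}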
__builtin_mma_xvf32ger (&acc1, rowB[1], rowA[0]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 2]; + rowB = (vec_t *) & BO[l << 3]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[1], rowA[0]); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC1 (&acc1, 0); + CO += 4; + AO += (temp << 2); + BO += (temp << 3); +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (4, 8) +#endif + } + if (m & 2) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (2, 8); +#else + BO = B; + temp = k; +#endif + + v2sf_t *rowC; + v2sf_t result[8]; + __vector_quad acc0, acc1; + BLASLONG l = 0; + FLOAT t[4] = { 0 }; + t[0] = AO[0], t[1] = AO[1]; + vec_t *rowA = (vec_t *) & t[0]; + vec_t *rowB = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32ger (&acc1, rowB[1], rowA[0]); + for (l = 1; l < temp; l++) + { + t[0] = AO[l << 1], t[1] = AO[(l << 1) + 1]; + rowA = (vec_t *) & t[0]; + rowB = (vec_t *) & BO[l << 3]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[1], rowA[0]); + } + SAVE4x2_ACC (&acc0, 0); + SAVE4x2_ACC1 (&acc1, 0); + CO += 2; + AO += (temp << 1); + BO += (temp << 3); +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (2, 8) +#endif + } + if (m & 1) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (1, 8); +#else + BO = B; + temp = k; +#endif + BLASLONG l = 0; + v4sf_t t = { 0, 0, 0, 0 }; + v4sf_t t1 = { 0, 0, 0, 0 }; + for (l = 0; l < temp; l++) + { + v4sf_t rowA = { AO[l], AO[l], AO[l], AO[l] }; + v4sf_t rowB = { BO[l << 3], BO[(l << 3) + 1], BO[(l << 3) + 2], + BO[(l << 3) + 3] + }; + v4sf_t rowB1 = + { BO[(l << 3) + 4], BO[(l << 3) + 5], BO[(l << 3) + 6], + BO[(l << 3) + 7] + }; + t += rowA * rowB; + t1 += rowA * rowB1; + } + t = t * valpha; + t1 = t1 * valpha; +#if defined(TRMMKERNEL) + CO[0 * ldc] = t[0]; + CO[1 * ldc] = t[1]; + CO[2 * ldc] = t[2]; + CO[3 * ldc] = t[3]; + CO[4 * ldc] = t1[0]; + CO[5 * ldc] = t1[1]; + CO[6 * ldc] = t1[2]; + CO[7 * ldc] = t1[3]; +#else + CO[0 * ldc] += t[0]; + CO[1 * ldc] += t[1]; + CO[2 * ldc] += t[2]; + CO[3 * ldc] += t[3]; + CO[4 * ldc] += t1[0]; + CO[5 * ldc] += t1[1]; + CO[6 * ldc] += t1[2]; + CO[7 * ldc] += t1[3]; +#endif + CO += 1; + AO += temp; + BO += (temp << 3); +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (1, 8) +#endif + } +#if defined(TRMMKERNEL) && !defined(LEFT) + off += 8; // number of values in A +#endif + + B += k << 3; + } + if (n & 4) + { + BLASLONG i, j, temp; +#if defined(TRMMKERNEL) && defined(LEFT) + off = offset; +#endif + FLOAT *CO; + FLOAT *AO; + CO = C; + C += ldc << 2; + AO = A; +#if !defined(TRMMKERNEL) + i = m >> 5; + for (j = 0; j < i; j++) + { + FLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + FLOAT *A1; + A1 = AO + (16 * k); + __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowA1 = (vec_t *) & A1[0]; + vec_t *rowB = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32ger (&acc1, rowB[0], rowA[1]); + __builtin_mma_xvf32ger (&acc2, rowB[0], rowA[2]); + __builtin_mma_xvf32ger (&acc3, rowB[0], rowA[3]); + __builtin_mma_xvf32ger (&acc4, rowB[0], rowA1[0]); + __builtin_mma_xvf32ger (&acc5, rowB[0], rowA1[1]); + __builtin_mma_xvf32ger (&acc6, rowB[0], rowA1[2]); + __builtin_mma_xvf32ger (&acc7, rowB[0], rowA1[3]); + for (l = 1; l < k; l++) + { + rowA = (vec_t *) & AO[l << 4]; + rowA1 = (vec_t *) & A1[l << 4]; + rowB = (vec_t *) & BO[l << 2]; + __builtin_mma_xvf32gerpp (&acc0, 
rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[0], rowA[1]); + __builtin_mma_xvf32gerpp (&acc2, rowB[0], rowA[2]); + __builtin_mma_xvf32gerpp (&acc3, rowB[0], rowA[3]); + __builtin_mma_xvf32gerpp (&acc4, rowB[0], rowA1[0]); + __builtin_mma_xvf32gerpp (&acc5, rowB[0], rowA1[1]); + __builtin_mma_xvf32gerpp (&acc6, rowB[0], rowA1[2]); + __builtin_mma_xvf32gerpp (&acc7, rowB[0], rowA1[3]); + } + + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc1, 4); + CO += 8; + SAVE_ACC (&acc2, 0); + SAVE_ACC (&acc3, 4); + CO += 8; + SAVE_ACC (&acc4, 0); + SAVE_ACC (&acc5, 4); + CO += 8; + SAVE_ACC (&acc6, 0); + SAVE_ACC (&acc7, 4); + CO += 8; + AO += k << 5; + BO += k << 2; + } + i = (m & 31) >> 4; +#else + i = m >> 4; +#endif + for (j = 0; j < i; j++) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (16, 4); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32ger (&acc1, rowB[0], rowA[1]); + __builtin_mma_xvf32ger (&acc2, rowB[0], rowA[2]); + __builtin_mma_xvf32ger (&acc3, rowB[0], rowA[3]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 4]; + rowB = (vec_t *) & BO[l << 2]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[0], rowA[1]); + __builtin_mma_xvf32gerpp (&acc2, rowB[0], rowA[2]); + __builtin_mma_xvf32gerpp (&acc3, rowB[0], rowA[3]); + } + + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc1, 4); + CO += 8; + SAVE_ACC (&acc2, 0); + SAVE_ACC (&acc3, 4); + CO += 8; + AO += temp << 4; + BO += temp << 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (16, 4) +#endif + } + if (m & 8) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (8, 4); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32ger (&acc1, rowB[0], rowA[1]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 3]; + rowB = (vec_t *) & BO[l << 2]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[0], rowA[1]); + } + SAVE_ACC (&acc0, 0); + SAVE_ACC (&acc1, 4); + CO += 8; + AO += temp << 3; + BO += temp << 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (8, 4) +#endif + } + if (m & 4) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (4, 4); +#else + BO = B; + temp = k; +#endif + v4sf_t *rowC; + __vector_quad acc0; + v4sf_t result[4]; + BLASLONG l = 0; + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowB = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + for (l = 1; l < temp; l++) + { + rowA = (vec_t *) & AO[l << 2]; + rowB = (vec_t *) & BO[l << 2]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + } + SAVE_ACC (&acc0, 0); + CO += 4; + AO += temp << 2; + BO += temp << 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (4, 4) +#endif + } + if (m & 2) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (2, 4); +#else + BO = B; + temp = k; +#endif + v2sf_t *rowC; + v2sf_t result[8]; + __vector_quad acc0; + BLASLONG l = 0; + FLOAT t[4] = { 0 }; + t[0] = AO[0], t[1] = AO[1]; + vec_t *rowA = (vec_t *) & t[0]; + vec_t *rowB = (vec_t *) & BO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + for (l = 1; l < temp; l++) + { + t[0] = AO[l << 1], t[1] = 
AO[(l << 1) + 1]; + rowA = (vec_t *) & t[0]; + rowB = (vec_t *) & BO[l << 2]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + } + SAVE4x2_ACC (&acc0, 0); + CO += 2; + AO += temp << 1; + BO += temp << 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (2, 4) +#endif + } + if (m & 1) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (1, 4) +#else + BO = B; + temp = k; +#endif + BLASLONG l = 0; + v4sf_t t = { 0, 0, 0, 0 }; + for (l = 0; l < temp; l++) + { + v4sf_t rowA = { AO[l], AO[l], AO[l], AO[l] }; + v4sf_t rowB = { BO[l << 2], BO[(l << 2) + 1], BO[(l << 2) + 2], + BO[(l << 2) + 3] + }; + t += rowA * rowB; + } + t = t * valpha; +#if defined(TRMMKERNEL) + CO[0 * ldc] = t[0]; + CO[1 * ldc] = t[1]; + CO[2 * ldc] = t[2]; + CO[3 * ldc] = t[3]; +#else + CO[0 * ldc] += t[0]; + CO[1 * ldc] += t[1]; + CO[2 * ldc] += t[2]; + CO[3 * ldc] += t[3]; +#endif + CO += 1; + AO += temp; + BO += temp << 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (1, 4) +#endif + } +#if defined(TRMMKERNEL) && !defined(LEFT) + off += 4; // number of values in A +#endif + + B += k << 2; + } + if (n & 2) + { + BLASLONG i, j, temp; +#if defined(TRMMKERNEL) && defined(LEFT) + off = offset; +#endif + FLOAT *CO; + FLOAT *AO; + CO = C; + C += ldc << 1; + AO = A; +#if !defined(TRMMKERNEL) + i = m >> 5; + for (j = 0; j < i; j++) + { + FLOAT *BO = B; + v4sf_t *rowC; + v4sf_t result[4]; + FLOAT *A1; + A1 = AO + (16 * k); + __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7; + BLASLONG l = 0; + FLOAT t[4] = { 0 }; + t[0] = BO[0], t[1] = BO[1]; + vec_t *rowB = (vec_t *) & t[0]; + vec_t *rowA = (vec_t *) & AO[0]; + vec_t *rowA1 = (vec_t *) & A1[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32ger (&acc1, rowB[0], rowA[1]); + __builtin_mma_xvf32ger (&acc2, rowB[0], rowA[2]); + __builtin_mma_xvf32ger (&acc3, rowB[0], rowA[3]); + __builtin_mma_xvf32ger (&acc4, rowB[0], rowA1[0]); + __builtin_mma_xvf32ger (&acc5, rowB[0], rowA1[1]); + __builtin_mma_xvf32ger (&acc6, rowB[0], rowA1[2]); + __builtin_mma_xvf32ger (&acc7, rowB[0], rowA1[3]); + for (l = 1; l < k; l++) + { + t[0] = BO[l << 1], t[1] = BO[(l << 1) + 1]; + rowB = (vec_t *) & t[0]; + rowA = (vec_t *) & AO[l << 4]; + rowA1 = (vec_t *) & A1[l << 4]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[0], rowA[1]); + __builtin_mma_xvf32gerpp (&acc2, rowB[0], rowA[2]); + __builtin_mma_xvf32gerpp (&acc3, rowB[0], rowA[3]); + __builtin_mma_xvf32gerpp (&acc4, rowB[0], rowA1[0]); + __builtin_mma_xvf32gerpp (&acc5, rowB[0], rowA1[1]); + __builtin_mma_xvf32gerpp (&acc6, rowB[0], rowA1[2]); + __builtin_mma_xvf32gerpp (&acc7, rowB[0], rowA1[3]); + } + SAVE2x4_ACC (&acc0, 0); + SAVE2x4_ACC (&acc1, 4); + SAVE2x4_ACC (&acc2, 8); + SAVE2x4_ACC (&acc3, 12); + CO += 16; + SAVE2x4_ACC (&acc4, 0); + SAVE2x4_ACC (&acc5, 4); + SAVE2x4_ACC (&acc6, 8); + SAVE2x4_ACC (&acc7, 12); + CO += 16; + AO += k << 5; + BO += k << 1; + } + i = (m & 31) >> 4; +#else + i = m >> 4; +#endif + for (j = 0; j < i; j++) + { + FLOAT *BO; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1, acc2, acc3; + BLASLONG l = 0; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (16, 2) +#else + BO = B; + temp = k; +#endif + FLOAT t[4] = { 0 }; + t[0] = BO[0], t[1] = BO[1]; + vec_t *rowB = (vec_t *) & t[0]; + vec_t *rowA = (vec_t *) & AO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32ger (&acc1, rowB[0], rowA[1]); + __builtin_mma_xvf32ger (&acc2, rowB[0], rowA[2]); + __builtin_mma_xvf32ger (&acc3, rowB[0], 
rowA[3]); + for (l = 1; l < temp; l++) + { + t[0] = BO[l << 1], t[1] = BO[(l << 1) + 1]; + rowB = (vec_t *) & t[0]; + rowA = (vec_t *) & AO[l << 4]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[0], rowA[1]); + __builtin_mma_xvf32gerpp (&acc2, rowB[0], rowA[2]); + __builtin_mma_xvf32gerpp (&acc3, rowB[0], rowA[3]); + } + SAVE2x4_ACC (&acc0, 0); + SAVE2x4_ACC (&acc1, 4); + SAVE2x4_ACC (&acc2, 8); + SAVE2x4_ACC (&acc3, 12); + CO += 16; + AO += temp << 4; + BO += temp << 1; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (16, 2) +#endif + } + if (m & 8) + { + FLOAT *BO; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0, acc1; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (8, 2) +#else + BO = B; + temp = k; +#endif + BLASLONG l = 0; + FLOAT t[4] = { 0 }; + t[0] = BO[0], t[1] = BO[1]; + vec_t *rowB = (vec_t *) & t[0]; + vec_t *rowA = (vec_t *) & AO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32ger (&acc1, rowB[0], rowA[1]); + for (l = 1; l < temp; l++) + { + t[0] = BO[l << 1], t[1] = BO[(l << 1) + 1]; + rowB = (vec_t *) & t[0]; + rowA = (vec_t *) & AO[l << 3]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + __builtin_mma_xvf32gerpp (&acc1, rowB[0], rowA[1]); + } + SAVE2x4_ACC (&acc0, 0); + SAVE2x4_ACC (&acc1, 4); + CO += 8; + AO += temp << 3; + BO += temp << 1; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (8, 2) +#endif + } + if (m & 4) + { + FLOAT *BO; + v4sf_t *rowC; + v4sf_t result[4]; + __vector_quad acc0; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (4, 2) +#else + BO = B; + temp = k; +#endif + BLASLONG l = 0; + FLOAT t[4] = { 0 }; + t[0] = BO[0], t[1] = BO[1]; + vec_t *rowB = (vec_t *) & t[0]; + vec_t *rowA = (vec_t *) & AO[0]; + __builtin_mma_xvf32ger (&acc0, rowB[0], rowA[0]); + for (l = 1; l < temp; l++) + { + t[0] = BO[l << 1], t[1] = BO[(l << 1) + 1]; + rowB = (vec_t *) & t[0]; + rowA = (vec_t *) & AO[l << 2]; + __builtin_mma_xvf32gerpp (&acc0, rowB[0], rowA[0]); + } + SAVE2x4_ACC (&acc0, 0); + CO += 4; + AO += temp << 2; + BO += temp << 1; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (4, 2) +#endif + } + if (m & 2) + { + FLOAT *BO; + BLASLONG l = 0; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (2, 2) +#else + BO = B; + temp = k; +#endif + v4sf_t t = { 0, 0, 0, 0 }; + for (l = 0; l < (temp << 1); l += 2) + { + v4sf_t rowA = { AO[l], AO[l], AO[l + 1], AO[l + 1] }; + v4sf_t rowB = { BO[l], BO[l + 1], BO[l], BO[l + 1] }; + t += rowA * rowB; + } + t = t * valpha; +#if defined(TRMMKERNEL) + CO[0 * ldc] = t[0]; + CO[1 * ldc] = t[1]; + CO[0 * ldc + 1] = t[2]; + CO[1 * ldc + 1] = t[3]; +#else + CO[0 * ldc] += t[0]; + CO[1 * ldc] += t[1]; + CO[0 * ldc + 1] += t[2]; + CO[1 * ldc + 1] += t[3]; +#endif + CO += 2; + AO += temp << 1; + BO += temp << 1; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (2, 2) +#endif + } + if (m & 1) + { + FLOAT *BO; + BLASLONG l = 0; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (1, 2) +#else + BO = B; + temp = k; +#endif + v4sf_t t = { 0, 0, 0, 0 }; + for (l = 0; l < temp; l++) + { + v4sf_t rowA = { AO[l], AO[l], 0, 0 }; + v4sf_t rowB = { BO[l << 1], BO[(l << 1) + 1], 0, 0 }; + t += rowA * rowB; + } + t = t * valpha; +#if defined(TRMMKERNEL) + CO[0 * ldc] = t[0]; + CO[1 * ldc] = t[1]; +#else + CO[0 * ldc] += t[0]; + CO[1 * ldc] += t[1]; +#endif + CO += 1; + AO += temp; + BO += temp << 1; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (1, 2) +#endif + } +#if defined(TRMMKERNEL) && !defined(LEFT) + off += 2; // number of values in A +#endif + + B += k << 1; + } + if (n & 1) + { + 
BLASLONG i, temp; +#if defined(TRMMKERNEL) && defined(LEFT) + off = offset; +#endif + FLOAT *CO; + FLOAT *AO; + CO = C; + C += ldc; + AO = A; + for (i = 0; i < (m >> 4); i++) + { + FLOAT *BO; + BLASLONG l = 0; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (16, 1) +#else + BO = B; + temp = k; +#endif + + v4sf_t t = { 0, 0, 0, 0 }; + v4sf_t t1 = { 0, 0, 0, 0 }; + v4sf_t t2 = { 0, 0, 0, 0 }; + v4sf_t t3 = { 0, 0, 0, 0 }; + for (l = 0; l < temp; l++) + { + v4sf_t rowB = { BO[l], BO[l], BO[l], BO[l] }; + v4sf_t rowA = { AO[l << 4], AO[(l << 4) + 1], AO[(l << 4) + 2], + AO[(l << 4) + 3] + }; + v4sf_t rowA1 = + { AO[(l << 4) + 4], AO[(l << 4) + 5], AO[(l << 4) + 6], + AO[(l << 4) + 7] + }; + v4sf_t rowA2 = + { AO[(l << 4) + 8], AO[(l << 4) + 9], AO[(l << 4) + 10], + AO[(l << 4) + 11] + }; + v4sf_t rowA3 = + { AO[(l << 4) + 12], AO[(l << 4) + 13], AO[(l << 4) + 14], + AO[(l << 4) + 15] + }; + t += rowA * rowB; + t1 += rowA1 * rowB; + t2 += rowA2 * rowB; + t3 += rowA3 * rowB; + } + t = t * valpha; + t1 = t1 * valpha; + t2 = t2 * valpha; + t3 = t3 * valpha; +#if defined(TRMMKERNEL) + CO[0] = t[0]; + CO[1] = t[1]; + CO[2] = t[2]; + CO[3] = t[3]; + CO[4] = t1[0]; + CO[5] = t1[1]; + CO[6] = t1[2]; + CO[7] = t1[3]; + CO[8] = t2[0]; + CO[9] = t2[1]; + CO[10] = t2[2]; + CO[11] = t2[3]; + CO[12] = t3[0]; + CO[13] = t3[1]; + CO[14] = t3[2]; + CO[15] = t3[3]; +#else + CO[0] += t[0]; + CO[1] += t[1]; + CO[2] += t[2]; + CO[3] += t[3]; + CO[4] += t1[0]; + CO[5] += t1[1]; + CO[6] += t1[2]; + CO[7] += t1[3]; + CO[8] += t2[0]; + CO[9] += t2[1]; + CO[10] += t2[2]; + CO[11] += t2[3]; + CO[12] += t3[0]; + CO[13] += t3[1]; + CO[14] += t3[2]; + CO[15] += t3[3]; +#endif + AO += temp << 4; + BO += temp; + CO += 16; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (16, 1) +#endif + } + if (m & 8) + { + FLOAT *BO; + BLASLONG l = 0; + v4sf_t t = { 0, 0, 0, 0 }; + v4sf_t t1 = { 0, 0, 0, 0 }; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (8, 1) +#else + BO = B; + temp = k; +#endif + + for (l = 0; l < temp; l++) + { + v4sf_t rowB = { BO[l], BO[l], BO[l], BO[l] }; + v4sf_t rowA = { AO[l << 3], AO[(l << 3) + 1], AO[(l << 3) + 2], + AO[(l << 3) + 3] + }; + v4sf_t rowA1 = + { AO[(l << 3) + 4], AO[(l << 3) + 5], AO[(l << 3) + 6], + AO[(l << 3) + 7] + }; + t += rowA * rowB; + t1 += rowA1 * rowB; + } + t = t * valpha; + t1 = t1 * valpha; +#if defined(TRMMKERNEL) + CO[0] = t[0]; + CO[1] = t[1]; + CO[2] = t[2]; + CO[3] = t[3]; + CO[4] = t1[0]; + CO[5] = t1[1]; + CO[6] = t1[2]; + CO[7] = t1[3]; +#else + CO[0] += t[0]; + CO[1] += t[1]; + CO[2] += t[2]; + CO[3] += t[3]; + CO[4] += t1[0]; + CO[5] += t1[1]; + CO[6] += t1[2]; + CO[7] += t1[3]; +#endif + AO += temp << 3; + BO += temp; + CO += 8; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (8, 1) +#endif + } + if (m & 4) + { + FLOAT *BO; + BLASLONG l = 0; + v4sf_t t = { 0, 0, 0, 0 }; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (4, 1) +#else + BO = B; + temp = k; +#endif + + for (l = 0; l < temp; l++) + { + v4sf_t rowB = { BO[l], BO[l], BO[l], BO[l] }; + v4sf_t rowA = { AO[l << 2], AO[(l << 2) + 1], AO[(l << 2) + 2], + AO[(l << 2) + 3] + }; + t += rowA * rowB; + } + t = t * valpha; +#if defined(TRMMKERNEL) + CO[0] = t[0]; + CO[1] = t[1]; + CO[2] = t[2]; + CO[3] = t[3]; +#else + CO[0] += t[0]; + CO[1] += t[1]; + CO[2] += t[2]; + CO[3] += t[3]; +#endif + AO += temp << 2; + BO += temp; + CO += 4; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (4, 1) +#endif + } + if (m & 2) + { + FLOAT *BO; + BLASLONG l = 0; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (2, 1) +#else + BO = B; + temp = k; +#endif + + 
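(The REFRESH_POINTERS / REFRESH_TEMP_BK macros used at every block size above implement the standard TRMM bookkeeping: only part of the K dimension contributes to a triangular update, so the effective inner-loop length is derived from the running offset and the panel pointers are advanced past the unused data. A plain-C restatement of REFRESH_TEMP_BK under the same LEFT/TRANSA conventions; effective_k is an illustrative name:)

/* x = rows of the current A micro-panel, y = columns of the
   B micro-panel, off = running triangular offset. */
static long effective_k(long k, long off, long x, long y,
                        int left, int transa)
{
    if ((left && !transa) || (!left && transa))
        return k - off;
    return left ? off + x : off + y;
}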
v4sf_t t = { 0, 0, 0, 0 }; + for (l = 0; l < temp; l++) + { + v4sf_t rowB = { BO[l], BO[l], 0, 0 }; + v4sf_t rowA = { AO[l << 1], AO[(l << 1) + 1], 0, 0 }; + t += rowA * rowB; + } + t = t * valpha; +#if defined(TRMMKERNEL) + CO[0] = t[0]; + CO[1] = t[1]; +#else + CO[0] += t[0]; + CO[1] += t[1]; +#endif + AO += temp << 1; + BO += temp; + CO += 2; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (2, 1) +#endif + } + if (m & 1) + { + FLOAT *BO; +#if defined(TRMMKERNEL) + REFRESH_POINTERS (1, 1) +#else + BO = B; + temp = k; +#endif + + BLASLONG l = 0; + FLOAT t = 0; + for (l = 0; l < temp; l++) + { + t += AO[l] * BO[l]; + } + AO += temp; + BO += temp; +#if defined(TRMMKERNEL) + CO[0] = t * alpha; +#else + CO[0] += t * alpha; +#endif + CO += 1; +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE (1, 1) +#endif + } + +#if defined(TRMMKERNEL) && !defined(LEFT) + off += 1; // number of values in A +#endif + B += k; + } + return 0; +} diff --git a/kernel/power/sgemm_kernel_power9.S b/kernel/power/sgemm_kernel_power9.S index a44659468..7a0f3143e 100644 --- a/kernel/power/sgemm_kernel_power9.S +++ b/kernel/power/sgemm_kernel_power9.S @@ -32,7 +32,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LOAD ld #define STACKSIZE (512 ) - +#define FLINK_SAVE (STACKSIZE+16) /* 16($r12) */ #define M r3 #define N r4 #define K r5 @@ -91,7 +91,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. PROFCODE addi SP, SP, -STACKSIZE - li r0, 0 + mflr r0 + stfd f14, 0(SP) stfd f15, 8(SP) @@ -137,19 +138,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. std r14, 280(SP) - stxv v20, 288(SP) - stxv v21, 304(SP) - stxv v22, 320(SP) - stxv v23, 336(SP) - stxv v24, 352(SP) - stxv v25, 368(SP) - stxv v26, 384(SP) - stxv v27, 400(SP) - stxv v28, 416(SP) - stxv v29, 432(SP) - stxv v30, 448(SP) - stxv v31, 464(SP) - + stxv vs52, 288(SP) + stxv vs53, 304(SP) + stxv vs54, 320(SP) + stxv vs55, 336(SP) + stxv vs56, 352(SP) + stxv vs57, 368(SP) + stxv vs58, 384(SP) + stxv vs59, 400(SP) + stxv vs60, 416(SP) + stxv vs61, 432(SP) + stxv vs62, 448(SP) + stxv vs63, 464(SP) + std r0, FLINK_SAVE(SP) #if defined(TRMMKERNEL) @@ -157,72 +158,54 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif slwi LDC, LDC, 2 - -/* cmpwi cr0, M, 0 - ble .L999_H1 - cmpwi cr0, N, 0 - ble .L999_H1 - cmpwi cr0, K, 0 - ble .L999_H1 -*/ /*alpha is stored in f1. 
convert to single and splat*/ xscvdpspn alpha_r,vs1 - xxspltw alpha_r,alpha_r,0 - + xxspltw alpha_r,alpha_r,0 /*load reverse permute mask for big endian uint128 = 0xc0d0e0f08090a0b0405060700010203 */ lis T2, perm_const2@highest - ori T2, T2, perm_const2@higher - rldicr T2, T2, 32, 31 - oris T2, T2, perm_const2@h - ori T2, T2, perm_const2@l - lis T1, perm_const1@highest + lis T3, save_permute_12@highest + lis T4, save_permute_11@highest + lis T5, save_permute_22@highest + lis T6, save_permute_21@highest + ori T2, T2, perm_const2@higher ori T1, T1, perm_const1@higher + ori T3, T3, save_permute_12@higher + ori T4, T4, save_permute_11@higher + ori T5, T5, save_permute_22@higher + ori T6, T6, save_permute_21@higher + rldicr T2, T2, 32, 31 rldicr T1, T1, 32, 31 + rldicr T3, T3, 32, 31 + rldicr T4, T4, 32, 31 + rldicr T5, T5, 32, 31 + rldicr T6, T6, 32, 31 + oris T2, T2, perm_const2@h oris T1, T1, perm_const1@h + oris T3, T3, save_permute_12@h + oris T4, T4, save_permute_11@h + oris T5, T5, save_permute_22@h + oris T6, T6, save_permute_21@h + ori T2, T2, perm_const2@l ori T1, T1, perm_const1@l - + ori T3, T3, save_permute_12@l + ori T4, T4, save_permute_11@l + ori T5, T5, save_permute_22@l + ori T6, T6, save_permute_21@l + li r0,0 mtvsrdd permute_mask,T2,T1 - - lis T2, save_permute_12@highest - ori T2, T2, save_permute_12@higher - rldicr T2, T2, 32, 31 - oris T2, T2, save_permute_12@h - ori T2, T2, save_permute_12@l - - lis T1, save_permute_11@highest - ori T1, T1, save_permute_11@higher - rldicr T1, T1, 32, 31 - oris T1, T1, save_permute_11@h - ori T1, T1, save_permute_11@l - - mtvsrdd save_permute_1,T2,T1 - - lis T2, save_permute_22@highest - ori T2, T2, save_permute_22@higher - rldicr T2, T2, 32, 31 - oris T2, T2, save_permute_22@h - ori T2, T2, save_permute_22@l - - lis T1, save_permute_21@highest - ori T1, T1, save_permute_21@higher - rldicr T1, T1, 32, 31 - oris T1, T1, save_permute_21@h - ori T1, T1, save_permute_21@l - - mtvsrdd save_permute_2,T2,T1 + mtvsrdd save_permute_1,T3,T4 + mtvsrdd save_permute_2,T5,T6 #include "sgemm_logic_power9.S" -.L999: - addi r3, 0, 0 - +.L999: lfd f14, 0(SP) lfd f15, 8(SP) lfd f16, 16(SP) @@ -264,23 +247,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
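(The prologue above materializes its 64-bit permute constants with the five-instruction lis/ori/rldicr/oris/ori idiom, since PPC64 has no single load-64-bit-immediate instruction; the @highest/@higher/@h/@l relocations select the four 16-bit slices of the constant. A plain-C restatement of what the sequence computes, checked against the perm_const values given in the source comment; build_imm64 is an illustrative helper:)

#include <assert.h>
#include <stdint.h>

static uint64_t build_imm64(uint16_t highest, uint16_t higher,
                            uint16_t h, uint16_t l)
{
    uint64_t r = (uint32_t)highest << 16;   /* lis                 */
    r |= higher;                            /* ori                 */
    r <<= 32;                               /* rldicr r, r, 32, 31 */
    r |= (uint32_t)h << 16;                 /* oris                */
    r |= l;                                 /* ori                 */
    return r;
}

int main(void)
{
    /* The two halves of the byte-reverse permute mask from the source. */
    assert(build_imm64(0x0c0d, 0x0e0f, 0x0809, 0x0a0b) == 0x0c0d0e0f08090a0bULL);
    assert(build_imm64(0x0405, 0x0607, 0x0001, 0x0203) == 0x0405060700010203ULL);
    return 0;
}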
ld r16, 264(SP) ld r15, 272(SP) ld r14, 280(SP) - - lxv v20, 288(SP) - lxv v21, 304(SP) - lxv v22, 320(SP) - lxv v23, 336(SP) - lxv v24, 352(SP) - lxv v25, 368(SP) - lxv v26, 384(SP) - lxv v27, 400(SP) - lxv v28, 416(SP) - lxv v29, 432(SP) - lxv v30, 448(SP) - lxv v31, 464(SP) + ld r0, FLINK_SAVE(SP) - addi SP, SP, STACKSIZE + lxv vs52, 288(SP) + lxv vs53, 304(SP) + lxv vs54, 320(SP) + lxv vs55, 336(SP) + lxv vs56, 352(SP) + lxv vs57, 368(SP) + lxv vs58, 384(SP) + lxv vs59, 400(SP) + mtlr r0 + lxv vs60, 416(SP) + lxv vs61, 432(SP) + lxv vs62, 448(SP) + lxv vs63, 464(SP) + + addi SP, SP, STACKSIZE blr + EPILOGUE #endif diff --git a/kernel/power/sgemm_logic_power9.S b/kernel/power/sgemm_logic_power9.S index 300e30470..a34ed32b8 100644 --- a/kernel/power/sgemm_logic_power9.S +++ b/kernel/power/sgemm_logic_power9.S @@ -1,5 +1,94 @@ #define MY_ALIGN .align 3 +b L8 + MY_ALIGN +LSGEMM_L8x16_LMAIN_SUB: + LOAD8x16_2 + MY_ALIGN + +LSGEMM_L8x16_LOOP: + KERNEL8x16_L2 128,64,0,0 +LSGEMM_L8x16_K128: + KERNEL8x16_L2 128,64,1,0 + KERNEL8x16_I1_L4_2 128,64, 1,0 + KERNEL8x16_I1_L4_2 128,64, 2,0 + KERNEL8x16_I1_L4_2 128,64, 3,0 + KERNEL8x16_I1_L4_2 128,64, 4,0 + KERNEL8x16_I1_L4_2 128,64, 5,0 + KERNEL8x16_I1_L4_2 128,64, 6,0 + KERNEL8x16_I1_L4_2 128,64, 7,0 + KERNEL8x16_I1_L4_2 128,64, 8,0 + KERNEL8x16_I1_L4_2 128,64, 9,0 + KERNEL8x16_I1_L4_2 128,64, 10,0 + KERNEL8x16_I1_L4_2 128,64, 11,0 + KERNEL8x16_I1_L4_2 128,64, 12,0 + KERNEL8x16_I1_L4_2 128,64, 13,0 + KERNEL8x16_I1_L4_2 128,64, 14,0 + KERNEL8x16_I1_L4_2 128,64, 15,0 + KERNEL8x16_I1_L4_2 128,64, 16,0 + KERNEL8x16_I1_L4_2 128,64, 17,0 + KERNEL8x16_I1_L4_2 128,64, 18,0 + KERNEL8x16_I1_L4_2 128,64, 19,0 + KERNEL8x16_I1_L4_2 128,64, 20,0 + KERNEL8x16_I1_L4_2 128,64, 21,0 + KERNEL8x16_I1_L4_2 128,64, 22,0 + KERNEL8x16_I1_L4_2 128,64, 23,0 + KERNEL8x16_I1_L4_2 128,64, 24,0 + KERNEL8x16_I1_L4_2 128,64, 25,0 + KERNEL8x16_I1_L4_2 128,64, 26,0 + KERNEL8x16_I1_L4_2 128,64, 27,0 + KERNEL8x16_I1_L4_2 128,64, 28,0 + KERNEL8x16_I1_L4_2 128,64, 29,0 + KERNEL8x16_I1_L4_2 128,64, 30,0 + KERNEL8x16_I1_L4_2 128,64, 31,1 + bdnz LSGEMM_L8x16_LOOP + + MY_ALIGN +LSGEMM_L8x16_LOOP_END: + END8x16_2 + blr + + MY_ALIGN +LSGEMM_L8x16_L64_SUB: + LOAD8x16_2 + KERNEL8x16_I1_L4_2 128,64, 0,0 + KERNEL8x16_I1_L4_2 128,64, 1,0 + KERNEL8x16_I1_L4_2 128,64, 2,0 + KERNEL8x16_I1_L4_2 128,64,3,0 + KERNEL8x16_I1_L4_2 128,64,4,0 + KERNEL8x16_I1_L4_2 128,64,5,0 + KERNEL8x16_I1_L4_2 128,64,6,0 + KERNEL8x16_I1_L4_2 128,64,7,0 + KERNEL8x16_I1_L4_2 128,64,8,0 + KERNEL8x16_I1_L4_2 128,64,9,0 + KERNEL8x16_I1_L4_2 128,64,10,0 + KERNEL8x16_I1_L4_2 128,64,11,0 + KERNEL8x16_I1_L4_2 128,64,12,0 + KERNEL8x16_I1_L4_2 128,64,13,0 + KERNEL8x16_I1_L4_2 128,64,14,0 + KERNEL8x16_I1_L4_3 128,64,15,1 + blr +LSGEMM_L8x16_L32_SUB: + LOAD8x16_2 + KERNEL8x16_I1_L4_2 128,64,0,0 + KERNEL8x16_I1_L4_2 128,64,1,0 + KERNEL8x16_I1_L4_2 128,64,2,0 + KERNEL8x16_I1_L4_2 128,64,3,0 + KERNEL8x16_I1_L4_2 128,64,4,0 + KERNEL8x16_I1_L4_2 128,64,5,0 + KERNEL8x16_I1_L4_2 128,64,6,0 + KERNEL8x16_I1_L4_3 128,64,7,1 + blr + +LSGEMM_L8x16_L16_SUB: + LOAD8x16_2 + KERNEL8x16_I1_L4_2 128,64,0,0 + KERNEL8x16_I1_L4_2 128,64,1,0 + KERNEL8x16_I1_L4_2 128,64,2,0 + KERNEL8x16_I1_L4_3 128,64,3,1 + blr + +L8: #if defined(TRMMKERNEL) && !defined(LEFT) neg TEMP_REG, OFFSET #endif @@ -38,125 +127,95 @@ LSGEMM_L8x16_BEGIN: #if defined(TRMMKERNEL) REFRESH_TEMP_BK T11,K,TEMP_REG,16,8 mr T12, T11 - addi T12,T12, -1 - srawi. L, T12, 6 /**(T11-1) % 64x */ + addi T12,T12, -2 + srawi. L, T12, 7 /**(T11-2) % 128x */ #else mr T12, K - addi T12,T12, -1 - srawi. 
L, T12, 6 /**(K-1) % 64x */ + addi T12,T12, -2 + srawi. L, T12, 7 /**(K-2) % 128x */ #endif - ZERO8x16 + ZERO8x16 ble LSGEMM_L8x16_SUB0 - - MY_ALIGN -LSGEMM_L8x16_LOOP_START: - - LOAD8x16_0 /*we already zeroed */ - ##OffsetA=64 OffsetB=32 - addi AO,AO,2112 - addi BO,BO,32 - - mtctr L - - MY_ALIGN - -LSGEMM_L8x16_LOOP: - - KERNEL8x16_I1_L4_2 -2048,0, 0,0 - KERNEL8x16_I1_L4_2 -2048,0, 1,0 - KERNEL8x16_I1_L4_2 -2048,0, 2,0 - KERNEL8x16_I1_L4_2 -2048,0, 3,0 - KERNEL8x16_I1_L4_2 -2048,0, 4,0 - KERNEL8x16_I1_L4_2 -2048,0, 5,0 - KERNEL8x16_I1_L4_2 -2048,0, 6,0 - KERNEL8x16_I1_L4_2 -2048,0, 7,0 - KERNEL8x16_I1_L4_2 -2048,0, 8,0 - KERNEL8x16_I1_L4_2 -2048,0, 9,0 - KERNEL8x16_I1_L4_2 -2048,0, 10,0 - KERNEL8x16_I1_L4_2 -2048,0, 11,0 - KERNEL8x16_I1_L4_2 -2048,0, 12,0 - KERNEL8x16_I1_L4_2 -2048,0, 13,0 - KERNEL8x16_I1_L4_2 -2048,0, 14,0 - KERNEL8x16_I1_L4_2 -2048,0, 15,1 - - bdnz LSGEMM_L8x16_LOOP - - MY_ALIGN -LSGEMM_L8x16_LOOP_END: - - END8x16 0, AO, BO, -2048, 0 - - b LSGEMM_L8x16_SUB1 + mtctr L + bl LSGEMM_L8x16_LMAIN_SUB + andi. L, T12, 127 + ble LSGEMM_L8x16_SAVE + b LSGEMM_L8x16_SUB2 MY_ALIGN LSGEMM_L8x16_SUB0: #if defined(TRMMKERNEL) - andi. L, T11, 127 -#else - andi. L, K, 127 -#endif - b LSGEMM_L8x16_SUB2 - MY_ALIGN -LSGEMM_L8x16_SUB1: -#if defined(TRMMKERNEL) - andi. L, T12, 63 + andi. L, T11, 255 + cmpwi T11,129 #else - andi. L, T12, 63 -#endif - ble LSGEMM_L8x16_SAVE + andi. L, K, 255 + cmpwi K,129 +#endif + li T10,1 + bne CMP8x16_128K + addi BO,BO,-32 + addi AO,AO,-64 + LOAD8x16 64,32 + END8x16_WITHOUT_ADD + LOAD8x16_2O AO,BO, 128, 64 + mtctr T10 + bl LSGEMM_L8x16_K128 + b LSGEMM_L8x16_SAVE +CMP8x16_128K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T11,128 +#else + cmpwi K,128 +#endif + bne LSGEMM_L8x16_SUB2 + MY_ALIGN + mtctr T10 + addi BO,BO,-64 + addi AO,AO,-128 + LOAD8x16_2O AO,BO, 128,64 + bl LSGEMM_L8x16_K128 + b LSGEMM_L8x16_SAVE MY_ALIGN LSGEMM_L8x16_SUB2: - - srawi. T10,L, 5 + andi. T10,L,64 + ble LSGEMM_L8x16_SUB2_32 + bl LSGEMM_L8x16_L64_SUB + MY_ALIGN +LSGEMM_L8x16_SUB2_32: + andi. T10,L, 32 ble LSGEMM_L8x16_SUB2_16 - mtctr T10 - MY_ALIGN -LSGEMM_L8x16_SUB2_LOOP: - LOAD8x16_0 - KERNEL8x16_I1_L4_2 64,32, 0,0 - KERNEL8x16_I1_L4_2 64,32, 1,0 - KERNEL8x16_I1_L4_2 64,32, 2,0 - KERNEL8x16_I1_L4_2 64,32, 3,0 - KERNEL8x16_I1_L4_2 64,32, 4,0 - KERNEL8x16_I1_L4_2 64,32, 5,0 - KERNEL8x16_I1_L4_2 64,32, 6,0 - KERNEL8x16_I1_L4_3 64,32, 7,1 - bdnz LSGEMM_L8x16_SUB2_LOOP - MY_ALIGN + bl LSGEMM_L8x16_L32_SUB + MY_ALIGN LSGEMM_L8x16_SUB2_16: andi. T10,L, 16 ble LSGEMM_L8x16_SUB2_8 - LOAD8x16_0 - KERNEL8x16_I1_L4_2 64,32, 0,0 - KERNEL8x16_I1_L4_2 64,32, 1,0 - KERNEL8x16_I1_L4_2 64,32, 2,0 - KERNEL8x16_I1_L4_3 64,32, 3,1 + bl LSGEMM_L8x16_L16_SUB MY_ALIGN LSGEMM_L8x16_SUB2_8: andi. T10,L, 8 ble LSGEMM_L8x16_SUB2_4 - LOAD8x16_0 - KERNEL8x16_I1_L4_2 64,32, 0,0 - KERNEL8x16_I1_L4_3 64,32, 1,1 + LOAD8x16_2 + KERNEL8x16_I1_L4_2 128,64, 0,0 + KERNEL8x16_I1_L4_3 128,64, 1,1 MY_ALIGN LSGEMM_L8x16_SUB2_4: andi. T10,L, 4 ble LSGEMM_L8x16_SUB2_2 - LOAD8x16_0 - KERNEL8x16_I1_L4_3 64,32, 0,1 + LOAD8x16_2 + KERNEL8x16_I1_L4_3 128,64, 0,1 MY_ALIGN LSGEMM_L8x16_SUB2_2: andi. T10,L, 2 ble LSGEMM_L8x16_SUB2_1 - LOAD8x16_0 - KERNEL8x16_I1_L2_3 64,32, 0,1 + LOAD8x16_2 + KERNEL8x16_E2 128,64, 0,1 MY_ALIGN LSGEMM_L8x16_SUB2_1: andi. T10,L, 1 ble LSGEMM_L8x16_SAVE KERNEL8x16 0 -# addic. 
L, L, -1 -# bgt LSGEMM_L8x16_SUB2 + MY_ALIGN LSGEMM_L8x16_SAVE: diff --git a/kernel/power/sgemm_macros_16x8_power8.S b/kernel/power/sgemm_macros_16x8_power8.S index 98414857f..9bcfca827 100644 --- a/kernel/power/sgemm_macros_16x8_power8.S +++ b/kernel/power/sgemm_macros_16x8_power8.S @@ -38,7 +38,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=8 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x16_1', ` +#else .macro LOAD8x16_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -63,9 +67,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 128 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_I1', ` +#else .macro KERNEL8x16_I1 +#endif lxvw4x vs4, o0, AO @@ -133,9 +145,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs63, vs3, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_1', ` +#else .macro KERNEL8x16_1 +#endif lxvw4x vs4, o0, AO @@ -203,9 +223,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs3, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_2', ` +#else .macro KERNEL8x16_2 +#endif lxvw4x vs0, o0, AO @@ -273,9 +301,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs7, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_E2', ` +#else .macro KERNEL8x16_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -319,9 +355,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs7, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_SUBI1', ` +#else .macro KERNEL8x16_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -389,9 +433,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs63, vs3, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_SUB1', ` +#else .macro KERNEL8x16_SUB1 +#endif lxvw4x vs0, o0, AO @@ -459,9 +511,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs3, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x16', ` +#else .macro SAVE8x16 +#endif mr T1, CO @@ -698,14 +758,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=8 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x8_1', ` +#else .macro LOAD8x8_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -728,9 +796,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 128 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_I1', ` +#else .macro KERNEL8x8_I1 +#endif lxvw4x vs4, o0, AO @@ -780,9 +856,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_1', ` +#else .macro KERNEL8x8_1 +#endif lxvw4x vs4, o0, AO @@ -832,9 +916,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
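(The KERNEL8x16_1 / KERNEL8x16_2 macro pair wrapped above, like the _L2/_I1_L4_2 variants in the restructured logic file, implements classic software double buffering: one variant loads the next A/B panel into a second register set while issuing FMAs on the current set, the other swaps the roles, and the _E2 form drains the last buffered panel without loading past the end. A scalar C sketch of that schedule; dot_pipelined is an illustrative helper and n is assumed positive:)

static float dot_pipelined(const float *a, const float *b, long n)
{
    float acc = 0.0f;
    float a_cur = a[0], b_cur = b[0];       /* current operands */
    for (long i = 1; i < n; i++) {
        float a_nxt = a[i], b_nxt = b[i];   /* load the next pair ...   */
        acc += a_cur * b_cur;               /* ... while using current  */
        a_cur = a_nxt;
        b_cur = b_nxt;
    }
    acc += a_cur * b_cur;                   /* drain step: no load */
    return acc;
}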
xvmaddasp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_2', ` +#else .macro KERNEL8x8_2 +#endif lxvw4x vs0, o0, AO @@ -884,9 +976,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs5, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_E2', ` +#else .macro KERNEL8x8_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -914,9 +1014,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs5, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_SUBI1', ` +#else .macro KERNEL8x8_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -966,9 +1074,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_SUB1', ` +#else .macro KERNEL8x8_SUB1 +#endif lxvw4x vs0, o0, AO @@ -1018,9 +1134,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x8', ` +#else .macro SAVE8x8 +#endif mr T1, CO @@ -1193,14 +1317,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=8 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x4_1', ` +#else .macro LOAD8x4_1 +#endif lxvw4x vs0, o0, AO @@ -1222,9 +1354,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 128 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_I1', ` +#else .macro KERNEL8x4_I1 +#endif lxvw4x vs4, o0, AO @@ -1265,9 +1405,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_1', ` +#else .macro KERNEL8x4_1 +#endif lxvw4x vs4, o0, AO @@ -1308,9 +1456,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_2', ` +#else .macro KERNEL8x4_2 +#endif lxvw4x vs0, o0, AO @@ -1351,9 +1507,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs4, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_E2', ` +#else .macro KERNEL8x4_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -1373,9 +1537,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs4, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_SUBI1', ` +#else .macro KERNEL8x4_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -1416,9 +1588,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_SUB1', ` +#else .macro KERNEL8x4_SUB1 +#endif lxvw4x vs0, o0, AO @@ -1459,9 +1639,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x4', ` +#else .macro SAVE8x4 +#endif mr T1, CO @@ -1602,14 +1790,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=8 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x2_1', ` +#else .macro LOAD8x2_1 +#endif lxsspx vs0, o0, AO lxsspx vs1, o4, AO @@ -1633,9 +1829,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 128 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_I1', ` +#else .macro KERNEL8x2_I1 +#endif lxsspx vs4, o0, AO @@ -1686,9 +1890,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_1', ` +#else .macro KERNEL8x2_1 +#endif lxsspx vs4, o0, AO @@ -1739,9 +1951,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_2', ` +#else .macro KERNEL8x2_2 +#endif lxsspx vs0, o0, AO @@ -1792,9 +2012,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs5, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_E2', ` +#else .macro KERNEL8x2_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -1822,9 +2050,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs5, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_SUBI1', ` +#else .macro KERNEL8x2_SUBI1 +#endif lxsspx vs0, o0, AO @@ -1875,9 +2111,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_SUB1', ` +#else .macro KERNEL8x2_SUB1 +#endif lxsspx vs0, o0, AO @@ -1928,9 +2172,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x2', ` +#else .macro SAVE8x2 +#endif mr T1, CO @@ -2103,14 +2355,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=8 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x1_1', ` +#else .macro LOAD8x1_1 +#endif lxsspx vs0, o0, AO @@ -2133,9 +2393,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 128 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_I1', ` +#else .macro KERNEL8x1_I1 +#endif lxsspx vs4, o0, AO @@ -2177,9 +2445,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_1', ` +#else .macro KERNEL8x1_1 +#endif lxsspx vs4, o0, AO @@ -2221,9 +2497,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_2', ` +#else .macro KERNEL8x1_2 +#endif lxsspx vs0, o0, AO @@ -2265,9 +2549,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs4, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_E2', ` +#else .macro KERNEL8x1_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -2287,9 +2579,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs4, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_SUBI1', ` +#else .macro KERNEL8x1_SUBI1 +#endif lxsspx vs0, o0, AO @@ -2331,9 +2631,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_SUB1', ` +#else .macro KERNEL8x1_SUB1 +#endif lxsspx vs0, o0, AO @@ -2375,9 +2683,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x1', ` +#else .macro SAVE8x1 +#endif mr T1, CO @@ -2518,14 +2834,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 4 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x16_1', ` +#else .macro LOAD4x16_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -2543,9 +2867,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_I1', ` +#else .macro KERNEL4x16_I1 +#endif lxvw4x vs4, o0, AO @@ -2586,9 +2918,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs3, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_1', ` +#else .macro KERNEL4x16_1 +#endif lxvw4x vs4, o0, AO @@ -2629,9 +2969,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs3, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_2', ` +#else .macro KERNEL4x16_2 +#endif lxvw4x vs0, o0, AO @@ -2672,9 +3020,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs7, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_E2', ` +#else .macro KERNEL4x16_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -2698,9 +3054,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs7, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_SUBI1', ` +#else .macro KERNEL4x16_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -2741,9 +3105,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs3, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_SUB1', ` +#else .macro KERNEL4x16_SUB1 +#endif lxvw4x vs0, o0, AO @@ -2784,9 +3156,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs47, vs3, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x16', ` +#else .macro SAVE4x16 +#endif mr T1, CO @@ -2907,14 +3287,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x8_1', ` +#else .macro LOAD4x8_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -2930,9 +3318,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_I1', ` +#else .macro KERNEL4x8_I1 +#endif lxvw4x vs4, o0, AO @@ -2963,9 +3359,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_1', ` +#else .macro KERNEL4x8_1 +#endif lxvw4x vs4, o0, AO @@ -2996,9 +3400,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_2', ` +#else .macro KERNEL4x8_2 +#endif lxvw4x vs0, o0, AO @@ -3029,9 +3441,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs5, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_E2', ` +#else .macro KERNEL4x8_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -3047,9 +3467,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs5, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_SUBI1', ` +#else .macro KERNEL4x8_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -3080,9 +3508,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_SUB1', ` +#else .macro KERNEL4x8_SUB1 +#endif lxvw4x vs0, o0, AO @@ -3113,9 +3549,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x8', ` +#else .macro SAVE4x8 +#endif mr T1, CO @@ -3204,14 +3648,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x4_1', ` +#else .macro LOAD4x4_1 +#endif lxvw4x vs0, o0, AO @@ -3226,9 +3678,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_I1', ` +#else .macro KERNEL4x4_I1 +#endif lxvw4x vs4, o0, AO @@ -3254,9 +3714,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_1', ` +#else .macro KERNEL4x4_1 +#endif lxvw4x vs4, o0, AO @@ -3282,9 +3750,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_2', ` +#else .macro KERNEL4x4_2 +#endif lxvw4x vs0, o0, AO @@ -3310,9 +3786,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs4, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_E2', ` +#else .macro KERNEL4x4_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -3324,9 +3808,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs4, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_SUBI1', ` +#else .macro KERNEL4x4_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -3352,9 +3844,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_SUB1', ` +#else .macro KERNEL4x4_SUB1 +#endif lxvw4x vs0, o0, AO @@ -3380,9 +3880,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x4', ` +#else .macro SAVE4x4 +#endif mr T1, CO @@ -3455,14 +3963,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x2_1', ` +#else .macro LOAD4x2_1 +#endif lxsspx vs0, o0, AO lxsspx vs1, o4, AO @@ -3479,9 +3995,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_I1', ` +#else .macro KERNEL4x2_I1 +#endif lxsspx vs4, o0, AO @@ -3513,9 +4037,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_1', ` +#else .macro KERNEL4x2_1 +#endif lxsspx vs4, o0, AO @@ -3547,9 +4079,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_2', ` +#else .macro KERNEL4x2_2 +#endif lxsspx vs0, o0, AO @@ -3581,9 +4121,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs5, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_E2', ` +#else .macro KERNEL4x2_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -3599,9 +4147,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs5, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_SUBI1', ` +#else .macro KERNEL4x2_SUBI1 +#endif lxsspx vs0, o0, AO @@ -3633,9 +4189,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_SUB1', ` +#else .macro KERNEL4x2_SUB1 +#endif lxsspx vs0, o0, AO @@ -3667,9 +4231,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x2', ` +#else .macro SAVE4x2 +#endif mr T1, CO @@ -3758,14 +4330,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x1_1', ` +#else .macro LOAD4x1_1 +#endif lxsspx vs0, o0, AO @@ -3781,9 +4361,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_I1', ` +#else .macro KERNEL4x1_I1 +#endif lxsspx vs4, o0, AO @@ -3810,9 +4398,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_1', ` +#else .macro KERNEL4x1_1 +#endif lxsspx vs4, o0, AO @@ -3839,9 +4435,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_2', ` +#else .macro KERNEL4x1_2 +#endif lxsspx vs0, o0, AO @@ -3868,9 +4472,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs4, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_E2', ` +#else .macro KERNEL4x1_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -3882,9 +4494,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs4, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_SUBI1', ` +#else .macro KERNEL4x1_SUBI1 +#endif lxsspx vs0, o0, AO @@ -3911,9 +4531,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_SUB1', ` +#else .macro KERNEL4x1_SUB1 +#endif lxsspx vs0, o0, AO @@ -3940,9 +4568,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x1', ` +#else .macro SAVE4x1 +#endif mr T1, CO @@ -4015,14 +4651,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 4 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x16_1', ` +#else .macro LOAD2x16_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -4038,9 +4682,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_I1', ` +#else .macro KERNEL2x16_I1 +#endif lxvw4x vs4, o0, AO @@ -4069,9 +4721,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs3, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_1', ` +#else .macro KERNEL2x16_1 +#endif lxvw4x vs4, o0, AO @@ -4100,9 +4760,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs39, vs3, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_2', ` +#else .macro KERNEL2x16_2 +#endif lxvw4x vs0, o0, AO @@ -4131,9 +4799,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs7, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_E2', ` +#else .macro KERNEL2x16_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -4147,9 +4823,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs7, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_SUBI1', ` +#else .macro KERNEL2x16_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -4178,9 +4862,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs3, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_SUB1', ` +#else .macro KERNEL2x16_SUB1 +#endif lxvw4x vs0, o0, AO @@ -4209,9 +4901,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs3, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x16', ` +#else .macro SAVE2x16 +#endif mr T1, CO @@ -4274,14 +4974,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x8_1', ` +#else .macro LOAD2x8_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -4295,9 +5003,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_I1', ` +#else .macro KERNEL2x8_I1 +#endif lxvw4x vs4, o0, AO @@ -4320,9 +5036,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_1', ` +#else .macro KERNEL2x8_1 +#endif lxvw4x vs4, o0, AO @@ -4345,9 +5069,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_2', ` +#else .macro KERNEL2x8_2 +#endif lxvw4x vs0, o0, AO @@ -4370,9 +5102,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs5, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_E2', ` +#else .macro KERNEL2x8_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -4382,9 +5122,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs5, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUBI1', ` +#else .macro KERNEL2x8_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -4407,9 +5155,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUB1', ` +#else .macro KERNEL2x8_SUB1 +#endif lxvw4x vs0, o0, AO @@ -4432,9 +5188,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x8', ` +#else .macro SAVE2x8 +#endif mr T1, CO @@ -4481,14 +5245,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x4_1', ` +#else .macro LOAD2x4_1 +#endif lxvw4x vs0, o0, AO @@ -4501,9 +5273,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_I1', ` +#else .macro KERNEL2x4_I1 +#endif lxvw4x vs4, o0, AO @@ -4523,9 +5303,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_1', ` +#else .macro KERNEL2x4_1 +#endif lxvw4x vs4, o0, AO @@ -4545,9 +5333,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_2', ` +#else .macro KERNEL2x4_2 +#endif lxvw4x vs0, o0, AO @@ -4567,9 +5363,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs4, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_E2', ` +#else .macro KERNEL2x4_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -4577,9 +5381,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs4, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUBI1', ` +#else .macro KERNEL2x4_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -4599,9 +5411,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUB1', ` +#else .macro KERNEL2x4_SUB1 +#endif lxvw4x vs0, o0, AO @@ -4621,9 +5441,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x4', ` +#else .macro SAVE2x4 +#endif mr T1, CO @@ -4662,14 +5490,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x2_1', ` +#else .macro LOAD2x2_1 +#endif lxsspx vs0, o0, AO lxsspx vs1, o4, AO @@ -4684,9 +5520,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_I1', ` +#else .macro KERNEL2x2_I1 +#endif lxsspx vs4, o0, AO @@ -4710,9 +5554,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_1', ` +#else .macro KERNEL2x2_1 +#endif lxsspx vs4, o0, AO @@ -4736,9 +5588,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_2', ` +#else .macro KERNEL2x2_2 +#endif lxsspx vs0, o0, AO @@ -4762,9 +5622,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs5, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_E2', ` +#else .macro KERNEL2x2_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -4774,9 +5642,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs5, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUBI1', ` +#else .macro KERNEL2x2_SUBI1 +#endif lxsspx vs0, o0, AO @@ -4800,9 +5676,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUB1', ` +#else .macro KERNEL2x2_SUB1 +#endif lxsspx vs0, o0, AO @@ -4826,9 +5710,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x2', ` +#else .macro SAVE2x2 +#endif mr T1, CO @@ -4875,14 +5767,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x1_1', ` +#else .macro LOAD2x1_1 +#endif lxsspx vs0, o0, AO @@ -4896,9 +5796,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_I1', ` +#else .macro KERNEL2x1_I1 +#endif lxsspx vs4, o0, AO @@ -4919,9 +5827,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_1', ` +#else .macro KERNEL2x1_1 +#endif lxsspx vs4, o0, AO @@ -4942,9 +5858,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_2', ` +#else .macro KERNEL2x1_2 +#endif lxsspx vs0, o0, AO @@ -4965,9 +5889,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs4, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_E2', ` +#else .macro KERNEL2x1_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -4975,9 +5907,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs4, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUBI1', ` +#else .macro KERNEL2x1_SUBI1 +#endif lxsspx vs0, o0, AO @@ -4998,9 +5938,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUB1', ` +#else .macro KERNEL2x1_SUB1 +#endif lxsspx vs0, o0, AO @@ -5021,9 +5969,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x1', ` +#else .macro SAVE2x1 +#endif mr T1, CO @@ -5062,14 +6018,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 4 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x16_1', ` +#else .macro LOAD1x16_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -5084,9 +6048,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_I1', ` +#else .macro KERNEL1x16_I1 +#endif lxvw4x vs4, o0, AO @@ -5109,9 +6081,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs3, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_1', ` +#else .macro KERNEL1x16_1 +#endif lxvw4x vs4, o0, AO @@ -5134,9 +6114,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs3, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_2', ` +#else .macro KERNEL1x16_2 +#endif lxvw4x vs0, o0, AO @@ -5159,9 +6147,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs7, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_E2', ` +#else .macro KERNEL1x16_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -5170,9 +6166,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs7, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_SUBI1', ` +#else .macro KERNEL1x16_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -5195,9 +6199,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs3, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_SUB1', ` +#else .macro KERNEL1x16_SUB1 +#endif lxvw4x vs0, o0, AO @@ -5220,9 +6232,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs3, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x16', ` +#else .macro SAVE1x16 +#endif mr T1, CO @@ -5256,14 +6276,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x8_1', ` +#else .macro LOAD1x8_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -5276,9 +6304,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_I1', ` +#else .macro KERNEL1x8_I1 +#endif lxvw4x vs4, o0, AO @@ -5297,9 +6333,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_1', ` +#else .macro KERNEL1x8_1 +#endif lxvw4x vs4, o0, AO @@ -5318,9 +6362,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_2', ` +#else .macro KERNEL1x8_2 +#endif lxvw4x vs0, o0, AO @@ -5339,18 +6391,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs5, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_E2', ` +#else .macro KERNEL1x8_E2 +#endif xvmaddasp vs32, vs4, vs16 xvmaddasp vs33, vs5, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUBI1', ` +#else .macro KERNEL1x8_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -5369,9 +6437,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUB1', ` +#else .macro KERNEL1x8_SUB1 +#endif lxvw4x vs0, o0, AO @@ -5390,9 +6466,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x8', ` +#else .macro SAVE1x8 +#endif mr T1, CO @@ -5418,14 +6502,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x4_1', ` +#else .macro LOAD1x4_1 +#endif lxvw4x vs0, o0, AO @@ -5437,9 +6529,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_I1', ` +#else .macro KERNEL1x4_I1 +#endif lxvw4x vs4, o0, AO @@ -5456,9 +6556,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_1', ` +#else .macro KERNEL1x4_1 +#endif lxvw4x vs4, o0, AO @@ -5475,9 +6583,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_2', ` +#else .macro KERNEL1x4_2 +#endif lxvw4x vs0, o0, AO @@ -5494,17 +6610,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs32, vs4, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_E2', ` +#else .macro KERNEL1x4_E2 +#endif xvmaddasp vs32, vs4, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUBI1', ` +#else .macro KERNEL1x4_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -5521,9 +6653,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUB1', ` +#else .macro KERNEL1x4_SUB1 +#endif lxvw4x vs0, o0, AO @@ -5540,9 +6680,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x4', ` +#else .macro SAVE1x4 +#endif mr T1, CO @@ -5564,14 +6712,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x2_1', ` +#else .macro LOAD1x2_1 +#endif lxsspx vs0, o0, AO lxsspx vs1, o4, AO @@ -5585,9 +6741,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_I1', ` +#else .macro KERNEL1x2_I1 +#endif lxsspx vs4, o0, AO @@ -5607,9 +6771,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_1', ` +#else .macro KERNEL1x2_1 +#endif lxsspx vs4, o0, AO @@ -5629,9 +6801,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_2', ` +#else .macro KERNEL1x2_2 +#endif lxsspx vs0, o0, AO @@ -5651,18 +6831,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs5, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_E2', ` +#else .macro KERNEL1x2_E2 +#endif xsmaddadp vs32, vs4, vs16 xsmaddadp vs33, vs5, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUBI1', ` +#else .macro KERNEL1x2_SUBI1 +#endif lxsspx vs0, o0, AO @@ -5682,9 +6878,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUB1', ` +#else .macro KERNEL1x2_SUB1 +#endif lxsspx vs0, o0, AO @@ -5704,9 +6908,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x2', ` +#else .macro SAVE1x2 +#endif mr T1, CO @@ -5732,14 +6944,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x1_1', ` +#else .macro LOAD1x1_1 +#endif lxsspx vs0, o0, AO @@ -5752,9 +6972,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_I1', ` +#else .macro KERNEL1x1_I1 +#endif lxsspx vs4, o0, AO @@ -5772,9 +7000,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_1', ` +#else .macro KERNEL1x1_1 +#endif lxsspx vs4, o0, AO @@ -5792,9 +7028,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_2', ` +#else .macro KERNEL1x1_2 +#endif lxsspx vs0, o0, AO @@ -5812,17 +7056,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs32, vs4, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_E2', ` +#else .macro KERNEL1x1_E2 +#endif xsmaddadp vs32, vs4, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUBI1', ` +#else .macro KERNEL1x1_SUBI1 +#endif lxsspx vs0, o0, AO @@ -5840,9 +7100,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUB1', ` +#else .macro KERNEL1x1_SUB1 +#endif lxsspx vs0, o0, AO @@ -5860,9 +7128,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x1', ` +#else .macro SAVE1x1 +#endif mr T1, CO @@ -5884,13 +7160,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 4 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`COPYB_4x8', ` +#else .macro COPYB_4x8 +#endif lxvw4x vs5, o0, BO @@ -5993,10 +7277,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs54, o48, BBO addi BBO, BBO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`COPYB_1x8', ` +#else .macro COPYB_1x8 +#endif lxvw4x vs5, o0, BO @@ -6026,5 +7318,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs14, o48, BBO addi BBO, BBO, 64 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/sgemm_macros_power9.S b/kernel/power/sgemm_macros_power9.S index c61f419ac..2c9e537c7 100644 --- a/kernel/power/sgemm_macros_power9.S +++ b/kernel/power/sgemm_macros_power9.S @@ -38,13 +38,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=8 and M=16 **********************************************************************************************/ -.macro LOAD8x16_1 - LOAD8x16 1 -.endm - -.macro LOAD8x16_0 - LOAD8x16 0 -.endm + .macro KERNEL8x16_L1_L4 Index,IsLast KERNEL8x16_L1_L4_I AO,BO, 0,0, \Index,\IsLast,0 @@ -61,10 +55,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL8x16_I1_L4_3 OffsetA,OffsetB, Index,IsLast KERNEL8x16_L1_L4_I AO,BO, \OffsetA,\OffsetB,\Index,\IsLast,1 .endm -.macro KERNEL8x16_I1_L2_3 OffsetA,OffsetB, Index,IsLast - KERNEL8x16_L1_L2_I AO,BO,0, \OffsetA,\OffsetB,\Index,\IsLast,1 -.endm - + .macro KERNEL8x16_I2_L4_2 AREG,BREG,OffsetA,OffsetB, Index,IsLast KERNEL8x16_L1_L4_I \AREG,\BREG, \OffsetA,\OffsetB,\Index,\IsLast,0 .endm @@ -108,62 +99,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
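/* Outline of the rework in the hunks below (a reading aid, not new code):
   LOAD8x16 loses its `Zero' argument -- clearing the vs32..vs63 accumulators is
   now done once up front by ZERO8x16 in the logic file -- and instead takes
   explicit offsets, so callers can pre-bias AO/BO and let the loads fold the
   displacement:

       LOAD8x16 OffsetA,OffsetB
           lxv vs24, (\OffsetB+0)(BO)
           lxv vs0,  (\OffsetA+0)(AO)
           ...

   The four-iteration unrolled body KERNEL8x16_L1_L4_I is likewise collapsed
   into two calls of a new two-iteration KERNEL8x16_2, which interleaves the
   loads and permutes for the next iteration with the current FMAs and skips
   the reloads when Complete==1. */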
xxlxor vs63, vs63, vs63 .endm -.macro LOAD8x16 Zero +.macro LOAD8x16 OffsetA,OffsetB - lxv vs24, 0(BO) - lxv vs28, 16(BO) - lxv vs0, 0(AO) - lxv vs1, 16(AO) - lxv vs2, 32(AO) - lxv vs3, 48(AO) + lxv vs24, (\OffsetB+0)(BO) + lxv vs28, (\OffsetB+16)(BO) xxperm vs26, vs24, permute_mask - xxperm vs30, vs28, permute_mask + xxperm vs30, vs28, permute_mask + lxv vs0, (\OffsetA+0)(AO) + lxv vs1, (\OffsetA+16)(AO) xxpermdi vs25, vs24, vs24,2 xxpermdi vs29, vs28, vs28,2 - + lxv vs2, (\OffsetA+32)(AO) + lxv vs3, (\OffsetA+48)(AO) xxpermdi vs27, vs26, vs26,2 xxpermdi vs31, vs30, vs30,2 -.if \Zero==1 - xxlxor vs32, vs32, vs32 - xxlxor vs33, vs33, vs33 - xxlxor vs34, vs34, vs34 - xxlxor vs35, vs35, vs35 - xxlxor vs36, vs36, vs36 - xxlxor vs37, vs37, vs37 - xxlxor vs38, vs38, vs38 - xxlxor vs39, vs39, vs39 - xxlxor vs40, vs40, vs40 - xxlxor vs41, vs41, vs41 - xxlxor vs42, vs42, vs42 - xxlxor vs43, vs43, vs43 - xxlxor vs44, vs44, vs44 - xxlxor vs45, vs45, vs45 - xxlxor vs46, vs46, vs46 - xxlxor vs47, vs47, vs47 - xxlxor vs48, vs48, vs48 - xxlxor vs49, vs49, vs49 - xxlxor vs50, vs50, vs50 - xxlxor vs51, vs51, vs51 - xxlxor vs52, vs52, vs52 - xxlxor vs53, vs53, vs53 - xxlxor vs54, vs54, vs54 - xxlxor vs55, vs55, vs55 - xxlxor vs56, vs56, vs56 - xxlxor vs57, vs57, vs57 - xxlxor vs58, vs58, vs58 - xxlxor vs59, vs59, vs59 - xxlxor vs60, vs60, vs60 - xxlxor vs61, vs61, vs61 - xxlxor vs62, vs62, vs62 - xxlxor vs63, vs63, vs63 -.endif .endm .macro END8x16_NORMAL END8x16 0, AO, BO, 64,32 .endm +.macro END8x16_WITHOUT_ADD + END8x16 0, AO,BO,0,0 +.endm + .macro END8x16 First, AREG, BREG, OffsetA, OffsetB .if \OffsetB != 0 @@ -259,473 +219,201 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL8x16_L1_L4_I AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete - lxv vs8, DISP32(\Index, 0+\OffsetB)(\BREG) - lxv vs12, DISP32(\Index,16+\OffsetB)(\BREG) - - lxv vs4, DISP64(\Index, 0+\OffsetA)(\AREG) - lxv vs5, DISP64(\Index,16+\OffsetA)(\AREG) - lxv vs6, DISP64(\Index,32+\OffsetA)(\AREG) - lxv vs7, DISP64(\Index,48+\OffsetA)(\AREG) - - xxperm vs10, vs8, permute_mask - xxperm vs14, vs12, permute_mask - xxpermdi vs9, vs8, vs8,2 - xxpermdi vs13, vs12, vs12,2 - - xvmaddasp vs32, vs0,vs24 - xvmaddasp vs33, vs1,vs24 - xvmaddasp vs34, vs2,vs24 - xvmaddasp vs35, vs3,vs24 - - xvmaddasp vs36, vs0,vs25 - xvmaddasp vs37, vs1,vs25 - xvmaddasp vs38, vs2,vs25 - xvmaddasp vs39, vs3,vs25 - - xxpermdi vs11, vs10, vs10,2 - xxpermdi vs15, vs14, vs14,2 - - xvmaddasp vs40, vs0,vs26 - xvmaddasp vs41, vs1,vs26 - xvmaddasp vs42, vs2,vs26 - xvmaddasp vs43, vs3,vs26 - - xvmaddasp vs44, vs0,vs27 - xvmaddasp vs45, vs1,vs27 - xvmaddasp vs46, vs2,vs27 - xvmaddasp vs47, vs3,vs27 - - xvmaddasp vs48, vs0,vs28 - xvmaddasp vs49, vs1,vs28 - xvmaddasp vs50, vs2,vs28 - xvmaddasp vs51, vs3,vs28 - - xvmaddasp vs52, vs0,vs29 - xvmaddasp vs53, vs1,vs29 - xvmaddasp vs54, vs2,vs29 - xvmaddasp vs55, vs3,vs29 - - xvmaddasp vs56, vs0,vs30 - xvmaddasp vs57, vs1,vs30 - xvmaddasp vs58, vs2,vs30 - xvmaddasp vs59, vs3,vs30 - - xvmaddasp vs60, vs0,vs31 - xvmaddasp vs61, vs1,vs31 - xvmaddasp vs62, vs2,vs31 - xvmaddasp vs63, vs3,vs31 - - lxv vs24, DISP32(\Index,32+\OffsetB)(\BREG) - lxv vs28, DISP32(\Index,32+16+\OffsetB)(\BREG) - - lxv vs0, DISP64(\Index,64+\OffsetA)(\AREG) - lxv vs1, DISP64(\Index,64+16+\OffsetA)(\AREG) - lxv vs2, DISP64(\Index,64+32+\OffsetA)(\AREG) - lxv vs3, DISP64(\Index,64+48+\OffsetA)(\AREG) - - xxperm vs26, vs24, permute_mask - xxperm vs30, vs28, permute_mask - xxpermdi vs25, vs24, vs24,2 - xxpermdi vs29, vs28, 
vs28,2 - - - xvmaddasp vs32, vs4,vs8 - xvmaddasp vs33, vs5,vs8 - xvmaddasp vs34, vs6,vs8 - xvmaddasp vs35, vs7,vs8 - - xvmaddasp vs36, vs4,vs9 - xvmaddasp vs37, vs5,vs9 - xvmaddasp vs38, vs6,vs9 - xvmaddasp vs39, vs7,vs9 - - xxpermdi vs27, vs26, vs26,2 - xxpermdi vs31, vs30, vs30,2 - - xvmaddasp vs40, vs4,vs10 - xvmaddasp vs41, vs5,vs10 - xvmaddasp vs42, vs6,vs10 - xvmaddasp vs43, vs7,vs10 - - xvmaddasp vs44, vs4,vs11 - xvmaddasp vs45, vs5,vs11 - xvmaddasp vs46, vs6,vs11 - xvmaddasp vs47, vs7,vs11 - - xvmaddasp vs48, vs4,vs12 - xvmaddasp vs49, vs5,vs12 - xvmaddasp vs50, vs6,vs12 - xvmaddasp vs51, vs7,vs12 - - xvmaddasp vs52, vs4,vs13 - xvmaddasp vs53, vs5,vs13 - xvmaddasp vs54, vs6,vs13 - xvmaddasp vs55, vs7,vs13 - - xvmaddasp vs56, vs4,vs14 - xvmaddasp vs57, vs5,vs14 - xvmaddasp vs58, vs6,vs14 - xvmaddasp vs59, vs7,vs14 - - xvmaddasp vs60, vs4,vs15 - xvmaddasp vs61, vs5,vs15 - xvmaddasp vs62, vs6,vs15 - xvmaddasp vs63, vs7,vs15 - - lxv vs8, DISP32(\Index,64+\OffsetB)(\BREG) - lxv vs12, DISP32(\Index,64+16+\OffsetB)(\BREG) - - lxv vs4, DISP64(\Index,128+0+\OffsetA)(\AREG) - lxv vs5, DISP64(\Index,128+16+\OffsetA)(\AREG) - lxv vs6, DISP64(\Index,128+32+\OffsetA)(\AREG) - lxv vs7, DISP64(\Index,128+48+\OffsetA)(\AREG) - - xxperm vs10, vs8, permute_mask - xxperm vs14, vs12, permute_mask - xxpermdi vs9, vs8, vs8,2 - xxpermdi vs13, vs12, vs12,2 - - xvmaddasp vs32, vs0,vs24 - xvmaddasp vs33, vs1,vs24 - xvmaddasp vs34, vs2,vs24 - xvmaddasp vs35, vs3,vs24 - - xvmaddasp vs36, vs0,vs25 - xvmaddasp vs37, vs1,vs25 - xvmaddasp vs38, vs2,vs25 - xvmaddasp vs39, vs3,vs25 - - xxpermdi vs11, vs10, vs10,2 - xxpermdi vs15, vs14, vs14,2 - - xvmaddasp vs40, vs0,vs26 - xvmaddasp vs41, vs1,vs26 - xvmaddasp vs42, vs2,vs26 - xvmaddasp vs43, vs3,vs26 - - xvmaddasp vs44, vs0,vs27 - xvmaddasp vs45, vs1,vs27 - xvmaddasp vs46, vs2,vs27 - xvmaddasp vs47, vs3,vs27 - - xvmaddasp vs48, vs0,vs28 - xvmaddasp vs49, vs1,vs28 - xvmaddasp vs50, vs2,vs28 - xvmaddasp vs51, vs3,vs28 - - xvmaddasp vs52, vs0,vs29 - xvmaddasp vs53, vs1,vs29 - xvmaddasp vs54, vs2,vs29 - xvmaddasp vs55, vs3,vs29 - - xvmaddasp vs56, vs0,vs30 - xvmaddasp vs57, vs1,vs30 - xvmaddasp vs58, vs2,vs30 - xvmaddasp vs59, vs3,vs30 - - xvmaddasp vs60, vs0,vs31 - xvmaddasp vs61, vs1,vs31 - xvmaddasp vs62, vs2,vs31 - xvmaddasp vs63, vs3,vs31 - -.if \Complete==0 - lxv vs24, DISP32(\Index,96+\OffsetB)(\BREG) - lxv vs28, DISP32(\Index,96+16+\OffsetB)(\BREG) - - lxv vs0, DISP64(\Index,192+\OffsetA)(\AREG) - lxv vs1, DISP64(\Index,192+16+\OffsetA)(\AREG) - lxv vs2, DISP64(\Index,192+32+\OffsetA)(\AREG) - lxv vs3, DISP64(\Index,192+48+\OffsetA)(\AREG) - - xxperm vs26, vs24, permute_mask - xxperm vs30, vs28, permute_mask - xxpermdi vs25, vs24, vs24,2 - xxpermdi vs29, vs28, vs28,2 - -.endif -.if \IsLast==1 -.if \Complete==1 - - addi \BREG, \BREG, DISP32(\Index,32*3+\OffsetB) - addi \AREG, \AREG, DISP64(\Index,64*3+\OffsetA) -.else - - addi \BREG, \BREG, DISP32(\Index,128) - addi \AREG, \AREG, DISP64(\Index,256) -.endif -.endif - - xvmaddasp vs32, vs4,vs8 - xvmaddasp vs33, vs5,vs8 - xvmaddasp vs34, vs6,vs8 - xvmaddasp vs35, vs7,vs8 - - xvmaddasp vs36, vs4,vs9 - xvmaddasp vs37, vs5,vs9 - xvmaddasp vs38, vs6,vs9 - xvmaddasp vs39, vs7,vs9 - -.if \Complete==0 - xxpermdi vs27, vs26, vs26,2 - xxpermdi vs31, vs30, vs30,2 - -.endif - - xvmaddasp vs40, vs4,vs10 - xvmaddasp vs41, vs5,vs10 - xvmaddasp vs42, vs6,vs10 - xvmaddasp vs43, vs7,vs10 - - xvmaddasp vs44, vs4,vs11 - xvmaddasp vs45, vs5,vs11 - xvmaddasp vs46, vs6,vs11 - xvmaddasp vs47, vs7,vs11 - - xvmaddasp vs48, vs4,vs12 - 
xvmaddasp vs49, vs5,vs12 - xvmaddasp vs50, vs6,vs12 - xvmaddasp vs51, vs7,vs12 - - xvmaddasp vs52, vs4,vs13 - xvmaddasp vs53, vs5,vs13 - xvmaddasp vs54, vs6,vs13 - xvmaddasp vs55, vs7,vs13 - - xvmaddasp vs56, vs4,vs14 - xvmaddasp vs57, vs5,vs14 - xvmaddasp vs58, vs6,vs14 - xvmaddasp vs59, vs7,vs14 - - xvmaddasp vs60, vs4,vs15 - xvmaddasp vs61, vs5,vs15 - xvmaddasp vs62, vs6,vs15 - xvmaddasp vs63, vs7,vs15 +KERNEL8x16_2 \AREG,\BREG, \OffsetA,\OffsetB, (\Index*2),0 ,0 +KERNEL8x16_2 \AREG,\BREG,\OffsetA,\OffsetB, (\Index*2+1),\IsLast ,\Complete .endm .macro KERNEL8x16 First - LOAD8x16 0 + LOAD8x16 0,0 END8x16 \First, AO, BO, 64,32 .endm -.macro KERNEL8x16_L1_L2_I AREG,BREG,First,OffsetA,OffsetB, Index,IsLast ,Complete - - lxv vs8, DISP16(\Index, 0+\OffsetB)(\BREG) - lxv vs12, DISP16(\Index,16+\OffsetB)(\BREG) +.macro LOAD8x16_2 + LOAD8x16_2O AO,BO, 0,0 +.endm - lxv vs4, DISP32(\Index, 0+\OffsetA)(\AREG) - lxv vs5, DISP32(\Index,16+\OffsetA)(\AREG) - lxv vs6, DISP32(\Index,32+\OffsetA)(\AREG) - lxv vs7, DISP32(\Index,48+\OffsetA)(\AREG) +.macro LOAD8x16_2O AREG,BREG, OffsetA,OffsetB + lxv vs8, (\OffsetB)(\BREG) + lxv vs12, (16+\OffsetB)(\BREG) + lxv vs24, (32+\OffsetB)(\BREG) + lxv vs28, (32+16+\OffsetB)(\BREG) + lxv vs4, (0+\OffsetA)(\AREG) + lxv vs5, (16+\OffsetA)(\AREG) + xxperm vs10, vs8, permute_mask + xxperm vs14, vs12, permute_mask + lxv vs6, (32+\OffsetA)(\AREG) + lxv vs7, (48+\OffsetA)(\AREG) + xxpermdi vs9, vs8, vs8,2 + xxpermdi vs13, vs12, vs12,2 + lxv vs0, (64+\OffsetA)(\AREG) + lxv vs1, (64+16+\OffsetA)(\AREG) + xxpermdi vs11, vs10, vs10,2 + xxpermdi vs15, vs14, vs14,2 + lxv vs2, (64+32+\OffsetA)(\AREG) + lxv vs3, (64+48+\OffsetA)(\AREG) - xxperm vs10, vs8, permute_mask - xxperm vs14, vs12, permute_mask - xxpermdi vs9, vs8, vs8,2 - xxpermdi vs13, vs12, vs12,2 -.if \First==1 - xvmulsp vs32, vs0,vs24 - xvmulsp vs33, vs1,vs24 - xvmulsp vs34, vs2,vs24 - xvmulsp vs35, vs3,vs24 + xxperm vs26, vs24, permute_mask + xxperm vs30, vs28, permute_mask + xxpermdi vs25, vs24, vs24,2 + xxpermdi vs29, vs28, vs28,2 + xxpermdi vs27, vs26, vs26,2 + xxpermdi vs31, vs30, vs30,2 +.endm - xvmulsp vs36, vs0,vs25 - xvmulsp vs37, vs1,vs25 - xvmulsp vs38, vs2,vs25 - xvmulsp vs39, vs3,vs25 -.else - xvmaddasp vs32, vs0,vs24 - xvmaddasp vs33, vs1,vs24 - xvmaddasp vs34, vs2,vs24 - xvmaddasp vs35, vs3,vs24 +.macro END8x16_2 + /*for load2 offset will be 128 and 64*/ + KERNEL8x16_2 AO,BO, 128,64,0 ,1,1 +.endm + - xvmaddasp vs36, vs0,vs25 - xvmaddasp vs37, vs1,vs25 - xvmaddasp vs38, vs2,vs25 - xvmaddasp vs39, vs3,vs25 -.endif - xxpermdi vs11, vs10, vs10,2 - xxpermdi vs15, vs14, vs14,2 - -.if \First==1 - xvmulsp vs40, vs0,vs26 - xvmulsp vs41, vs1,vs26 - xvmulsp vs42, vs2,vs26 - xvmulsp vs43, vs3,vs26 +.macro KERNEL8x16_E2 OffsetA,OffsetB, Index,IsLast + KERNEL8x16_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm - xvmulsp vs44, vs0,vs27 - xvmulsp vs45, vs1,vs27 - xvmulsp vs46, vs2,vs27 - xvmulsp vs47, vs3,vs27 - xvmulsp vs48, vs0,vs28 - xvmulsp vs49, vs1,vs28 - xvmulsp vs50, vs2,vs28 - xvmulsp vs51, vs3,vs28 +.macro KERNEL8x16_L2 OffsetA,OffsetB, Index,IsLast + KERNEL8x16_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm - xvmulsp vs52, vs0,vs29 - xvmulsp vs53, vs1,vs29 - xvmulsp vs54, vs2,vs29 - xvmulsp vs55, vs3,vs29 - xvmulsp vs56, vs0,vs30 - xvmulsp vs57, vs1,vs30 - xvmulsp vs58, vs2,vs30 - xvmulsp vs59, vs3,vs30 +.macro KERNEL8x16_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xvmaddasp vs32, vs4,vs8 + xvmaddasp vs33, vs5,vs8 + xvmaddasp vs48, vs4,vs12 + xvmaddasp vs49, vs5,vs12 - xvmulsp vs60, 
vs0,vs31 - xvmulsp vs61, vs1,vs31 - xvmulsp vs62, vs2,vs31 - xvmulsp vs63, vs3,vs31 + xvmaddasp vs40, vs4,vs10 + xvmaddasp vs41, vs5,vs10 + xvmaddasp vs56, vs4,vs14 + xvmaddasp vs57, vs5,vs14 -.else - xvmaddasp vs40, vs0,vs26 - xvmaddasp vs41, vs1,vs26 - xvmaddasp vs42, vs2,vs26 - xvmaddasp vs43, vs3,vs26 + xvmaddasp vs36, vs4,vs9 + xvmaddasp vs37, vs5,vs9 + xvmaddasp vs52, vs4,vs13 + xvmaddasp vs53, vs5,vs13 - xvmaddasp vs44, vs0,vs27 - xvmaddasp vs45, vs1,vs27 - xvmaddasp vs46, vs2,vs27 - xvmaddasp vs47, vs3,vs27 + xvmaddasp vs44, vs4,vs11 + xvmaddasp vs45, vs5,vs11 + xvmaddasp vs60, vs4,vs15 + xvmaddasp vs61, vs5,vs15 - xvmaddasp vs48, vs0,vs28 - xvmaddasp vs49, vs1,vs28 - xvmaddasp vs50, vs2,vs28 - xvmaddasp vs51, vs3,vs28 +.if \Complete==0 + lxv vs4, DISP32(\Index,0+\OffsetA)(\AREG) + lxv vs5, DISP32(\Index,16+\OffsetA)(\AREG) +.endif - xvmaddasp vs52, vs0,vs29 - xvmaddasp vs53, vs1,vs29 - xvmaddasp vs54, vs2,vs29 - xvmaddasp vs55, vs3,vs29 + xvmaddasp vs34, vs6,vs8 + xvmaddasp vs35, vs7,vs8 + xvmaddasp vs50, vs6,vs12 + xvmaddasp vs51, vs7,vs12 +.if \Complete==0 + lxv vs8, DISP16(\Index,\OffsetB)(\BREG) + lxv vs12, DISP16(\Index,16+\OffsetB)(\BREG) +.endif + xvmaddasp vs42, vs6,vs10 + xvmaddasp vs43, vs7,vs10 + xvmaddasp vs58, vs6,vs14 + xvmaddasp vs59, vs7,vs14 +.if \Complete==0 + xxperm vs10, vs8, permute_mask + xxperm vs14, vs12, permute_mask +.endif + xvmaddasp vs38, vs6,vs9 + xvmaddasp vs39, vs7,vs9 + xvmaddasp vs54, vs6,vs13 + xvmaddasp vs55, vs7,vs13 +.if \Complete==0 + xxpermdi vs9, vs8, vs8,2 + xxpermdi vs13, vs12, vs12,2 +.endif + xvmaddasp vs46, vs6,vs11 + xvmaddasp vs47, vs7,vs11 + xvmaddasp vs62, vs6,vs15 + xvmaddasp vs63, vs7,vs15 +.if \Complete==0 + xxpermdi vs11, vs10, vs10,2 + xxpermdi vs15, vs14, vs14,2 +.endif - xvmaddasp vs56, vs0,vs30 - xvmaddasp vs57, vs1,vs30 - xvmaddasp vs58, vs2,vs30 - xvmaddasp vs59, vs3,vs30 +.if \Complete==0 + lxv vs6, DISP32(\Index,32+\OffsetA)(\AREG) + lxv vs7, DISP32(\Index,48+\OffsetA)(\AREG) +.endif - xvmaddasp vs60, vs0,vs31 - xvmaddasp vs61, vs1,vs31 - xvmaddasp vs62, vs2,vs31 - xvmaddasp vs63, vs3,vs31 + xvmaddasp vs32, vs0,vs24 + xvmaddasp vs33, vs1,vs24 + xvmaddasp vs48, vs0,vs28 + xvmaddasp vs49, vs1,vs28 + xvmaddasp vs40, vs0,vs26 + xvmaddasp vs41, vs1,vs26 + xvmaddasp vs56, vs0,vs30 + xvmaddasp vs57, vs1,vs30 + xvmaddasp vs36, vs0,vs25 + xvmaddasp vs37, vs1,vs25 + xvmaddasp vs52, vs0,vs29 + xvmaddasp vs53, vs1,vs29 + xvmaddasp vs44, vs0,vs27 + xvmaddasp vs45, vs1,vs27 + xvmaddasp vs60, vs0,vs31 + xvmaddasp vs61, vs1,vs31 +.if \Complete==0 + lxv vs0, DISP32(\Index,64+\OffsetA)(\AREG) + lxv vs1, DISP32(\Index,64+16+\OffsetA)(\AREG) +.endif + xvmaddasp vs34, vs2,vs24 + xvmaddasp vs35, vs3,vs24 + xvmaddasp vs50, vs2,vs28 + xvmaddasp vs51, vs3,vs28 +.if \Complete==0 + lxv vs24, DISP16(\Index,32+\OffsetB)(\BREG) + lxv vs28, DISP16(\Index,32+16+\OffsetB)(\BREG) +.endif + xvmaddasp vs42, vs2,vs26 + xvmaddasp vs43, vs3,vs26 + xvmaddasp vs58, vs2,vs30 + xvmaddasp vs59, vs3,vs30 +.if \Complete==0 + xxperm vs26, vs24, permute_mask + xxperm vs30, vs28, permute_mask +.endif + xvmaddasp vs38, vs2,vs25 + xvmaddasp vs39, vs3,vs25 + xvmaddasp vs54, vs2,vs29 + xvmaddasp vs55, vs3,vs29 +.if \Complete==0 + xxpermdi vs25, vs24, vs24,2 + xxpermdi vs29, vs28, vs28,2 +.endif + xvmaddasp vs46, vs2,vs27 + xvmaddasp vs47, vs3,vs27 + xvmaddasp vs62, vs2,vs31 + xvmaddasp vs63, vs3,vs31 +.if \Complete==0 + xxpermdi vs27, vs26, vs26,2 + xxpermdi vs31, vs30, vs30,2 .endif .if \Complete==0 - lxv vs24, DISP16(\Index,32+\OffsetB)(\BREG) - lxv vs28, 
DISP16(\Index,32+16+\OffsetB)(\BREG) + lxv vs2, DISP32(\Index,64+32+\OffsetA)(\AREG) + lxv vs3, DISP32(\Index,64+48+\OffsetA)(\AREG) +.endif - lxv vs0, DISP32(\Index,64+\OffsetA)(\AREG) - lxv vs1, DISP32(\Index,64+16+\OffsetA)(\AREG) - lxv vs2, DISP32(\Index,64+32+\OffsetA)(\AREG) - lxv vs3, DISP32(\Index,64+48+\OffsetA)(\AREG) - xxperm vs26, vs24, permute_mask - xxperm vs30, vs28, permute_mask - xxpermdi vs25, vs24, vs24,2 - xxpermdi vs29, vs28, vs28,2 -.endif .if \IsLast==1 .if \Complete==1 - addi \BREG, \BREG, DISP16(\Index,32+\OffsetB) - addi \AREG, \AREG, DISP32(\Index,64+\OffsetA) - -.else - addi \BREG, \BREG, DISP16(\Index,64) - addi \AREG, \AREG, DISP32(\Index,128) -.endif -.endif - -.if \First==1 - xvmulsp vs32, vs4,vs8 - xvmulsp vs33, vs5,vs8 - xvmulsp vs34, vs6,vs8 - xvmulsp vs35, vs7,vs8 + addi \BREG, \BREG, DISP16(\Index,\OffsetB) + addi \AREG, \AREG, DISP32(\Index,\OffsetA) - xvmulsp vs36, vs4,vs9 - xvmulsp vs37, vs5,vs9 - xvmulsp vs38, vs6,vs9 - xvmulsp vs39, vs7,vs9 .else - xvmaddasp vs32, vs4,vs8 - xvmaddasp vs33, vs5,vs8 - xvmaddasp vs34, vs6,vs8 - xvmaddasp vs35, vs7,vs8 + addi \BREG, \BREG, DISP16(\Index,64) + addi \AREG, \AREG, DISP32(\Index,128) - xvmaddasp vs36, vs4,vs9 - xvmaddasp vs37, vs5,vs9 - xvmaddasp vs38, vs6,vs9 - xvmaddasp vs39, vs7,vs9 -.endif - -.if \Complete==0 - xxpermdi vs27, vs26, vs26,2 - xxpermdi vs31, vs30, vs30,2 - .endif -.if \First==1 - xvmulsp vs40, vs4,vs10 - xvmulsp vs41, vs5,vs10 - xvmulsp vs42, vs6,vs10 - xvmulsp vs43, vs7,vs10 - - xvmulsp vs44, vs4,vs11 - xvmulsp vs45, vs5,vs11 - xvmulsp vs46, vs6,vs11 - xvmulsp vs47, vs7,vs11 - - xvmulsp vs48, vs4,vs12 - xvmulsp vs49, vs5,vs12 - xvmulsp vs50, vs6,vs12 - xvmulsp vs51, vs7,vs12 - - xvmulsp vs52, vs4,vs13 - xvmulsp vs53, vs5,vs13 - xvmulsp vs54, vs6,vs13 - xvmulsp vs55, vs7,vs13 - - xvmulsp vs56, vs4,vs14 - xvmulsp vs57, vs5,vs14 - xvmulsp vs58, vs6,vs14 - xvmulsp vs59, vs7,vs14 - - xvmulsp vs60, vs4,vs15 - xvmulsp vs61, vs5,vs15 - xvmulsp vs62, vs6,vs15 - xvmulsp vs63, vs7,vs15 - -.else - xvmaddasp vs40, vs4,vs10 - xvmaddasp vs41, vs5,vs10 - xvmaddasp vs42, vs6,vs10 - xvmaddasp vs43, vs7,vs10 - - xvmaddasp vs44, vs4,vs11 - xvmaddasp vs45, vs5,vs11 - xvmaddasp vs46, vs6,vs11 - xvmaddasp vs47, vs7,vs11 - - xvmaddasp vs48, vs4,vs12 - xvmaddasp vs49, vs5,vs12 - xvmaddasp vs50, vs6,vs12 - xvmaddasp vs51, vs7,vs12 - - xvmaddasp vs52, vs4,vs13 - xvmaddasp vs53, vs5,vs13 - xvmaddasp vs54, vs6,vs13 - xvmaddasp vs55, vs7,vs13 - - xvmaddasp vs56, vs4,vs14 - xvmaddasp vs57, vs5,vs14 - xvmaddasp vs58, vs6,vs14 - xvmaddasp vs59, vs7,vs14 +.endif - xvmaddasp vs60, vs4,vs15 - xvmaddasp vs61, vs5,vs15 - xvmaddasp vs62, vs6,vs15 - xvmaddasp vs63, vs7,vs15 - -.endif .endm @@ -763,7 +451,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxmrghw vs2, vs37, vs41 xxmrghw vs3, vs33, vs45 - +#ifndef TRMMKERNEL + lxv vs32, 0(CO) + lxv vs33, 16(CO) +#endif xxmrglw vs16, vs34, vs46 xxmrglw vs18, vs38, vs42 @@ -784,176 +475,203 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
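The reworked KERNEL8x16_2 above fuses two unrolled k-iterations into one macro body and interleaves the lxv loads and xxperm/xxpermdi shuffles for the next iteration between the xvmaddasp FMAs of the current one; the Complete flag suppresses the final round of reloads so the last call does not read past the end of the panel, and IsLast folds the AO/BO pointer bumps into the same pass. A minimal C sketch of this double-buffered load/compute overlap (block width 8 and the function name are illustrative, not from the source; n is assumed to be a positive multiple of 8):

#include <stddef.h>

/* y += alpha * x, processed in blocks of 8: while the FMAs of the
   current block run, the loads of the next block are already issued.
   The final block skips the lookahead load ("Complete == 1"). */
static void axpy_pipelined(size_t n, float alpha, const float *x, float *y)
{
    float cur[8], nxt[8];
    size_t i, j;

    for (j = 0; j < 8; j++)            /* prologue: preload first block */
        cur[j] = x[j];

    for (i = 0; i + 8 < n; i += 8) {
        for (j = 0; j < 8; j++)        /* lookahead load (Complete == 0) */
            nxt[j] = x[i + 8 + j];
        for (j = 0; j < 8; j++)        /* FMAs on the current block */
            y[i + j] += alpha * cur[j];
        for (j = 0; j < 8; j++)        /* rotate buffers */
            cur[j] = nxt[j];
    }
    for (j = 0; j < 8; j++)            /* epilogue (Complete == 1) */
        y[i + j] += alpha * cur[j];
}

In the assembly the two "buffers" are the register sets vs0-vs3/vs24-vs31 and vs4-vs7/vs8-vs15, so the rotation costs no instructions at all.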
xxmrghw vs30, vs39, vs43 xxmrghw vs31, vs35, vs47 - - xxperm vs8, vs0, save_permute_1 - xxperm vs10, vs1, save_permute_1 - xxperm vs9, vs0, save_permute_2 - xxperm vs11, vs1, save_permute_2 - -#ifndef TRMMKERNEL - lxv vs32, 0(CO) - lxv vs33, 16(CO) +#ifndef TRMMKERNEL lxv vs34, 32(CO) lxv vs35, 48(CO) #endif - xxlor vs25, vs24, vs24 - xxlor vs27, vs26, vs26 - + xxperm vs8, vs0, save_permute_1 + xxperm vs10, vs1, save_permute_1 #ifndef TRMMKERNEL lxv vs36, 0(T1) lxv vs37, 16(T1) +#endif + xxperm vs9, vs0, save_permute_2 + xxperm vs11, vs1, save_permute_2 + +#ifndef TRMMKERNEL lxv vs38, 32(T1) lxv vs39, 48(T1) #endif + + xxlor vs25, vs24, vs24 + xxlor vs27, vs26, vs26 + + + #ifndef TRMMKERNEL lxv vs40, 0(T2) lxv vs41, 16(T2) +#endif + + xxperm vs12, vs2, save_permute_1 + xxperm vs14, vs3, save_permute_1 +#ifndef TRMMKERNEL lxv vs42, 32(T2) lxv vs43, 48(T2) #endif + + xxperm vs13, vs2, save_permute_2 + xxperm vs15, vs3, save_permute_2 #ifndef TRMMKERNEL lxv vs44, 0(T3) - lxv vs45, 16(T3) + lxv vs45, 16(T3) +#endif + xxperm vs16, vs4, save_permute_1 + xxperm vs18, vs5, save_permute_1 +#ifndef TRMMKERNEL lxv vs46, 32(T3) lxv vs47, 48(T3) #endif - xxperm vs12, vs2, save_permute_1 - xxperm vs14, vs3, save_permute_1 - - xxperm vs13, vs2, save_permute_2 - xxperm vs15, vs3, save_permute_2 + + - xxperm vs16, vs4, save_permute_1 - xxperm vs18, vs5, save_permute_1 xxperm vs17, vs4, save_permute_2 xxperm vs19, vs5, save_permute_2 - +#ifdef TRMMKERNEL + xvmulsp vs32, vs8, alpha_r + xvmulsp vs33, vs12, alpha_r +#else + xvmaddasp vs32, vs8, alpha_r + xvmaddasp vs33, vs12, alpha_r +#endif xxperm vs24, vs30, save_permute_1 xxperm vs26, vs31, save_permute_1 + + + stxv vs32, 0(CO) + stxv vs33, 16(CO) +#ifdef TRMMKERNEL + xvmulsp vs34, vs16, alpha_r + xvmulsp vs35, vs24, alpha_r +#else + xvmaddasp vs34, vs16, alpha_r + xvmaddasp vs35, vs24, alpha_r +#endif xxperm vs25, vs30, save_permute_2 xxperm vs27, vs31, save_permute_2 - /* multiply add normal way */ - -#ifdef TRMMKERNEL - xvmulsp vs32, vs8, alpha_r - xvmulsp vs33, vs12, alpha_r - xvmulsp vs34, vs16, alpha_r - xvmulsp vs35, vs24, alpha_r + stxv vs34, 32(CO) + stxv vs35, 48(CO) +#ifdef TRMMKERNEL xvmulsp vs36, vs9, alpha_r - xvmulsp vs37, vs13, alpha_r + xvmulsp vs37, vs13, alpha_r +#else + xvmaddasp vs36, vs9, alpha_r + xvmaddasp vs37, vs13, alpha_r +#endif + stxv vs36, 0(T1) + stxv vs37, 16(T1) +#ifdef TRMMKERNEL xvmulsp vs38, vs17, alpha_r xvmulsp vs39, vs25, alpha_r -#else - xvmaddasp vs32, vs8, alpha_r - xvmaddasp vs33, vs12, alpha_r - xvmaddasp vs34, vs16, alpha_r - xvmaddasp vs35, vs24, alpha_r - xvmaddasp vs36, vs9, alpha_r - xvmaddasp vs37, vs13, alpha_r +#else xvmaddasp vs38, vs17, alpha_r xvmaddasp vs39, vs25, alpha_r #endif - - + stxv vs38, 32(T1) + stxv vs39, 48(T1) #ifdef TRMMKERNEL xvmulsp vs40, vs10, alpha_r - xvmulsp vs41, vs14, alpha_r - xvmulsp vs42, vs18, alpha_r - xvmulsp vs43, vs26, alpha_r - xvmulsp vs44, vs11, alpha_r - xvmulsp vs45, vs15, alpha_r - xvmulsp vs46, vs19, alpha_r - xvmulsp vs47, vs27, alpha_r -#else - + xvmulsp vs41, vs14, alpha_r +#else xvmaddasp vs40, vs10, alpha_r xvmaddasp vs41, vs14, alpha_r - xvmaddasp vs42, vs18, alpha_r - xvmaddasp vs43, vs26, alpha_r - xvmaddasp vs44, vs11, alpha_r - xvmaddasp vs45, vs15, alpha_r - xvmaddasp vs46, vs19, alpha_r - xvmaddasp vs47, vs27, alpha_r - -#endif - - stxv vs32, 0(CO) - stxv vs33, 16(CO) - stxv vs34, 32(CO) - stxv vs35, 48(CO) - - stxv vs36, 0(T1) - stxv vs37, 16(T1) - stxv vs38, 32(T1) - stxv vs39, 48(T1) +#endif stxv vs40, 0(T2) stxv vs41, 16(T2) +#ifdef TRMMKERNEL + 
xvmulsp vs42, vs18, alpha_r + xvmulsp vs43, vs26, alpha_r +#else + xvmaddasp vs42, vs18, alpha_r + xvmaddasp vs43, vs26, alpha_r +#endif stxv vs42, 32(T2) stxv vs43, 48(T2) +#ifdef TRMMKERNEL + xvmulsp vs44, vs11, alpha_r + xvmulsp vs45, vs15, alpha_r +#else + xvmaddasp vs44, vs11, alpha_r + xvmaddasp vs45, vs15, alpha_r +#endif stxv vs44, 0(T3) stxv vs45, 16(T3) +#ifdef TRMMKERNEL + xvmulsp vs46, vs19, alpha_r + xvmulsp vs47, vs27, alpha_r +#else + xvmaddasp vs46, vs19, alpha_r + xvmaddasp vs47, vs27, alpha_r +#endif stxv vs46, 32(T3) stxv vs47, 48(T3) /*****the same with the second 8X8 ****/ -#ifndef TRMMKERNEL - + #ifndef TRMMKERNEL lxv vs32, 0(T4) lxv vs33, 16(T4) - lxv vs34, 32(T4) - lxv vs35, 48(T4) - lxv vs36, 0(T5) - lxv vs37, 16(T5) - lxv vs38,32(T5) - lxv vs39, 48(T5) #endif - xxmrglw vs8, vs48, vs60 xxmrglw vs10, vs52, vs56 - +#ifndef TRMMKERNEL + lxv vs34, 32(T4) + lxv vs35, 48(T4) +#endif xxmrghw vs1, vs48, vs60 xxmrghw vs0, vs52, vs56 +#ifndef TRMMKERNEL + lxv vs36, 0(T5) + lxv vs37, 16(T5) +#endif xxmrglw vs12, vs49, vs61 xxmrglw vs14, vs53, vs57 - +#ifndef TRMMKERNEL + lxv vs38,32(T5) + lxv vs39, 48(T5) +#endif + + xxmrghw vs2, vs53, vs57 + xxmrghw vs3, vs49, vs61 #ifndef TRMMKERNEL lxv vs40, 0(T6) - lxv vs41, 16(T6) - lxv vs42, 32(T6) - lxv vs43, 48(T6) - lxv vs44, 0(T7) - lxv vs45, 16(T7) - lxv vs46, 32(T7) - lxv vs47, 48(T7) + lxv vs41, 16(T6) #endif - xxmrghw vs2, vs53, vs57 - xxmrghw vs3, vs49, vs61 - xxmrglw vs16, vs50, vs62 xxmrglw vs18, vs54, vs58 - +#ifndef TRMMKERNEL + lxv vs42, 32(T6) + lxv vs43, 48(T6) +#endif xxlor vs9, vs8, vs8 xxlor vs11, vs10, vs10 xxmrghw vs4, vs54, vs58 xxmrghw vs5, vs50, vs62 - +#ifndef TRMMKERNEL + lxv vs44, 0(T7) + lxv vs45, 16(T7) +#endif xxlor vs13, vs12, vs12 xxlor vs15, vs14, vs14 xxmrglw vs24, vs51, vs63 - xxmrglw vs26, vs55, vs59 - + xxmrglw vs26, vs55, vs59 +#ifndef TRMMKERNEL + lxv vs46, 32(T7) + lxv vs47, 48(T7) +#endif xxlor vs17, vs16, vs16 xxlor vs19, vs18, vs18 xxmrghw vs30, vs55, vs59 - xxmrghw vs31, vs51, vs63 + xxmrghw vs31, vs51, vs63 + + xxperm vs8, vs0, save_permute_1 xxperm vs10, vs1, save_permute_1 @@ -965,11 +683,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlor vs27, vs26, vs26 xxperm vs12, vs2, save_permute_1 xxperm vs14, vs3, save_permute_1 + xxperm vs13, vs2, save_permute_2 xxperm vs15, vs3, save_permute_2 - + #ifdef TRMMKERNEL + xvmulsp vs32, vs8, alpha_r + xvmulsp vs33, vs12, alpha_r +#else + xvmaddasp vs32, vs8, alpha_r + xvmaddasp vs33, vs12, alpha_r +#endif xxperm vs16, vs4, save_permute_1 xxperm vs18, vs5, save_permute_1 + stxv vs32, 0(T4) + stxv vs33, 16(T4) xxperm vs17, vs4, save_permute_2 xxperm vs19, vs5, save_permute_2 xxperm vs24, vs30, save_permute_1 @@ -977,64 +704,77 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxperm vs25, vs30, save_permute_2 xxperm vs27, vs31, save_permute_2 -#ifdef TRMMKERNEL - xvmulsp vs32, vs8, alpha_r - xvmulsp vs33, vs12, alpha_r +#ifdef TRMMKERNEL xvmulsp vs34, vs16, alpha_r - xvmulsp vs35, vs24, alpha_r + xvmulsp vs35, vs24, alpha_r +#else + xvmaddasp vs34, vs16, alpha_r + xvmaddasp vs35, vs24, alpha_r +#endif + stxv vs34, 32(T4) + stxv vs35, 48(T4) + +#ifdef TRMMKERNEL xvmulsp vs36, vs9, alpha_r - xvmulsp vs37, vs13, alpha_r + xvmulsp vs37, vs13, alpha_r +#else + xvmaddasp vs36, vs9, alpha_r + xvmaddasp vs37, vs13, alpha_r +#endif + stxv vs36, 0(T5) + stxv vs37, 16(T5) + +#ifdef TRMMKERNEL xvmulsp vs38, vs17, alpha_r xvmulsp vs39, vs25, alpha_r -#else - xvmaddasp vs32, vs8, alpha_r - xvmaddasp vs33, vs12, alpha_r - xvmaddasp vs34, vs16, alpha_r - xvmaddasp vs35, vs24, alpha_r - xvmaddasp vs36, vs9, alpha_r - xvmaddasp vs37, vs13, alpha_r +#else xvmaddasp vs38, vs17, alpha_r xvmaddasp vs39, vs25, alpha_r #endif - stxv vs32, 0(T4) - stxv vs33, 16(T4) - stxv vs34, 32(T4) - stxv vs35, 48(T4) - stxv vs36, 0(T5) - stxv vs37, 16(T5) + + stxv vs38, 32(T5) stxv vs39, 48(T5) + #ifdef TRMMKERNEL xvmulsp vs40, vs10, alpha_r - xvmulsp vs41, vs14, alpha_r - xvmulsp vs42, vs18, alpha_r - xvmulsp vs43, vs26, alpha_r - xvmulsp vs44, vs11, alpha_r - xvmulsp vs45, vs15, alpha_r - xvmulsp vs46, vs19, alpha_r - xvmulsp vs47, vs27, alpha_r -#else - + xvmulsp vs41, vs14, alpha_r +#else xvmaddasp vs40, vs10, alpha_r xvmaddasp vs41, vs14, alpha_r - xvmaddasp vs42, vs18, alpha_r - xvmaddasp vs43, vs26, alpha_r - xvmaddasp vs44, vs11, alpha_r - xvmaddasp vs45, vs15, alpha_r - xvmaddasp vs46, vs19, alpha_r - xvmaddasp vs47, vs27, alpha_r - #endif - stxv vs40, 0(T6) - stxv vs41, 16(T6) + stxv vs41, 16(T6) +#ifdef TRMMKERNEL + xvmulsp vs42, vs18, alpha_r + xvmulsp vs43, vs26, alpha_r +#else + xvmaddasp vs42, vs18, alpha_r + xvmaddasp vs43, vs26, alpha_r +#endif stxv vs42, 32(T6) stxv vs43, 48(T6) +#ifdef TRMMKERNEL + xvmulsp vs44, vs11, alpha_r + xvmulsp vs45, vs15, alpha_r +#else + xvmaddasp vs44, vs11, alpha_r + xvmaddasp vs45, vs15, alpha_r +#endif + stxv vs44, 0(T7) stxv vs45, 16(T7) +#ifdef TRMMKERNEL + xvmulsp vs46, vs19, alpha_r + xvmulsp vs47, vs27, alpha_r +#else + xvmaddasp vs46, vs19, alpha_r + xvmaddasp vs47, vs27, alpha_r +#endif + stxv vs46, 32(T7) stxv vs47, 48(T7) @@ -1224,12 +964,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxperm vs10, vs8, permute_mask xxperm vs14, vs12, permute_mask - xxpermdi vs9, vs8, vs8,2 - xxpermdi vs13, vs12, vs12,2 xvmaddasp vs32, vs0,vs24 xvmaddasp vs33, vs1,vs24 + xxpermdi vs9, vs8, vs8,2 + xxpermdi vs13, vs12, vs12,2 + + xvmaddasp vs36, vs0,vs25 xvmaddasp vs37, vs1,vs25 @@ -1247,21 +989,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs52, vs0,vs29 xvmaddasp vs53, vs1,vs29 - + lxv vs24, DISP32(\Index,32+\OffsetB)(\BREG) + lxv vs28, DISP32(\Index,32+16+\OffsetB)(\BREG) xvmaddasp vs56, vs0,vs30 xvmaddasp vs57, vs1,vs30 xvmaddasp vs60, vs0,vs31 xvmaddasp vs61, vs1,vs31 - lxv vs24, DISP32(\Index,32+\OffsetB)(\BREG) - lxv vs28, DISP32(\Index,32+16+\OffsetB)(\BREG) + xxperm vs26, vs24, permute_mask + xxperm vs30, vs28, permute_mask lxv vs0, DISP32(\Index,32+\OffsetA)(\AREG) lxv vs1, DISP32(\Index,32+16+\OffsetA)(\AREG) - xxperm vs26, vs24, permute_mask - xxperm vs30, vs28, permute_mask + xxpermdi vs25, vs24, vs24,2 xxpermdi vs29, vs28, vs28,2 @@ -1285,21 +1027,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
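Throughout this save path the alpha application is fused with the stores: each pair of result vectors is scaled and written back with stxv as soon as its permutes complete, rather than scaling all sixteen vectors first and storing them in one batch. The operation is chosen per build: a TRMMKERNEL build overwrites C outright (xvmulsp), because the triangular kernel must not read the previous contents of C, while the plain GEMM build accumulates into the C tile loaded earlier with lxv (xvmaddasp). In scalar C terms (acc stands for the transposed accumulators; names are illustrative):

/* the two write-back modes of the save macros */
#ifdef TRMMKERNEL
#define SAVE_ONE(c, acc, alpha) ((c) = (alpha) * (acc))    /* xvmulsp   */
#else
#define SAVE_ONE(c, acc, alpha) ((c) += (alpha) * (acc))   /* xvmaddasp */
#endif

static void save_row(float *c, const float *acc, float alpha, int n)
{
    for (int i = 0; i < n; i++)
        SAVE_ONE(c[i], acc[i], alpha);   /* followed by the stxv back to C */
}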
xvmaddasp vs52, vs4,vs13 xvmaddasp vs53, vs5,vs13 - + lxv vs8, DISP32(\Index,64+\OffsetB)(\BREG) + lxv vs12, DISP32(\Index,64+16+\OffsetB)(\BREG) xvmaddasp vs56, vs4,vs14 xvmaddasp vs57, vs5,vs14 xvmaddasp vs60, vs4,vs15 xvmaddasp vs61, vs5,vs15 - lxv vs8, DISP32(\Index,64+\OffsetB)(\BREG) - lxv vs12, DISP32(\Index,64+16+\OffsetB)(\BREG) + xxperm vs10, vs8, permute_mask + xxperm vs14, vs12, permute_mask + lxv vs4, DISP32(\Index,64+0+\OffsetA)(\AREG) lxv vs5, DISP32(\Index,64+16+\OffsetA)(\AREG) - xxperm vs10, vs8, permute_mask - xxperm vs14, vs12, permute_mask + xxpermdi vs9, vs8, vs8,2 xxpermdi vs13, vs12, vs12,2 @@ -1323,22 +1066,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs52, vs0,vs29 xvmaddasp vs53, vs1,vs29 - +.if \Complete==0 + lxv vs24, DISP32(\Index,96+\OffsetB)(\BREG) + lxv vs28, DISP32(\Index,96+16+\OffsetB)(\BREG) +.endif xvmaddasp vs56, vs0,vs30 xvmaddasp vs57, vs1,vs30 - +.if \Complete==0 + xxperm vs26, vs24, permute_mask + xxperm vs30, vs28, permute_mask +.endif xvmaddasp vs60, vs0,vs31 xvmaddasp vs61, vs1,vs31 -.if \Complete==0 - lxv vs24, DISP32(\Index,96+\OffsetB)(\BREG) - lxv vs28, DISP32(\Index,96+16+\OffsetB)(\BREG) +.if \Complete==0 lxv vs0, DISP32(\Index,96+\OffsetA)(\AREG) lxv vs1, DISP32(\Index,96+16+\OffsetA)(\AREG) +.endif - xxperm vs26, vs24, permute_mask - xxperm vs30, vs28, permute_mask +.if \Complete==0 xxpermdi vs25, vs24, vs24,2 xxpermdi vs29, vs28, vs28,2 diff --git a/kernel/power/sgemm_tcopy_macros_16_power8.S b/kernel/power/sgemm_tcopy_macros_16_power8.S index 53f9c8b82..ed592a604 100644 --- a/kernel/power/sgemm_tcopy_macros_16_power8.S +++ b/kernel/power/sgemm_tcopy_macros_16_power8.S @@ -38,7 +38,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=4 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x16', ` +#else .macro COPY_4x16 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -88,13 +92,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs46, o32, T1 stxvw4x vs47, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x8', ` +#else .macro COPY_4x8 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -124,13 +136,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs38, o32, T1 stxvw4x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x4', ` +#else .macro COPY_4x4 +#endif lxvw4x vs32, o0, A0 @@ -150,13 +170,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
stxvw4x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x2', ` +#else .macro COPY_4x2 +#endif lxsspx vs32, o0, A0 lxsspx vs33, o4, A0 @@ -190,13 +218,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs38, o0, T1 stxsspx vs39, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x1', ` +#else .macro COPY_4x1 +#endif lxsspx vs32, o0, A0 @@ -218,13 +254,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs35, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x16', ` +#else .macro COPY_2x16 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -250,13 +294,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs38, o32, T1 stxvw4x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x8', ` +#else .macro COPY_2x8 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -272,13 +324,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs34, o32, T1 stxvw4x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x4', ` +#else .macro COPY_2x4 +#endif lxvw4x vs32, o0, A0 @@ -290,13 +350,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs33, o16, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x2', ` +#else .macro COPY_2x2 +#endif lxsspx vs32, o0, A0 lxsspx vs33, o4, A0 @@ -314,13 +382,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs34, o0, T1 stxsspx vs35, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x1', ` +#else .macro COPY_2x1 +#endif lxsspx vs32, o0, A0 @@ -332,13 +408,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
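The _AIX branches wrapped around every macro in these copy files work around the AIX system assembler, which does not understand gas-style .macro/.endm; on AIX the source is expected to pass through m4 first, so each body is registered as an m4 define(`NAME', `...') instead, and the C preprocessor selects whichever form the platform can expand. A rough C-preprocessor analogy of this one-body-two-macro-engines arrangement (COPY_BLOCK and copy_4x16 are illustrative stand-ins, not code from the tree):

#include <string.h>

/* one body, stamped out by whatever macro engine is available --
   here the C preprocessor plays the role that gas .macro takes on
   Linux and that m4 define() takes on AIX */
#define COPY_BLOCK(dst, src) \
    do { memcpy((dst), (src), 16 * sizeof(float)); } while (0)

void copy_4x16(float *dst, const float *a0, const float *a1,
               const float *a2, const float *a3)
{
    COPY_BLOCK(dst +  0, a0);   /* row 0: 16 floats */
    COPY_BLOCK(dst + 16, a1);   /* row 1 */
    COPY_BLOCK(dst + 32, a2);   /* row 2 */
    COPY_BLOCK(dst + 48, a3);   /* row 3 */
}

The m4 route keeps each macro body byte-for-byte identical on both paths; only the delimiters differ, which is why every .macro/.endm pair in these files gains the same four-line wrapper.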
stxsspx vs33, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x16', ` +#else .macro COPY_1x16 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -352,13 +436,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs34, o32, T1 stxvw4x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x8', ` +#else .macro COPY_1x8 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -368,13 +460,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs32, o0, T1 stxvw4x vs33, o16, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x4', ` +#else .macro COPY_1x4 +#endif lxvw4x vs32, o0, A0 @@ -382,13 +482,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs32, o0, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x2', ` +#else .macro COPY_1x2 +#endif lxsspx vs32, o0, A0 lxsspx vs33, o4, A0 @@ -398,13 +506,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs32, o0, T1 stxsspx vs33, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x1', ` +#else .macro COPY_1x1 +#endif lxsspx vs32, o0, A0 @@ -412,5 +528,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs32, o0, T1 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/sgemm_tcopy_macros_8_power8.S b/kernel/power/sgemm_tcopy_macros_8_power8.S index 1b71d5bb3..f80f095dc 100644 --- a/kernel/power/sgemm_tcopy_macros_8_power8.S +++ b/kernel/power/sgemm_tcopy_macros_8_power8.S @@ -38,7 +38,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=4 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x8', ` +#else .macro COPY_4x8 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -68,13 +72,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
stxvw4x vs38, o32, T1 stxvw4x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x4', ` +#else .macro COPY_4x4 +#endif lxvw4x vs32, o0, A0 @@ -94,13 +106,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x2', ` +#else .macro COPY_4x2 +#endif lxsspx vs32, o0, A0 lxsspx vs33, o4, A0 @@ -134,13 +154,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs38, o0, T1 stxsspx vs39, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x1', ` +#else .macro COPY_4x1 +#endif lxsspx vs32, o0, A0 @@ -162,13 +190,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs35, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x8', ` +#else .macro COPY_2x8 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -184,13 +220,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs34, o32, T1 stxvw4x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x4', ` +#else .macro COPY_2x4 +#endif lxvw4x vs32, o0, A0 @@ -202,13 +246,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs33, o16, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x2', ` +#else .macro COPY_2x2 +#endif lxsspx vs32, o0, A0 lxsspx vs33, o4, A0 @@ -226,13 +278,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs34, o0, T1 stxsspx vs35, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x1', ` +#else .macro COPY_2x1 +#endif lxsspx vs32, o0, A0 @@ -244,13 +304,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
stxsspx vs33, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x8', ` +#else .macro COPY_1x8 +#endif lxvw4x vs32, o0, A0 lxvw4x vs33, o16, A0 @@ -260,13 +328,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs32, o0, T1 stxvw4x vs33, o16, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x4', ` +#else .macro COPY_1x4 +#endif lxvw4x vs32, o0, A0 @@ -274,13 +350,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvw4x vs32, o0, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x2', ` +#else .macro COPY_1x2 +#endif lxsspx vs32, o0, A0 lxsspx vs33, o4, A0 @@ -290,13 +374,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs32, o0, T1 stxsspx vs33, o4, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x1', ` +#else .macro COPY_1x1 +#endif lxsspx vs32, o0, A0 @@ -304,5 +396,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxsspx vs32, o0, T1 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/sgemv_n.c b/kernel/power/sgemv_n.c index 9704757fe..5dfb18f5b 100644 --- a/kernel/power/sgemv_n.c +++ b/kernel/power/sgemv_n.c @@ -24,7 +24,10 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *****************************************************************************/
+#if !defined(__VEC__) || !defined(__ALTIVEC__)
+#include "../arm/gemv_n.c"
+#else
 
 #include "common.h"
 
@@ -174,7 +177,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
 	BLASLONG n2;
 	BLASLONG lda4 = lda << 2;
 	BLASLONG lda8 = lda << 3;
-	FLOAT xbuffer[8],*ybuffer;
+	FLOAT xbuffer[8] __attribute__((aligned(16)));
+	FLOAT *ybuffer;
 
 	if ( m < 1 ) return(0);
 	if ( n < 1 ) return(0);
@@ -462,4 +466,5 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
 
 	return(0);
 }
+#endif
diff --git a/kernel/power/sgemv_n_8.c b/kernel/power/sgemv_n_8.c
index 9bc93ced6..64696236a 100644
--- a/kernel/power/sgemv_n_8.c
+++ b/kernel/power/sgemv_n_8.c
@@ -213,7 +213,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
 	BLASLONG n2;
 	BLASLONG lda4 = lda << 2;
 	BLASLONG lda8 = lda << 3;
-	FLOAT xbuffer[8],*ybuffer;
+	FLOAT xbuffer[8] __attribute__((aligned(16)));
+	FLOAT *ybuffer;
 
 	if ( m < 1 ) return(0);
 	if ( n < 1 ) return(0);
diff --git a/kernel/power/sgemv_t.c b/kernel/power/sgemv_t.c
index 96434a13f..62c517a9d 100644
--- a/kernel/power/sgemv_t.c
+++ b/kernel/power/sgemv_t.c
@@ -24,6 +24,10 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
+#if !defined(__VEC__) || !defined(__ALTIVEC__)
+#include "../arm/gemv_t.c"
+
+#else
 
 #include "common.h"
 
@@ -177,10 +181,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
 	BLASLONG m1;
 	BLASLONG m2;
 	BLASLONG m3;
-	BLASLONG n2;
-
-	FLOAT ybuffer[8], *xbuffer;
-
+	BLASLONG n2;
+	FLOAT ybuffer[8] __attribute__((aligned(16)));
+	FLOAT *xbuffer;
 	if (m < 1) return (0);
 	if (n < 1) return (0);
@@ -478,3 +481,4 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
 
 }
+#endif
diff --git a/kernel/power/sgemv_t_8.c b/kernel/power/sgemv_t_8.c
index 5e9cd63ac..b90512162 100644
--- a/kernel/power/sgemv_t_8.c
+++ b/kernel/power/sgemv_t_8.c
@@ -204,8 +204,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
 	BLASLONG m3;
 	BLASLONG n2;
-	FLOAT ybuffer[8], *xbuffer;
-
+	FLOAT ybuffer[8] __attribute__((aligned(16)));
+	FLOAT *xbuffer;
 	if (m < 1) return (0);
 	if (n < 1) return (0);
diff --git a/kernel/power/srot.c b/kernel/power/srot.c
index 6af813c16..3e4f93e2a 100644
--- a/kernel/power/srot.c
+++ b/kernel/power/srot.c
@@ -39,8 +39,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
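The sgemv_* changes above all follow one pattern: when the compiler provides no AltiVec/VSX support (__VEC__ and __ALTIVEC__ undefined), the whole vectorized file collapses to an #include of the generic C kernel under kernel/arm/, and the small stack staging buffers gain __attribute__((aligned(16))) so that 16-byte vector loads on xbuffer/ybuffer can never hit a misaligned stack slot. A self-contained sketch of the pattern (function names and bodies are illustrative):

/* fall back to scalar code when the vector ISA is absent,
   as the diff does via #include "../arm/gemv_n.c" */
#if !defined(__VEC__) || !defined(__ALTIVEC__)
static void gemv_col(long m, const float *a, const float *x, float *y)
{
    for (long i = 0; i < m; i++)   /* portable scalar kernel */
        y[i] += a[i] * x[0];
}
#else
static void gemv_col(long m, const float *a, const float *x, float *y)
{
    for (long i = 0; i < m; i++)   /* the real build uses VSX here */
        y[i] += a[i] * x[0];
}
#endif

void gemv_driver(long m, const float *a, const float *x, float *y)
{
    /* aligned(16): always a safe target for lxv-style vector loads */
    float xbuffer[8] __attribute__((aligned(16)));
    for (int j = 0; j < 8; j++)
        xbuffer[j] = x[j];
    gemv_col(m, a, xbuffer, y);
}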
#pragma GCC optimize "O1" +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(POWER8) || defined(POWER9) #include "srot_microk_power8.c" +#elif defined(POWER10) +#include "srot_microk_power10.c" +#endif #endif @@ -113,6 +117,23 @@ int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT if ( (inc_x == 1) && (inc_y == 1) ) { +#if defined(POWER10) + if ( n >= 16 ) + { + BLASLONG align = ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 2) & 0x7; + for (i = 0; i < align; i++) { + temp = c*x[i] + s*y[i] ; + y[i] = c*y[i] - s*x[i] ; + x[i] = temp ; + } + } + BLASLONG n1 = (n-i) & -16; + if ( n1 > 0 ) + { + srot_kernel_16(n1, &x1[i], &y1[i], c, s); + i+=n1; + } +#else BLASLONG n1 = n & -16; if ( n1 > 0 ) { @@ -120,6 +141,7 @@ int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT i=n1; } +#endif while(i < n) { temp = c*x[i] + s*y[i] ; diff --git a/kernel/power/srot_microk_power10.c b/kernel/power/srot_microk_power10.c new file mode 100644 index 000000000..c54c30742 --- /dev/null +++ b/kernel/power/srot_microk_power10.c @@ -0,0 +1,151 @@ +/*************************************************************************** +Copyright (c) 2021, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define HAVE_KERNEL_16 1 + +static void srot_kernel_16 (long n, float *x, float *y, float c, float s) +{ + __asm__ + ( + "xscvdpspn 36, %x5 \n\t" // load c to all words + "xxspltw 36, 36, 0 \n\t" + + "xscvdpspn 37, %x6 \n\t" // load s to all words + "xxspltw 37, 37, 0 \n\t" + "lxvp 32, 0(%3) \n\t" // load x + "lxvp 34, 32(%3) \n\t" + "lxvp 48, 0(%4) \n\t" // load y + "lxvp 50, 32(%4) \n\t" + + "addic. 
%2, %2, -16 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "xvmulsp 40, 32, 36 \n\t" // c * x + "xvmulsp 41, 33, 36 \n\t" + "xvmulsp 42, 34, 36 \n\t" + "xvmulsp 43, 35, 36 \n\t" + + "xvmulsp 44, 32, 37 \n\t" // s * x + "xvmulsp 45, 33, 37 \n\t" + "xvmulsp 46, 34, 37 \n\t" + "xvmulsp 47, 35, 37 \n\t" + + "lxvp 32, 64(%3) \n\t" // load x + "lxvp 34, 96(%3) \n\t" + "xvmulsp 52, 48, 36 \n\t" // c * y + "xvmulsp 53, 49, 36 \n\t" + "xvmulsp 54, 50, 36 \n\t" + "xvmulsp 55, 51, 36 \n\t" + + "xvmulsp 38, 48, 37 \n\t" // s * y + "xvmulsp 39, 49, 37 \n\t" + "xvmulsp 56, 50, 37 \n\t" + "xvmulsp 57, 51, 37 \n\t" + + "lxvp 48, 64(%4) \n\t" // load y + "lxvp 50, 96(%4) \n\t" + + "xvaddsp 40, 40, 38 \n\t" // c * x + s * y + "xvaddsp 41, 41, 39 \n\t" // c * x + s * y + "xvaddsp 42, 42, 56 \n\t" // c * x + s * y + "xvaddsp 43, 43, 57 \n\t" // c * x + s * y + + "stxvp 40, 0(%3) \n\t" // store x + "stxvp 42, 32(%3) \n\t" + + "xvsubsp 52, 52, 44 \n\t" // c * y - s * x + "xvsubsp 53, 53, 45 \n\t" // c * y - s * x + "xvsubsp 54, 54, 46 \n\t" // c * y - s * x + "xvsubsp 55, 55, 47 \n\t" // c * y - s * x + + "stxvp 52, 0(%4) \n\t" // store y + "stxvp 54, 32(%4) \n\t" + + "addi %3, %3, 64 \n\t" + "addi %4, %4, 64 \n\t" + + "addic. %2, %2, -16 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "xvmulsp 40, 32, 36 \n\t" // c * x + "xvmulsp 41, 33, 36 \n\t" + "xvmulsp 42, 34, 36 \n\t" + "xvmulsp 43, 35, 36 \n\t" + + "xvmulsp 52, 48, 36 \n\t" // c * y + "xvmulsp 53, 49, 36 \n\t" + "xvmulsp 54, 50, 36 \n\t" + "xvmulsp 55, 51, 36 \n\t" + + "xvmulsp 44, 32, 37 \n\t" // s * x + "xvmulsp 45, 33, 37 \n\t" + "xvmulsp 46, 34, 37 \n\t" + "xvmulsp 47, 35, 37 \n\t" + + "xvmulsp 38, 48, 37 \n\t" // s * y + "xvmulsp 39, 49, 37 \n\t" + "xvmulsp 56, 50, 37 \n\t" + "xvmulsp 57, 51, 37 \n\t" + + "xvaddsp 40, 40, 38 \n\t" // c * x + s * y + "xvaddsp 41, 41, 39 \n\t" // c * x + s * y + "xvaddsp 42, 42, 56 \n\t" // c * x + s * y + "xvaddsp 43, 43, 57 \n\t" // c * x + s * y + + "stxvp 40, 0(%3) \n\t" // store x + "stxvp 42, 32(%3) \n\t" + "xvsubsp 52, 52, 44 \n\t" // c * y - s * x + "xvsubsp 53, 53, 45 \n\t" // c * y - s * x + "xvsubsp 54, 54, 46 \n\t" // c * y - s * x + "xvsubsp 55, 55, 47 \n\t" // c * y - s * x + + "stxvp 52, 0(%4) \n\t" // store y + "stxvp 54, 32(%4) \n\t" + + "#n=%2 x=%0=%3 y=%1=%4 c=%5 s=%6\n" + : + "+m" (*x), + "+m" (*y), + "+r" (n), // 2 + "+b" (x), // 3 + "+b" (y) // 4 + : + "f" (c), // 5 + "f" (s) // 6 + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", + "vs48","vs49","vs50","vs51","vs52","vs53","vs54","vs55", + "vs56","vs57" + ); +} diff --git a/kernel/power/srot_microk_power8.c b/kernel/power/srot_microk_power8.c index 6eecb60a1..329a8cd06 100644 --- a/kernel/power/srot_microk_power8.c +++ b/kernel/power/srot_microk_power8.c @@ -71,10 +71,10 @@ static void srot_kernel_16 (long n, float *x, float *y, float c, float s) "addi %4, %4, 64 \n\t" "addic. %2, %2, -16 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmulsp 40, 32, 36 \n\t" // c * x "xvmulsp 41, 33, 36 \n\t" @@ -138,9 +138,9 @@ static void srot_kernel_16 (long n, float *x, float *y, float c, float s) "addi %4, %4, 128 \n\t" "addic. 
%2, %2, -16 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmulsp 40, 32, 36 \n\t" // c * x "xvmulsp 41, 33, 36 \n\t" diff --git a/kernel/power/sscal.c b/kernel/power/sscal.c index 4f3ba5698..65572a8c1 100644 --- a/kernel/power/sscal.c +++ b/kernel/power/sscal.c @@ -35,8 +35,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(POWER8) || defined(POWER9) #include "sscal_microk_power8.c" +#elif defined(POWER10) +#include "sscal_microk_power10.c" +#endif #endif @@ -100,12 +104,28 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS if ( da == 0.0 ) { +#if defined(POWER10) + if ( n >= 32 ) + { + BLASLONG align = ((32 - ((uintptr_t)x & (uintptr_t)0x1F)) >> 2) & 0x7; + for (j = 0; j < align; j++) { + x[j] = 0.0; + } + } + BLASLONG n1 = (n-j) & -32; + if ( n1 > 0 ) + { + sscal_kernel_16_zero(n1, &x[j]); + j+=n1; + } +#else BLASLONG n1 = n & -32; if ( n1 > 0 ) { sscal_kernel_16_zero(n1, x); j=n1; } +#endif while(j < n) { @@ -118,12 +138,28 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS else { +#if defined(POWER10) + if ( n >= 32 ) + { + BLASLONG align = ((32 - ((uintptr_t)x & (uintptr_t)0x1F)) >> 2) & 0x7; + for (j = 0; j < align; j++) { + x[j] = da * x[j]; + } + } + BLASLONG n1 = (n-j) & -32; + if ( n1 > 0 ) + { + sscal_kernel_16(n1, &x[j], da); + j+=n1; + } +#else BLASLONG n1 = n & -32; if ( n1 > 0 ) { sscal_kernel_16(n1, x, da); j=n1; } +#endif while(j < n) { diff --git a/kernel/power/sscal_microk_power10.c b/kernel/power/sscal_microk_power10.c new file mode 100644 index 000000000..a523a1675 --- /dev/null +++ b/kernel/power/sscal_microk_power10.c @@ -0,0 +1,135 @@ +/*************************************************************************** +Copyright (c) 2021, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#define HAVE_KERNEL_16 1 + +static void sscal_kernel_16 (long n, float *x, float alpha) +{ + __asm__ + ( + "dcbt 0, %2 \n\t" + + "xscvdpspn 48, %x3 \n\t" + "xxspltw 48, 48, 0 \n\t" + + "lxvp 32, 0(%2) \n\t" + "lxvp 34, 32(%2) \n\t" + "lxvp 36, 64(%2) \n\t" + "lxvp 38, 96(%2) \n\t" + + "addic. %1, %1, -32 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "xvmulsp 40, 32, 48 \n\t" + "xvmulsp 41, 33, 48 \n\t" + "xvmulsp 42, 34, 48 \n\t" + "xvmulsp 43, 35, 48 \n\t" + "lxvp 32, 128(%2) \n\t" + "lxvp 34, 160(%2) \n\t" + "xvmulsp 44, 36, 48 \n\t" + "xvmulsp 45, 37, 48 \n\t" + "xvmulsp 46, 38, 48 \n\t" + "xvmulsp 47, 39, 48 \n\t" + "lxvp 36, 192(%2) \n\t" + "lxvp 38, 224(%2) \n\t" + + "stxvp 40, 0(%2) \n\t" + "stxvp 42, 32(%2) \n\t" + "stxvp 44, 64(%2) \n\t" + "stxvp 46, 96(%2) \n\t" + + "addi %2, %2, 128 \n\t" + + "addic. %1, %1, -32 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + + "xvmulsp 40, 32, 48 \n\t" + "xvmulsp 41, 33, 48 \n\t" + "xvmulsp 42, 34, 48 \n\t" + "xvmulsp 43, 35, 48 \n\t" + + "xvmulsp 44, 36, 48 \n\t" + "xvmulsp 45, 37, 48 \n\t" + "xvmulsp 46, 38, 48 \n\t" + "xvmulsp 47, 39, 48 \n\t" + + "stxvp 40, 0(%2) \n\t" + "stxvp 42, 32(%2) \n\t" + "stxvp 44, 64(%2) \n\t" + "stxvp 46, 96(%2) \n\t" + + "#n=%1 alpha=%3 x=%0=%2" + : + "+m" (*x), + "+r" (n), // 1 + "+b" (x) // 2 + : + "f" (alpha) // 3 + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47","vs48" + ); +} + + +static void sscal_kernel_16_zero (long n, float *x) +{ + + __asm__ + ( + "xxlxor 32, 32, 32 \n\t" + "xxlxor 33, 33, 33 \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "stxvp 32, 0(%2) \n\t" + "stxvp 32, 32(%2) \n\t" + "stxvp 32, 64(%2) \n\t" + "stxvp 32, 96(%2) \n\t" + + "addi %2, %2, 128 \n\t" + + "addic. %1, %1, -32 \n\t" + "bgt one%= \n" + + "#n=%1 x=%0=%2 " + : + "=m" (*x), + "+r" (n), // 1 + "+b" (x) // 2 + : + : + "cr0","vs32","vs33" + ); +} diff --git a/kernel/power/sscal_microk_power8.c b/kernel/power/sscal_microk_power8.c index 058ff3399..88fba3166 100644 --- a/kernel/power/sscal_microk_power8.c +++ b/kernel/power/sscal_microk_power8.c @@ -56,10 +56,10 @@ static void sscal_kernel_16 (long n, float *x, float alpha) "addi %2, %2, 128 \n\t" "addic. %1, %1, -32 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmulsp 40, 32, %x3 \n\t" "xvmulsp 41, 33, %x3 \n\t" @@ -92,9 +92,9 @@ static void sscal_kernel_16 (long n, float *x, float alpha) "addi %2, %2, 256 \n\t" "addic. %1, %1, -32 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmulsp 40, 32, %x3 \n\t" "xvmulsp 41, 33, %x3 \n\t" @@ -147,8 +147,8 @@ static void sscal_kernel_16_zero (long n, float *x) ( "xxlxor %x3, %x3, %x3 \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "stxvd2x %x3, 0, %2 \n\t" "stxvd2x %x3, %4, %2 \n\t" @@ -162,7 +162,7 @@ static void sscal_kernel_16_zero (long n, float *x) "addi %2, %2, 128 \n\t" "addic. %1, %1, -32 \n\t" - "bgt 1b \n" + "bgt one%= \n" "#n=%1 x=%0=%2 t0=%x3 o16=%4 o32=%5 o48=%6 o64=%7 o80=%8 o96=%9 o112=%10" : diff --git a/kernel/power/sswap.c b/kernel/power/sswap.c index 23d13280f..dd249fd36 100644 --- a/kernel/power/sswap.c +++ b/kernel/power/sswap.c @@ -35,8 +35,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
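The new POWER10 branches in srot.c and sscal.c above, and the sswap.c hunk continuing below, share one shape: the POWER10 microkernels use paired vector accesses (lxvp/stxvp) that want 32-byte-aligned addresses, so a short scalar loop first peels off the 0-7 leading floats that sit before the next 32-byte boundary of the store target (y for srot/sswap, x for sscal), the vector kernel then takes the largest remaining multiple of the unroll width (16 elements for srot, 32 for sscal, 64 for sswap), and a scalar tail finishes the rest. The align expression computes bytes-to-boundary, converts to float elements with >> 2, and masks the already-aligned case (where (32 - 0) >> 2 == 8) down to 0. The same logic in plain C, using sscal's da*x[j] operation (scale_kernel_16 is a stand-in for the real microkernel):

#include <stdint.h>

static void scale_kernel_16(long n, float *x, float alpha)
{
    for (long k = 0; k < n; k++)   /* stands in for the lxvp/stxvp kernel */
        x[k] *= alpha;
}

void scale(long n, float *x, float alpha)
{
    long i = 0;
    if (n >= 16) {
        /* floats before the next 32-byte boundary: 0..7 */
        long align = ((32 - ((uintptr_t)x & (uintptr_t)0x1F)) >> 2) & 0x7;
        for (i = 0; i < align; i++)
            x[i] *= alpha;
    }
    long n1 = (n - i) & -16;       /* largest multiple of 16 still left */
    if (n1 > 0) {
        scale_kernel_16(n1, &x[i], alpha);
        i += n1;
    }
    for (; i < n; i++)             /* scalar tail */
        x[i] *= alpha;
}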
#include "common.h" +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(POWER8) || defined(POWER9) #include "sswap_microk_power8.c" +#elif defined(POWER10) +#include "swap_microk_power10.c" +#endif #endif #ifndef HAVE_KERNEL_32 @@ -113,12 +117,30 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT *x, if ( (inc_x == 1) && (inc_y == 1 )) { +#if defined(POWER10) + if ( n >= 64 ) + { + BLASLONG align = ((32 - ((uintptr_t)y & (uintptr_t)0x1F)) >> 2) & 0x7; + for (i = 0; i < align; i++) { + temp = y[i]; + y[i] = x[i]; + x[i] = temp; + } + } + BLASLONG n1 = (n-i) & -64; + if ( n1 > 0 ) + { + sswap_kernel_32(n1,&x[i], &y[i]); + i+=n1; + } +#else BLASLONG n1 = n & -32; if ( n1 > 0 ) { sswap_kernel_32(n1, x, y); i=n1; } +#endif while(i < n) { diff --git a/kernel/power/sswap_microk_power8.c b/kernel/power/sswap_microk_power8.c index cfefdd6ef..a407018a8 100644 --- a/kernel/power/sswap_microk_power8.c +++ b/kernel/power/sswap_microk_power8.c @@ -39,8 +39,8 @@ static void sswap_kernel_32 (long n, float *x, float *y) { __asm__ ( - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "lxvd2x 32, 0, %4 \n\t" "lxvd2x 33, %5, %4 \n\t" @@ -83,7 +83,7 @@ static void sswap_kernel_32 (long n, float *x, float *y) "addi %4, %4, 128 \n\t" "addic. %2, %2, -32 \n\t" - "bgt 1b \n" + "bgt one%= \n" "#n=%2 x=%0=%3 y=%1=%4 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" : diff --git a/kernel/power/strmm_kernel_16x8_power8.S b/kernel/power/strmm_kernel_16x8_power8.S index f9b8a0bb8..1f9912c49 100644 --- a/kernel/power/strmm_kernel_16x8_power8.S +++ b/kernel/power/strmm_kernel_16x8_power8.S @@ -82,7 +82,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #ifdef __64BIT__ -#define STACKSIZE 340 #define STACKSIZE 540 #define ALPHA_SP 296+200(SP) #define FZERO 304+200(SP) @@ -96,7 +95,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -271,7 +270,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. slwi LDC, LDC, BASE_SHIFT #if defined(TRMMKERNEL) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/strmm_macros_16x8_power8.S b/kernel/power/strmm_macros_16x8_power8.S index 27bc1e89c..6c016d6fa 100644 --- a/kernel/power/strmm_macros_16x8_power8.S +++ b/kernel/power/strmm_macros_16x8_power8.S @@ -38,7 +38,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=8 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x16_1', ` +#else .macro LOAD8x16_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -63,9 +67,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_I1', ` +#else .macro KERNEL8x16_I1 +#endif lxvw4x vs4, o0, AO @@ -133,9 +145,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs63, vs3, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_1', ` +#else .macro KERNEL8x16_1 +#endif lxvw4x vs4, o0, AO @@ -203,9 +223,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs63, vs3, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_2', ` +#else .macro KERNEL8x16_2 +#endif lxvw4x vs0, o0, AO @@ -273,9 +301,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs7, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_E2', ` +#else .macro KERNEL8x16_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -319,9 +355,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs7, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_SUBI1', ` +#else .macro KERNEL8x16_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -389,9 +433,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs63, vs3, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x16_SUB1', ` +#else .macro KERNEL8x16_SUB1 +#endif lxvw4x vs0, o0, AO @@ -459,9 +511,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs63, vs3, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x16', ` +#else .macro SAVE8x16 +#endif mr T1, CO @@ -698,14 +758,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=8 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x8_1', ` +#else .macro LOAD8x8_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -728,9 +796,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_I1', ` +#else .macro KERNEL8x8_I1 +#endif lxvw4x vs4, o0, AO @@ -780,9 +856,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_1', ` +#else .macro KERNEL8x8_1 +#endif lxvw4x vs4, o0, AO @@ -832,9 +916,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_2', ` +#else .macro KERNEL8x8_2 +#endif lxvw4x vs0, o0, AO @@ -884,9 +976,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs5, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_E2', ` +#else .macro KERNEL8x8_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -914,9 +1014,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs5, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_SUBI1', ` +#else .macro KERNEL8x8_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -966,9 +1074,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x8_SUB1', ` +#else .macro KERNEL8x8_SUB1 +#endif lxvw4x vs0, o0, AO @@ -1018,9 +1134,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddasp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x8', ` +#else .macro SAVE8x8 +#endif mr T1, CO @@ -1193,14 +1317,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=8 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x4_1', ` +#else .macro LOAD8x4_1 +#endif lxvw4x vs0, o0, AO @@ -1222,9 +1354,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_I1', ` +#else .macro KERNEL8x4_I1 +#endif lxvw4x vs4, o0, AO @@ -1265,9 +1405,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_1', ` +#else .macro KERNEL8x4_1 +#endif lxvw4x vs4, o0, AO @@ -1308,9 +1456,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_2', ` +#else .macro KERNEL8x4_2 +#endif lxvw4x vs0, o0, AO @@ -1351,9 +1507,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs4, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_E2', ` +#else .macro KERNEL8x4_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -1373,9 +1537,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs4, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_SUBI1', ` +#else .macro KERNEL8x4_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -1416,9 +1588,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x4_SUB1', ` +#else .macro KERNEL8x4_SUB1 +#endif lxvw4x vs0, o0, AO @@ -1459,9 +1639,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x4', ` +#else .macro SAVE8x4 +#endif mr T1, CO @@ -1602,14 +1790,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=8 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x2_1', ` +#else .macro LOAD8x2_1 +#endif lxsspx vs0, o0, AO lxsspx vs1, o4, AO @@ -1632,9 +1828,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_I1', ` +#else .macro KERNEL8x2_I1 +#endif lxsspx vs4, o0, AO @@ -1684,9 +1888,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_1', ` +#else .macro KERNEL8x2_1 +#endif lxsspx vs4, o0, AO @@ -1736,9 +1948,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_2', ` +#else .macro KERNEL8x2_2 +#endif lxsspx vs0, o0, AO @@ -1788,9 +2008,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs5, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_E2', ` +#else .macro KERNEL8x2_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -1818,9 +2046,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs5, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_SUBI1', ` +#else .macro KERNEL8x2_SUBI1 +#endif lxsspx vs0, o0, AO @@ -1870,9 +2106,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x2_SUB1', ` +#else .macro KERNEL8x2_SUB1 +#endif lxsspx vs0, o0, AO @@ -1922,9 +2166,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs47, vs1, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x2', ` +#else .macro SAVE8x2 +#endif mr T1, CO @@ -2097,14 +2349,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=8 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD8x1_1', ` +#else .macro LOAD8x1_1 +#endif lxsspx vs0, o0, AO @@ -2126,9 +2386,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_I1', ` +#else .macro KERNEL8x1_I1 +#endif lxsspx vs4, o0, AO @@ -2169,9 +2437,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_1', ` +#else .macro KERNEL8x1_1 +#endif lxsspx vs4, o0, AO @@ -2212,9 +2488,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_2', ` +#else .macro KERNEL8x1_2 +#endif lxsspx vs0, o0, AO @@ -2255,9 +2539,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs4, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_E2', ` +#else .macro KERNEL8x1_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -2277,9 +2569,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs4, vs23 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_SUBI1', ` +#else .macro KERNEL8x1_SUBI1 +#endif lxsspx vs0, o0, AO @@ -2320,9 +2620,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL8x1_SUB1', ` +#else .macro KERNEL8x1_SUB1 +#endif lxsspx vs0, o0, AO @@ -2363,9 +2671,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs39, vs0, vs15 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE8x1', ` +#else .macro SAVE8x1 +#endif mr T1, CO @@ -2506,14 +2822,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 4 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x16_1', ` +#else .macro LOAD4x16_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -2531,9 +2855,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_I1', ` +#else .macro KERNEL4x16_I1 +#endif lxvw4x vs4, o0, AO @@ -2574,9 +2906,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs3, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_1', ` +#else .macro KERNEL4x16_1 +#endif lxvw4x vs4, o0, AO @@ -2617,9 +2957,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs3, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_2', ` +#else .macro KERNEL4x16_2 +#endif lxvw4x vs0, o0, AO @@ -2660,9 +3008,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs7, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_E2', ` +#else .macro KERNEL4x16_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -2686,9 +3042,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs7, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_SUBI1', ` +#else .macro KERNEL4x16_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -2729,9 +3093,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs47, vs3, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x16_SUB1', ` +#else .macro KERNEL4x16_SUB1 +#endif lxvw4x vs0, o0, AO @@ -2772,9 +3144,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs47, vs3, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x16', ` +#else .macro SAVE4x16 +#endif mr T1, CO @@ -2895,14 +3275,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x8_1', ` +#else .macro LOAD4x8_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -2918,9 +3306,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_I1', ` +#else .macro KERNEL4x8_I1 +#endif lxvw4x vs4, o0, AO @@ -2951,9 +3347,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_1', ` +#else .macro KERNEL4x8_1 +#endif lxvw4x vs4, o0, AO @@ -2984,9 +3388,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_2', ` +#else .macro KERNEL4x8_2 +#endif lxvw4x vs0, o0, AO @@ -3017,9 +3429,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs5, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_E2', ` +#else .macro KERNEL4x8_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -3035,9 +3455,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs5, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_SUBI1', ` +#else .macro KERNEL4x8_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -3068,9 +3496,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x8_SUB1', ` +#else .macro KERNEL4x8_SUB1 +#endif lxvw4x vs0, o0, AO @@ -3101,9 +3537,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x8', ` +#else .macro SAVE4x8 +#endif mr T1, CO @@ -3192,14 +3636,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x4_1', ` +#else .macro LOAD4x4_1 +#endif lxvw4x vs0, o0, AO @@ -3214,9 +3666,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_I1', ` +#else .macro KERNEL4x4_I1 +#endif lxvw4x vs4, o0, AO @@ -3242,9 +3702,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_1', ` +#else .macro KERNEL4x4_1 +#endif lxvw4x vs4, o0, AO @@ -3270,9 +3738,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_2', ` +#else .macro KERNEL4x4_2 +#endif lxvw4x vs0, o0, AO @@ -3298,9 +3774,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs4, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_E2', ` +#else .macro KERNEL4x4_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -3312,9 +3796,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs4, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_SUBI1', ` +#else .macro KERNEL4x4_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -3340,9 +3832,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x4_SUB1', ` +#else .macro KERNEL4x4_SUB1 +#endif lxvw4x vs0, o0, AO @@ -3368,9 +3868,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x4', ` +#else .macro SAVE4x4 +#endif mr T1, CO @@ -3443,14 +3951,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x2_1', ` +#else .macro LOAD4x2_1 +#endif lxsspx vs0, o0, AO lxsspx vs1, o4, AO @@ -3466,9 +3982,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_I1', ` +#else .macro KERNEL4x2_I1 +#endif lxsspx vs4, o0, AO @@ -3499,9 +4023,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_1', ` +#else .macro KERNEL4x2_1 +#endif lxsspx vs4, o0, AO @@ -3532,9 +4064,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_2', ` +#else .macro KERNEL4x2_2 +#endif lxsspx vs0, o0, AO @@ -3565,9 +4105,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs5, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_E2', ` +#else .macro KERNEL4x2_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -3583,9 +4131,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs5, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_SUBI1', ` +#else .macro KERNEL4x2_SUBI1 +#endif lxsspx vs0, o0, AO @@ -3616,9 +4172,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x2_SUB1', ` +#else .macro KERNEL4x2_SUB1 +#endif lxsspx vs0, o0, AO @@ -3649,9 +4213,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs39, vs1, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x2', ` +#else .macro SAVE4x2 +#endif mr T1, CO @@ -3740,14 +4312,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD4x1_1', ` +#else .macro LOAD4x1_1 +#endif lxsspx vs0, o0, AO @@ -3762,9 +4342,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_I1', ` +#else .macro KERNEL4x1_I1 +#endif lxsspx vs4, o0, AO @@ -3790,9 +4378,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmuldp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_1', ` +#else .macro KERNEL4x1_1 +#endif lxsspx vs4, o0, AO @@ -3818,9 +4414,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_2', ` +#else .macro KERNEL4x1_2 +#endif lxsspx vs0, o0, AO @@ -3846,9 +4450,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs4, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_E2', ` +#else .macro KERNEL4x1_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -3860,9 +4472,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs4, vs19 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_SUBI1', ` +#else .macro KERNEL4x1_SUBI1 +#endif lxsspx vs0, o0, AO @@ -3888,9 +4508,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL4x1_SUB1', ` +#else .macro KERNEL4x1_SUB1 +#endif lxsspx vs0, o0, AO @@ -3916,9 +4544,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs0, vs11 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE4x1', ` +#else .macro SAVE4x1 +#endif mr T1, CO @@ -3991,14 +4627,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 4 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x16_1', ` +#else .macro LOAD2x16_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -4014,9 +4658,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_I1', ` +#else .macro KERNEL2x16_I1 +#endif lxvw4x vs4, o0, AO @@ -4045,9 +4697,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs39, vs3, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_1', ` +#else .macro KERNEL2x16_1 +#endif lxvw4x vs4, o0, AO @@ -4076,9 +4736,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs3, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_2', ` +#else .macro KERNEL2x16_2 +#endif lxvw4x vs0, o0, AO @@ -4107,9 +4775,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs7, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_E2', ` +#else .macro KERNEL2x16_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -4123,9 +4799,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs7, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_SUBI1', ` +#else .macro KERNEL2x16_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -4154,9 +4838,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs39, vs3, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x16_SUB1', ` +#else .macro KERNEL2x16_SUB1 +#endif lxvw4x vs0, o0, AO @@ -4185,9 +4877,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs39, vs3, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x16', ` +#else .macro SAVE2x16 +#endif mr T1, CO @@ -4250,14 +4950,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x8_1', ` +#else .macro LOAD2x8_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -4271,9 +4979,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_I1', ` +#else .macro KERNEL2x8_I1 +#endif lxvw4x vs4, o0, AO @@ -4296,9 +5012,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_1', ` +#else .macro KERNEL2x8_1 +#endif lxvw4x vs4, o0, AO @@ -4321,9 +5045,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_2', ` +#else .macro KERNEL2x8_2 +#endif lxvw4x vs0, o0, AO @@ -4346,9 +5078,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs5, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_E2', ` +#else .macro KERNEL2x8_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -4358,9 +5098,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs5, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUBI1', ` +#else .macro KERNEL2x8_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -4383,9 +5131,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUB1', ` +#else .macro KERNEL2x8_SUB1 +#endif lxvw4x vs0, o0, AO @@ -4408,9 +5164,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x8', ` +#else .macro SAVE2x8 +#endif mr T1, CO @@ -4457,14 +5221,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x4_1', ` +#else .macro LOAD2x4_1 +#endif lxvw4x vs0, o0, AO @@ -4477,9 +5249,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_I1', ` +#else .macro KERNEL2x4_I1 +#endif lxvw4x vs4, o0, AO @@ -4499,9 +5279,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_1', ` +#else .macro KERNEL2x4_1 +#endif lxvw4x vs4, o0, AO @@ -4521,9 +5309,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_2', ` +#else .macro KERNEL2x4_2 +#endif lxvw4x vs0, o0, AO @@ -4543,9 +5339,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs4, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_E2', ` +#else .macro KERNEL2x4_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -4553,9 +5357,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs4, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUBI1', ` +#else .macro KERNEL2x4_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -4575,9 +5387,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUB1', ` +#else .macro KERNEL2x4_SUB1 +#endif lxvw4x vs0, o0, AO @@ -4597,9 +5417,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x4', ` +#else .macro SAVE2x4 +#endif mr T1, CO @@ -4638,14 +5466,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x2_1', ` +#else .macro LOAD2x2_1 +#endif lxsspx vs0, o0, AO lxsspx vs1, o4, AO @@ -4659,9 +5495,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_I1', ` +#else .macro KERNEL2x2_I1 +#endif lxsspx vs4, o0, AO @@ -4684,9 +5528,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_1', ` +#else .macro KERNEL2x2_1 +#endif lxsspx vs4, o0, AO @@ -4709,9 +5561,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_2', ` +#else .macro KERNEL2x2_2 +#endif lxsspx vs0, o0, AO @@ -4734,9 +5594,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs5, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_E2', ` +#else .macro KERNEL2x2_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -4746,9 +5614,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs5, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUBI1', ` +#else .macro KERNEL2x2_SUBI1 +#endif lxsspx vs0, o0, AO @@ -4771,9 +5647,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmuldp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUB1', ` +#else .macro KERNEL2x2_SUB1 +#endif lxsspx vs0, o0, AO @@ -4796,9 +5680,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs35, vs1, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x2', ` +#else .macro SAVE2x2 +#endif mr T1, CO @@ -4845,14 +5737,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x1_1', ` +#else .macro LOAD2x1_1 +#endif lxsspx vs0, o0, AO @@ -4865,9 +5765,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_I1', ` +#else .macro KERNEL2x1_I1 +#endif lxsspx vs4, o0, AO @@ -4887,9 +5795,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_1', ` +#else .macro KERNEL2x1_1 +#endif lxsspx vs4, o0, AO @@ -4909,9 +5825,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_2', ` +#else .macro KERNEL2x1_2 +#endif lxsspx vs0, o0, AO @@ -4931,9 +5855,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs4, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_E2', ` +#else .macro KERNEL2x1_E2 +#endif xsmaddadp vs32, vs4, vs16 @@ -4941,9 +5873,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs4, vs17 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUBI1', ` +#else .macro KERNEL2x1_SUBI1 +#endif lxsspx vs0, o0, AO @@ -4963,9 +5903,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUB1', ` +#else .macro KERNEL2x1_SUB1 +#endif lxsspx vs0, o0, AO @@ -4985,9 +5933,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs0, vs9 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x1', ` +#else .macro SAVE2x1 +#endif mr T1, CO @@ -5026,14 +5982,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 4 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=16 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x16_1', ` +#else .macro LOAD1x16_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -5048,9 +6012,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 4 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_I1', ` +#else .macro KERNEL1x16_I1 +#endif lxvw4x vs4, o0, AO @@ -5073,9 +6045,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs35, vs3, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_1', ` +#else .macro KERNEL1x16_1 +#endif lxvw4x vs4, o0, AO @@ -5098,9 +6078,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs3, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_2', ` +#else .macro KERNEL1x16_2 +#endif lxvw4x vs0, o0, AO @@ -5123,9 +6111,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs7, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_E2', ` +#else .macro KERNEL1x16_E2 +#endif xvmaddasp vs32, vs4, vs16 @@ -5134,9 +6130,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs7, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_SUBI1', ` +#else .macro KERNEL1x16_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -5159,9 +6163,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs35, vs3, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x16_SUB1', ` +#else .macro KERNEL1x16_SUB1 +#endif lxvw4x vs0, o0, AO @@ -5184,9 +6196,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs35, vs3, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x16', ` +#else .macro SAVE1x16 +#endif mr T1, CO @@ -5220,14 +6240,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x8_1', ` +#else .macro LOAD1x8_1 +#endif lxvw4x vs0, o0, AO lxvw4x vs1, o16, AO @@ -5240,9 +6268,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 4 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_I1', ` +#else .macro KERNEL1x8_I1 +#endif lxvw4x vs4, o0, AO @@ -5261,9 +6297,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_1', ` +#else .macro KERNEL1x8_1 +#endif lxvw4x vs4, o0, AO @@ -5282,9 +6326,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_2', ` +#else .macro KERNEL1x8_2 +#endif lxvw4x vs0, o0, AO @@ -5303,18 +6355,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs5, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_E2', ` +#else .macro KERNEL1x8_E2 +#endif xvmaddasp vs32, vs4, vs16 xvmaddasp vs33, vs5, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUBI1', ` +#else .macro KERNEL1x8_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -5333,9 +6401,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmulsp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUB1', ` +#else .macro KERNEL1x8_SUB1 +#endif lxvw4x vs0, o0, AO @@ -5354,9 +6430,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x8', ` +#else .macro SAVE1x8 +#endif mr T1, CO @@ -5382,14 +6466,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x4_1', ` +#else .macro LOAD1x4_1 +#endif lxvw4x vs0, o0, AO @@ -5401,9 +6493,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 4 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_I1', ` +#else .macro KERNEL1x4_I1 +#endif lxvw4x vs4, o0, AO @@ -5420,9 +6520,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_1', ` +#else .macro KERNEL1x4_1 +#endif lxvw4x vs4, o0, AO @@ -5439,9 +6547,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_2', ` +#else .macro KERNEL1x4_2 +#endif lxvw4x vs0, o0, AO @@ -5458,17 +6574,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs32, vs4, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_E2', ` +#else .macro KERNEL1x4_E2 +#endif xvmaddasp vs32, vs4, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUBI1', ` +#else .macro KERNEL1x4_SUBI1 +#endif lxvw4x vs0, o0, AO @@ -5485,9 +6617,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmulsp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUB1', ` +#else .macro KERNEL1x4_SUB1 +#endif lxvw4x vs0, o0, AO @@ -5504,9 +6644,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddasp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x4', ` +#else .macro SAVE1x4 +#endif mr T1, CO @@ -5528,14 +6676,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x2_1', ` +#else .macro LOAD1x2_1 +#endif lxsspx vs0, o0, AO lxsspx vs1, o4, AO @@ -5548,9 +6704,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 4 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_I1', ` +#else .macro KERNEL1x2_I1 +#endif lxsspx vs4, o0, AO @@ -5569,9 +6733,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmuldp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_1', ` +#else .macro KERNEL1x2_1 +#endif lxsspx vs4, o0, AO @@ -5590,9 +6762,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_2', ` +#else .macro KERNEL1x2_2 +#endif lxsspx vs0, o0, AO @@ -5611,18 +6791,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs5, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_E2', ` +#else .macro KERNEL1x2_E2 +#endif xsmaddadp vs32, vs4, vs16 xsmaddadp vs33, vs5, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUBI1', ` +#else .macro KERNEL1x2_SUBI1 +#endif lxsspx vs0, o0, AO @@ -5641,9 +6837,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUB1', ` +#else .macro KERNEL1x2_SUB1 +#endif lxsspx vs0, o0, AO @@ -5662,9 +6866,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs33, vs1, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x2', ` +#else .macro SAVE1x2 +#endif mr T1, CO @@ -5690,14 +6902,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 8 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x1_1', ` +#else .macro LOAD1x1_1 +#endif lxsspx vs0, o0, AO @@ -5709,9 +6929,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi BO, BO, 4 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_I1', ` +#else .macro KERNEL1x1_I1 +#endif lxsspx vs4, o0, AO @@ -5728,9 +6956,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_1', ` +#else .macro KERNEL1x1_1 +#endif lxsspx vs4, o0, AO @@ -5747,9 +6983,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_2', ` +#else .macro KERNEL1x1_2 +#endif lxsspx vs0, o0, AO @@ -5766,17 +7010,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmaddadp vs32, vs4, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_E2', ` +#else .macro KERNEL1x1_E2 +#endif xsmaddadp vs32, vs4, vs16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUBI1', ` +#else .macro KERNEL1x1_SUBI1 +#endif lxsspx vs0, o0, AO @@ -5793,9 +7053,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xsmuldp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUB1', ` +#else .macro KERNEL1x1_SUB1 +#endif lxsspx vs0, o0, AO @@ -5812,9 +7080,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xsmaddadp vs32, vs0, vs8 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x1', ` +#else .macro SAVE1x1 +#endif mr T1, CO @@ -5836,5 +7112,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi CO, CO, 4 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/swap.S b/kernel/power/swap.S index e862b17bb..c9c0f86b0 100644 --- a/kernel/power/swap.S +++ b/kernel/power/swap.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define N r3 #define X r6 diff --git a/kernel/power/swap_microk_power10.c b/kernel/power/swap_microk_power10.c new file mode 100644 index 000000000..f9c1fee52 --- /dev/null +++ b/kernel/power/swap_microk_power10.c @@ -0,0 +1,105 @@ +/*************************************************************************** +Copyright (c) 2021, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ +#define HAVE_KERNEL_32 1 + +#if defined(DOUBLE) +static void dswap_kernel_32 (long n, double *x, double *y) +#else +static void sswap_kernel_32 (long n, float *x, float *y) +#endif +{ + __asm__ + ( + ".align 5 \n" + "one%=: \n\t" + + "lxvp 32, 0(%4) \n\t" + "lxvp 34, 32(%4) \n\t" + "lxvp 36, 64(%4) \n\t" + "lxvp 38, 96(%4) \n\t" + + "lxvp 40, 128(%4) \n\t" + "lxvp 42, 160(%4) \n\t" + "lxvp 44, 192(%4) \n\t" + "lxvp 46, 224(%4) \n\t" + + "lxvp 48, 0(%3) \n\t" + "lxvp 50, 32(%3) \n\t" + "lxvp 52, 64(%3) \n\t" + "lxvp 54, 96(%3) \n\t" + + "lxvp 56, 128(%3) \n\t" + "lxvp 58, 160(%3) \n\t" + "lxvp 60, 192(%3) \n\t" + "lxvp 62, 224(%3) \n\t" + + "stxvp 32, 0(%3) \n\t" + "stxvp 34, 32(%3) \n\t" + "stxvp 36, 64(%3) \n\t" + "stxvp 38, 96(%3) \n\t" + + "stxvp 40, 128(%3) \n\t" + "stxvp 42, 160(%3) \n\t" + "stxvp 44, 192(%3) \n\t" + "stxvp 46, 224(%3) \n\t" + + "stxvp 48, 0(%4) \n\t" + "stxvp 50, 32(%4) \n\t" + "stxvp 52, 64(%4) \n\t" + "stxvp 54, 96(%4) \n\t" + + "stxvp 56, 128(%4) \n\t" + "stxvp 58, 160(%4) \n\t" + "stxvp 60, 192(%4) \n\t" + "stxvp 62, 224(%4) \n\t" + + "addi %4, %4, 256 \n\t" + "addi %3, %3, 256 \n\t" + +#if defined(DOUBLE) + "addic. %2, %2, -32 \n\t" +#else + "addic. %2, %2, -64 \n\t" +#endif + "bgt one%= \n" + + "#n=%2 x=%0=%3 y=%1=%4" + : + "+m" (*x), + "+m" (*y), + "+r" (n), // 2 + "+b" (x), // 3 + "+b" (y) // 4 + : + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", + "vs48","vs49","vs50","vs51","vs52","vs53","vs54","vs55", + "vs56","vs57","vs58","vs59","vs60","vs61","vs62","vs63" + ); +} diff --git a/kernel/power/symv_L.S b/kernel/power/symv_L.S index f7d768c50..a4ff703e2 100644 --- a/kernel/power/symv_L.S +++ b/kernel/power/symv_L.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -248,7 +248,7 @@ stw r27, 196(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz BUFFER, FRAMESLOT(0) + STACKSIZE(SP) #else diff --git a/kernel/power/symv_U.S b/kernel/power/symv_U.S index d8e082397..c3063e077 100644 --- a/kernel/power/symv_U.S +++ b/kernel/power/symv_U.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define IS r4 @@ -247,7 +247,7 @@ stw r27, 196(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz BUFFER, FRAMESLOT(0) + STACKSIZE(SP) #else diff --git a/kernel/power/trsm_kernel_LN.S b/kernel/power/trsm_kernel_LN.S index 7983c573b..8319d5ed8 100644 --- a/kernel/power/trsm_kernel_LN.S +++ b/kernel/power/trsm_kernel_LN.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -180,7 +180,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -236,7 +236,7 @@ #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/trsm_kernel_LN_power10.c b/kernel/power/trsm_kernel_LN_power10.c new file mode 100644 index 000000000..5ca1603a6 --- /dev/null +++ b/kernel/power/trsm_kernel_LN_power10.c @@ -0,0 +1,1280 @@ 
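For orientation before the new file body: trsm_kernel_LN_power10.c open-codes the triangular solve for the 8x8 (double) and 16x8 (single) tiles with VSX vector intrinsics. Below is a minimal scalar sketch of the per-column backward substitution those solve routines unroll; it assumes, as OpenBLAS trsm packing conventionally does, that the packed triangular block stores reciprocals on its diagonal, so each step multiplies instead of divides. The typedef and function name are illustrative, not part of the patch.

typedef double FLOAT;  /* stands in for the build's configured precision */

/* Solve one column of C in place, last row first. b receives the solved
 * values for the subsequent GEMM updates; the real kernel stores b
 * interleaved across the eight columns, as in the
 * b[k] = (c[k] *= a[diag]) lines that follow. */
static void solve_col_lower_backward(int m, const FLOAT *a, FLOAT *b, FLOAT *c)
{
    int i, k;
    for (i = m - 1; i >= 0; i--) {
        FLOAT bi = (c[i] *= a[i * m + i]);   /* diagonal entry holds 1/a_ii */
        b[i] = bi;
        for (k = 0; k < i; k++)
            c[k] -= bi * a[i * m + k];       /* eliminate the rows above i */
    }
}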
+/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include "common.h" +#include <altivec.h> + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + +#ifndef COMPLEX + +#ifdef DOUBLE + +static inline __attribute__ ((always_inline)) void solve8x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7; + c0 = &c[0*ldc]; + c1 = &c[1*ldc]; + c2 = &c[2*ldc]; + c3 = &c[3*ldc]; + c4 = &c[4*ldc]; + c5 = &c[5*ldc]; + c6 = &c[6*ldc]; + c7 = &c[7*ldc]; + vector FLOAT *Va = (vector FLOAT *) a; + vector FLOAT *Vb = (vector FLOAT *) b; + vector FLOAT *Vc0 = (vector FLOAT *) c0; + vector FLOAT *Vc1 = (vector FLOAT *) c1; + vector FLOAT *Vc2 = (vector FLOAT *) c2; + vector FLOAT *Vc3 = (vector FLOAT *) c3; + vector FLOAT *Vc4 = (vector FLOAT *) c4; + vector FLOAT *Vc5 = (vector FLOAT *) c5; + vector FLOAT *Vc6 = (vector FLOAT *) c6; + vector FLOAT *Vc7 = (vector FLOAT *) c7; + vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7; + + b[56] = (c0[7] *= a[63]); + b[57] = (c1[7] *= a[63]); + b[58] = (c2[7] *= a[63]); + b[59] = (c3[7] *= a[63]); + b[60] = (c4[7] *= a[63]); + b[61] = (c5[7] *= a[63]); + b[62] = (c6[7] *= a[63]); + b[63] = (c7[7] *= a[63]); + VbS0 = vec_splat(Vb[28], 0); + VbS1 = vec_splat(Vb[28], 1); + VbS2 = vec_splat(Vb[29], 0); + VbS3 = vec_splat(Vb[29], 1); + VbS4 = vec_splat(Vb[30], 0); + VbS5 = vec_splat(Vb[30], 1); + VbS6 = vec_splat(Vb[31], 0); + VbS7 = vec_splat(Vb[31], 1); + Vc0[0] = vec_nmsub(VbS0, Va[28], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[29], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[30], Vc0[2]); + Vc1[0] = vec_nmsub(VbS1, Va[28], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[29], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[30], Vc1[2]); + Vc2[0] = vec_nmsub(VbS2, Va[28], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[29], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[30], Vc2[2]); + Vc3[0] = vec_nmsub(VbS3, Va[28], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[29], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[30], Vc3[2]); + Vc4[0] = vec_nmsub(VbS4, Va[28], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[29], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[30], Vc4[2]); + Vc5[0] = vec_nmsub(VbS5, Va[28], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[29], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[30], Vc5[2]); + Vc6[0] = vec_nmsub(VbS6, Va[28], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[29], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[30], Vc6[2]); + Vc7[0] = vec_nmsub(VbS7, Va[28], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[29], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[30], Vc7[2]); + c0[6] -= c0[7] * a[62]; + c1[6] -= c1[7] * a[62]; + c2[6] -= c2[7] * a[62]; + c3[6] -= c3[7] * a[62]; + c4[6] -= c4[7] * a[62]; + c5[6] -= c5[7] * a[62]; +
c6[6] -= c6[7] * a[62]; + c7[6] -= c7[7] * a[62]; + + b[48] = (c0[6] *= a[54]); + b[49] = (c1[6] *= a[54]); + b[50] = (c2[6] *= a[54]); + b[51] = (c3[6] *= a[54]); + b[52] = (c4[6] *= a[54]); + b[53] = (c5[6] *= a[54]); + b[54] = (c6[6] *= a[54]); + b[55] = (c7[6] *= a[54]); + VbS0 = vec_splat(Vb[24], 0); + VbS1 = vec_splat(Vb[24], 1); + VbS2 = vec_splat(Vb[25], 0); + VbS3 = vec_splat(Vb[25], 1); + VbS4 = vec_splat(Vb[26], 0); + VbS5 = vec_splat(Vb[26], 1); + VbS6 = vec_splat(Vb[27], 0); + VbS7 = vec_splat(Vb[27], 1); + Vc0[0] = vec_nmsub(VbS0, Va[24], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[25], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[26], Vc0[2]); + Vc1[0] = vec_nmsub(VbS1, Va[24], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[25], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[26], Vc1[2]); + Vc2[0] = vec_nmsub(VbS2, Va[24], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[25], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[26], Vc2[2]); + Vc3[0] = vec_nmsub(VbS3, Va[24], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[25], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[26], Vc3[2]); + Vc4[0] = vec_nmsub(VbS4, Va[24], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[25], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[26], Vc4[2]); + Vc5[0] = vec_nmsub(VbS5, Va[24], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[25], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[26], Vc5[2]); + Vc6[0] = vec_nmsub(VbS6, Va[24], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[25], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[26], Vc6[2]); + Vc7[0] = vec_nmsub(VbS7, Va[24], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[25], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[26], Vc7[2]); + + b[40] = (c0[5] *= a[45]); + b[41] = (c1[5] *= a[45]); + b[42] = (c2[5] *= a[45]); + b[43] = (c3[5] *= a[45]); + b[44] = (c4[5] *= a[45]); + b[45] = (c5[5] *= a[45]); + b[46] = (c6[5] *= a[45]); + b[47] = (c7[5] *= a[45]); + VbS0 = vec_splat(Vb[20], 0); + VbS1 = vec_splat(Vb[20], 1); + VbS2 = vec_splat(Vb[21], 0); + VbS3 = vec_splat(Vb[21], 1); + VbS4 = vec_splat(Vb[22], 0); + VbS5 = vec_splat(Vb[22], 1); + VbS6 = vec_splat(Vb[23], 0); + VbS7 = vec_splat(Vb[23], 1); + Vc0[0] = vec_nmsub(VbS0, Va[20], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[21], Vc0[1]); + Vc1[0] = vec_nmsub(VbS1, Va[20], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[21], Vc1[1]); + Vc2[0] = vec_nmsub(VbS2, Va[20], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[21], Vc2[1]); + Vc3[0] = vec_nmsub(VbS3, Va[20], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[21], Vc3[1]); + Vc4[0] = vec_nmsub(VbS4, Va[20], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[21], Vc4[1]); + Vc5[0] = vec_nmsub(VbS5, Va[20], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[21], Vc5[1]); + Vc6[0] = vec_nmsub(VbS6, Va[20], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[21], Vc6[1]); + Vc7[0] = vec_nmsub(VbS7, Va[20], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[21], Vc7[1]); + c0[4] -= c0[5] * a[44]; + c1[4] -= c1[5] * a[44]; + c2[4] -= c2[5] * a[44]; + c3[4] -= c3[5] * a[44]; + c4[4] -= c4[5] * a[44]; + c5[4] -= c5[5] * a[44]; + c6[4] -= c6[5] * a[44]; + c7[4] -= c7[5] * a[44]; + + b[32] = (c0[4] *= a[36]); + b[33] = (c1[4] *= a[36]); + b[34] = (c2[4] *= a[36]); + b[35] = (c3[4] *= a[36]); + b[36] = (c4[4] *= a[36]); + b[37] = (c5[4] *= a[36]); + b[38] = (c6[4] *= a[36]); + b[39] = (c7[4] *= a[36]); + VbS0 = vec_splat(Vb[16], 0); + VbS1 = vec_splat(Vb[16], 1); + VbS2 = vec_splat(Vb[17], 0); + VbS3 = vec_splat(Vb[17], 1); + VbS4 = vec_splat(Vb[18], 0); + VbS5 = vec_splat(Vb[18], 1); + VbS6 = vec_splat(Vb[19], 0); + VbS7 = vec_splat(Vb[19], 1); + Vc0[0] = vec_nmsub(VbS0, Va[16], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[17], Vc0[1]); + Vc1[0] = vec_nmsub(VbS1, 
Va[16], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[17], Vc1[1]); + Vc2[0] = vec_nmsub(VbS2, Va[16], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[17], Vc2[1]); + Vc3[0] = vec_nmsub(VbS3, Va[16], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[17], Vc3[1]); + Vc4[0] = vec_nmsub(VbS4, Va[16], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[17], Vc4[1]); + Vc5[0] = vec_nmsub(VbS5, Va[16], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[17], Vc5[1]); + Vc6[0] = vec_nmsub(VbS6, Va[16], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[17], Vc6[1]); + Vc7[0] = vec_nmsub(VbS7, Va[16], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[17], Vc7[1]); + + b[24] = (c0[3] *= a[27]); + b[25] = (c1[3] *= a[27]); + b[26] = (c2[3] *= a[27]); + b[27] = (c3[3] *= a[27]); + b[28] = (c4[3] *= a[27]); + b[29] = (c5[3] *= a[27]); + b[30] = (c6[3] *= a[27]); + b[31] = (c7[3] *= a[27]); + VbS0 = vec_splat(Vb[12], 0); + VbS1 = vec_splat(Vb[12], 1); + VbS2 = vec_splat(Vb[13], 0); + VbS3 = vec_splat(Vb[13], 1); + VbS4 = vec_splat(Vb[14], 0); + VbS5 = vec_splat(Vb[14], 1); + VbS6 = vec_splat(Vb[15], 0); + VbS7 = vec_splat(Vb[15], 1); + Vc0[0] = vec_nmsub(VbS0, Va[12], Vc0[0]); + Vc1[0] = vec_nmsub(VbS1, Va[12], Vc1[0]); + Vc2[0] = vec_nmsub(VbS2, Va[12], Vc2[0]); + Vc3[0] = vec_nmsub(VbS3, Va[12], Vc3[0]); + Vc4[0] = vec_nmsub(VbS4, Va[12], Vc4[0]); + Vc5[0] = vec_nmsub(VbS5, Va[12], Vc5[0]); + Vc6[0] = vec_nmsub(VbS6, Va[12], Vc6[0]); + Vc7[0] = vec_nmsub(VbS7, Va[12], Vc7[0]); + c0[2] -= c0[3] * a[26]; + c1[2] -= c1[3] * a[26]; + c2[2] -= c2[3] * a[26]; + c3[2] -= c3[3] * a[26]; + c4[2] -= c4[3] * a[26]; + c5[2] -= c5[3] * a[26]; + c6[2] -= c6[3] * a[26]; + c7[2] -= c7[3] * a[26]; + + b[16] = (c0[2] *= a[18]); + b[17] = (c1[2] *= a[18]); + b[18] = (c2[2] *= a[18]); + b[19] = (c3[2] *= a[18]); + b[20] = (c4[2] *= a[18]); + b[21] = (c5[2] *= a[18]); + b[22] = (c6[2] *= a[18]); + b[23] = (c7[2] *= a[18]); + VbS0 = vec_splat(Vb[ 8], 0); + VbS1 = vec_splat(Vb[ 8], 1); + VbS2 = vec_splat(Vb[ 9], 0); + VbS3 = vec_splat(Vb[ 9], 1); + VbS4 = vec_splat(Vb[10], 0); + VbS5 = vec_splat(Vb[10], 1); + VbS6 = vec_splat(Vb[11], 0); + VbS7 = vec_splat(Vb[11], 1); + Vc0[0] = vec_nmsub(VbS0, Va[8], Vc0[0]); + Vc1[0] = vec_nmsub(VbS1, Va[8], Vc1[0]); + Vc2[0] = vec_nmsub(VbS2, Va[8], Vc2[0]); + Vc3[0] = vec_nmsub(VbS3, Va[8], Vc3[0]); + Vc4[0] = vec_nmsub(VbS4, Va[8], Vc4[0]); + Vc5[0] = vec_nmsub(VbS5, Va[8], Vc5[0]); + Vc6[0] = vec_nmsub(VbS6, Va[8], Vc6[0]); + Vc7[0] = vec_nmsub(VbS7, Va[8], Vc7[0]); + + b[ 8] = (c0[1] *= a[9]); + b[ 9] = (c1[1] *= a[9]); + b[10] = (c2[1] *= a[9]); + b[11] = (c3[1] *= a[9]); + b[12] = (c4[1] *= a[9]); + b[13] = (c5[1] *= a[9]); + b[14] = (c6[1] *= a[9]); + b[15] = (c7[1] *= a[9]); + c0[0] -= c0[1] * a[8]; + c1[0] -= c1[1] * a[8]; + c2[0] -= c2[1] * a[8]; + c3[0] -= c3[1] * a[8]; + c4[0] -= c4[1] * a[8]; + c5[0] -= c5[1] * a[8]; + c6[0] -= c6[1] * a[8]; + c7[0] -= c7[1] * a[8]; + + b[0] = (c0[0] *= a[0]); + b[1] = (c1[0] *= a[0]); + b[2] = (c2[0] *= a[0]); + b[3] = (c3[0] *= a[0]); + b[4] = (c4[0] *= a[0]); + b[5] = (c5[0] *= a[0]); + b[6] = (c6[0] *= a[0]); + b[7] = (c7[0] *= a[0]); +} + +#else + +static inline __attribute__ ((always_inline)) void solve16x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7; + c0 = &c[0*ldc]; + c1 = &c[1*ldc]; + c2 = &c[2*ldc]; + c3 = &c[3*ldc]; + c4 = &c[4*ldc]; + c5 = &c[5*ldc]; + c6 = &c[6*ldc]; + c7 = &c[7*ldc]; + vector FLOAT *Va = (vector FLOAT *) a; + vector FLOAT *Vb = (vector FLOAT *) b; + vector FLOAT *Vc0 = (vector FLOAT *) c0; + vector FLOAT *Vc1 = (vector FLOAT *) c1; + 
vector FLOAT *Vc2 = (vector FLOAT *) c2; + vector FLOAT *Vc3 = (vector FLOAT *) c3; + vector FLOAT *Vc4 = (vector FLOAT *) c4; + vector FLOAT *Vc5 = (vector FLOAT *) c5; + vector FLOAT *Vc6 = (vector FLOAT *) c6; + vector FLOAT *Vc7 = (vector FLOAT *) c7; + vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7; + int j; + + b[120] = (c0[15] *= a[255]); + b[121] = (c1[15] *= a[255]); + b[122] = (c2[15] *= a[255]); + b[123] = (c3[15] *= a[255]); + b[124] = (c4[15] *= a[255]); + b[125] = (c5[15] *= a[255]); + b[126] = (c6[15] *= a[255]); + b[127] = (c7[15] *= a[255]); + VbS0 = vec_splat(Vb[30], 0); + VbS1 = vec_splat(Vb[30], 1); + VbS2 = vec_splat(Vb[30], 2); + VbS3 = vec_splat(Vb[30], 3); + VbS4 = vec_splat(Vb[31], 0); + VbS5 = vec_splat(Vb[31], 1); + VbS6 = vec_splat(Vb[31], 2); + VbS7 = vec_splat(Vb[31], 3); + Vc0[0] = vec_nmsub(VbS0, Va[60], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[61], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[62], Vc0[2]); + Vc1[0] = vec_nmsub(VbS1, Va[60], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[61], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[62], Vc1[2]); + Vc2[0] = vec_nmsub(VbS2, Va[60], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[61], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[62], Vc2[2]); + Vc3[0] = vec_nmsub(VbS3, Va[60], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[61], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[62], Vc3[2]); + Vc4[0] = vec_nmsub(VbS4, Va[60], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[61], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[62], Vc4[2]); + Vc5[0] = vec_nmsub(VbS5, Va[60], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[61], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[62], Vc5[2]); + Vc6[0] = vec_nmsub(VbS6, Va[60], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[61], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[62], Vc6[2]); + Vc7[0] = vec_nmsub(VbS7, Va[60], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[61], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[62], Vc7[2]); + c0[12] -= b[120] * a[252]; + c0[13] -= b[120] * a[253]; + c0[14] -= b[120] * a[254]; + c1[12] -= b[121] * a[252]; + c1[13] -= b[121] * a[253]; + c1[14] -= b[121] * a[254]; + c2[12] -= b[122] * a[252]; + c2[13] -= b[122] * a[253]; + c2[14] -= b[122] * a[254]; + c3[12] -= b[123] * a[252]; + c3[13] -= b[123] * a[253]; + c3[14] -= b[123] * a[254]; + c4[12] -= b[124] * a[252]; + c4[13] -= b[124] * a[253]; + c4[14] -= b[124] * a[254]; + c5[12] -= b[125] * a[252]; + c5[13] -= b[125] * a[253]; + c5[14] -= b[125] * a[254]; + c6[12] -= b[126] * a[252]; + c6[13] -= b[126] * a[253]; + c6[14] -= b[126] * a[254]; + c7[12] -= b[127] * a[252]; + c7[13] -= b[127] * a[253]; + c7[14] -= b[127] * a[254]; + + b[112] = (c0[14] *= a[238]); + b[113] = (c1[14] *= a[238]); + b[114] = (c2[14] *= a[238]); + b[115] = (c3[14] *= a[238]); + b[116] = (c4[14] *= a[238]); + b[117] = (c5[14] *= a[238]); + b[118] = (c6[14] *= a[238]); + b[119] = (c7[14] *= a[238]); + VbS0 = vec_splat(Vb[28], 0); + VbS1 = vec_splat(Vb[28], 1); + VbS2 = vec_splat(Vb[28], 2); + VbS3 = vec_splat(Vb[28], 3); + VbS4 = vec_splat(Vb[29], 0); + VbS5 = vec_splat(Vb[29], 1); + VbS6 = vec_splat(Vb[29], 2); + VbS7 = vec_splat(Vb[29], 3); + Vc0[0] = vec_nmsub(VbS0, Va[56], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[57], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[58], Vc0[2]); + Vc1[0] = vec_nmsub(VbS1, Va[56], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[57], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[58], Vc1[2]); + Vc2[0] = vec_nmsub(VbS2, Va[56], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[57], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[58], Vc2[2]); + Vc3[0] = vec_nmsub(VbS3, Va[56], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[57], 
Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[58], Vc3[2]); + Vc4[0] = vec_nmsub(VbS4, Va[56], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[57], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[58], Vc4[2]); + Vc5[0] = vec_nmsub(VbS5, Va[56], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[57], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[58], Vc5[2]); + Vc6[0] = vec_nmsub(VbS6, Va[56], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[57], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[58], Vc6[2]); + Vc7[0] = vec_nmsub(VbS7, Va[56], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[57], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[58], Vc7[2]); + c0[12] -= b[112] * a[236]; + c0[13] -= b[112] * a[237]; + c1[12] -= b[113] * a[236]; + c1[13] -= b[113] * a[237]; + c2[12] -= b[114] * a[236]; + c2[13] -= b[114] * a[237]; + c3[12] -= b[115] * a[236]; + c3[13] -= b[115] * a[237]; + c4[12] -= b[116] * a[236]; + c4[13] -= b[116] * a[237]; + c5[12] -= b[117] * a[236]; + c5[13] -= b[117] * a[237]; + c6[12] -= b[118] * a[236]; + c6[13] -= b[118] * a[237]; + c7[12] -= b[119] * a[236]; + c7[13] -= b[119] * a[237]; + + b[104] = (c0[13] *= a[221]); + b[105] = (c1[13] *= a[221]); + b[106] = (c2[13] *= a[221]); + b[107] = (c3[13] *= a[221]); + b[108] = (c4[13] *= a[221]); + b[109] = (c5[13] *= a[221]); + b[110] = (c6[13] *= a[221]); + b[111] = (c7[13] *= a[221]); + VbS0 = vec_splat(Vb[26], 0); + VbS1 = vec_splat(Vb[26], 1); + VbS2 = vec_splat(Vb[26], 2); + VbS3 = vec_splat(Vb[26], 3); + VbS4 = vec_splat(Vb[27], 0); + VbS5 = vec_splat(Vb[27], 1); + VbS6 = vec_splat(Vb[27], 2); + VbS7 = vec_splat(Vb[27], 3); + Vc0[0] = vec_nmsub(VbS0, Va[52], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[53], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[54], Vc0[2]); + Vc1[0] = vec_nmsub(VbS1, Va[52], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[53], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[54], Vc1[2]); + Vc2[0] = vec_nmsub(VbS2, Va[52], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[53], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[54], Vc2[2]); + Vc3[0] = vec_nmsub(VbS3, Va[52], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[53], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[54], Vc3[2]); + Vc4[0] = vec_nmsub(VbS4, Va[52], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[53], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[54], Vc4[2]); + Vc5[0] = vec_nmsub(VbS5, Va[52], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[53], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[54], Vc5[2]); + Vc6[0] = vec_nmsub(VbS6, Va[52], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[53], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[54], Vc6[2]); + Vc7[0] = vec_nmsub(VbS7, Va[52], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[53], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[54], Vc7[2]); + c0[12] -= b[104] * a[220]; + c1[12] -= b[105] * a[220]; + c2[12] -= b[106] * a[220]; + c3[12] -= b[107] * a[220]; + c4[12] -= b[108] * a[220]; + c5[12] -= b[109] * a[220]; + c6[12] -= b[110] * a[220]; + c7[12] -= b[111] * a[220]; + + b[ 96] = (c0[12] *= a[204]); + b[ 97] = (c1[12] *= a[204]); + b[ 98] = (c2[12] *= a[204]); + b[ 99] = (c3[12] *= a[204]); + b[100] = (c4[12] *= a[204]); + b[101] = (c5[12] *= a[204]); + b[102] = (c6[12] *= a[204]); + b[103] = (c7[12] *= a[204]); + VbS0 = vec_splat(Vb[24], 0); + VbS1 = vec_splat(Vb[24], 1); + VbS2 = vec_splat(Vb[24], 2); + VbS3 = vec_splat(Vb[24], 3); + VbS4 = vec_splat(Vb[25], 0); + VbS5 = vec_splat(Vb[25], 1); + VbS6 = vec_splat(Vb[25], 2); + VbS7 = vec_splat(Vb[25], 3); + Vc0[0] = vec_nmsub(VbS0, Va[48], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[49], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[50], Vc0[2]); + Vc1[0] = vec_nmsub(VbS1, Va[48], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[49], Vc1[1]); + 
Vc1[2] = vec_nmsub(VbS1, Va[50], Vc1[2]); + Vc2[0] = vec_nmsub(VbS2, Va[48], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[49], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[50], Vc2[2]); + Vc3[0] = vec_nmsub(VbS3, Va[48], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[49], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[50], Vc3[2]); + Vc4[0] = vec_nmsub(VbS4, Va[48], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[49], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[50], Vc4[2]); + Vc5[0] = vec_nmsub(VbS5, Va[48], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[49], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[50], Vc5[2]); + Vc6[0] = vec_nmsub(VbS6, Va[48], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[49], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[50], Vc6[2]); + Vc7[0] = vec_nmsub(VbS7, Va[48], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[49], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[50], Vc7[2]); + + b[88] = (c0[11] *= a[187]); + b[89] = (c1[11] *= a[187]); + b[90] = (c2[11] *= a[187]); + b[91] = (c3[11] *= a[187]); + b[92] = (c4[11] *= a[187]); + b[93] = (c5[11] *= a[187]); + b[94] = (c6[11] *= a[187]); + b[95] = (c7[11] *= a[187]); + VbS0 = vec_splat(Vb[22], 0); + VbS1 = vec_splat(Vb[22], 1); + VbS2 = vec_splat(Vb[22], 2); + VbS3 = vec_splat(Vb[22], 3); + VbS4 = vec_splat(Vb[23], 0); + VbS5 = vec_splat(Vb[23], 1); + VbS6 = vec_splat(Vb[23], 2); + VbS7 = vec_splat(Vb[23], 3); + Vc0[0] = vec_nmsub(VbS0, Va[44], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[45], Vc0[1]); + Vc1[0] = vec_nmsub(VbS1, Va[44], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[45], Vc1[1]); + Vc2[0] = vec_nmsub(VbS2, Va[44], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[45], Vc2[1]); + Vc3[0] = vec_nmsub(VbS3, Va[44], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[45], Vc3[1]); + Vc4[0] = vec_nmsub(VbS4, Va[44], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[45], Vc4[1]); + Vc5[0] = vec_nmsub(VbS5, Va[44], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[45], Vc5[1]); + Vc6[0] = vec_nmsub(VbS6, Va[44], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[45], Vc6[1]); + Vc7[0] = vec_nmsub(VbS7, Va[44], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[45], Vc7[1]); + c0[ 8] -= b[88] * a[184]; + c0[ 9] -= b[88] * a[185]; + c0[10] -= b[88] * a[186]; + c1[ 8] -= b[89] * a[184]; + c1[ 9] -= b[89] * a[185]; + c1[10] -= b[89] * a[186]; + c2[ 8] -= b[90] * a[184]; + c2[ 9] -= b[90] * a[185]; + c2[10] -= b[90] * a[186]; + c3[ 8] -= b[91] * a[184]; + c3[ 9] -= b[91] * a[185]; + c3[10] -= b[91] * a[186]; + c4[ 8] -= b[92] * a[184]; + c4[ 9] -= b[92] * a[185]; + c4[10] -= b[92] * a[186]; + c5[ 8] -= b[93] * a[184]; + c5[ 9] -= b[93] * a[185]; + c5[10] -= b[93] * a[186]; + c6[ 8] -= b[94] * a[184]; + c6[ 9] -= b[94] * a[185]; + c6[10] -= b[94] * a[186]; + c7[ 8] -= b[95] * a[184]; + c7[ 9] -= b[95] * a[185]; + c7[10] -= b[95] * a[186]; + + b[80] = (c0[10] *= a[170]); + b[81] = (c1[10] *= a[170]); + b[82] = (c2[10] *= a[170]); + b[83] = (c3[10] *= a[170]); + b[84] = (c4[10] *= a[170]); + b[85] = (c5[10] *= a[170]); + b[86] = (c6[10] *= a[170]); + b[87] = (c7[10] *= a[170]); + VbS0 = vec_splat(Vb[20], 0); + VbS1 = vec_splat(Vb[20], 1); + VbS2 = vec_splat(Vb[20], 2); + VbS3 = vec_splat(Vb[20], 3); + VbS4 = vec_splat(Vb[21], 0); + VbS5 = vec_splat(Vb[21], 1); + VbS6 = vec_splat(Vb[21], 2); + VbS7 = vec_splat(Vb[21], 3); + Vc0[0] = vec_nmsub(VbS0, Va[40], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[41], Vc0[1]); + Vc1[0] = vec_nmsub(VbS1, Va[40], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[41], Vc1[1]); + Vc2[0] = vec_nmsub(VbS2, Va[40], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[41], Vc2[1]); + Vc3[0] = vec_nmsub(VbS3, Va[40], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[41], Vc3[1]); + Vc4[0] = 
vec_nmsub(VbS4, Va[40], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[41], Vc4[1]); + Vc5[0] = vec_nmsub(VbS5, Va[40], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[41], Vc5[1]); + Vc6[0] = vec_nmsub(VbS6, Va[40], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[41], Vc6[1]); + Vc7[0] = vec_nmsub(VbS7, Va[40], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[41], Vc7[1]); + c0[8] -= b[80] * a[168]; + c0[9] -= b[80] * a[169]; + c1[8] -= b[81] * a[168]; + c1[9] -= b[81] * a[169]; + c2[8] -= b[82] * a[168]; + c2[9] -= b[82] * a[169]; + c3[8] -= b[83] * a[168]; + c3[9] -= b[83] * a[169]; + c4[8] -= b[84] * a[168]; + c4[9] -= b[84] * a[169]; + c5[8] -= b[85] * a[168]; + c5[9] -= b[85] * a[169]; + c6[8] -= b[86] * a[168]; + c6[9] -= b[86] * a[169]; + c7[8] -= b[87] * a[168]; + c7[9] -= b[87] * a[169]; + + b[72] = (c0[9] *= a[153]); + b[73] = (c1[9] *= a[153]); + b[74] = (c2[9] *= a[153]); + b[75] = (c3[9] *= a[153]); + b[76] = (c4[9] *= a[153]); + b[77] = (c5[9] *= a[153]); + b[78] = (c6[9] *= a[153]); + b[79] = (c7[9] *= a[153]); + VbS0 = vec_splat(Vb[18], 0); + VbS1 = vec_splat(Vb[18], 1); + VbS2 = vec_splat(Vb[18], 2); + VbS3 = vec_splat(Vb[18], 3); + VbS4 = vec_splat(Vb[19], 0); + VbS5 = vec_splat(Vb[19], 1); + VbS6 = vec_splat(Vb[19], 2); + VbS7 = vec_splat(Vb[19], 3); + Vc0[0] = vec_nmsub(VbS0, Va[36], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[37], Vc0[1]); + Vc1[0] = vec_nmsub(VbS1, Va[36], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[37], Vc1[1]); + Vc2[0] = vec_nmsub(VbS2, Va[36], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[37], Vc2[1]); + Vc3[0] = vec_nmsub(VbS3, Va[36], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[37], Vc3[1]); + Vc4[0] = vec_nmsub(VbS4, Va[36], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[37], Vc4[1]); + Vc5[0] = vec_nmsub(VbS5, Va[36], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[37], Vc5[1]); + Vc6[0] = vec_nmsub(VbS6, Va[36], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[37], Vc6[1]); + Vc7[0] = vec_nmsub(VbS7, Va[36], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[37], Vc7[1]); + c0[8] -= b[72] * a[152]; + c1[8] -= b[73] * a[152]; + c2[8] -= b[74] * a[152]; + c3[8] -= b[75] * a[152]; + c4[8] -= b[76] * a[152]; + c5[8] -= b[77] * a[152]; + c6[8] -= b[78] * a[152]; + c7[8] -= b[79] * a[152]; + + b[64] = (c0[8] *= a[136]); + b[65] = (c1[8] *= a[136]); + b[66] = (c2[8] *= a[136]); + b[67] = (c3[8] *= a[136]); + b[68] = (c4[8] *= a[136]); + b[69] = (c5[8] *= a[136]); + b[70] = (c6[8] *= a[136]); + b[71] = (c7[8] *= a[136]); + VbS0 = vec_splat(Vb[16], 0); + VbS1 = vec_splat(Vb[16], 1); + VbS2 = vec_splat(Vb[16], 2); + VbS3 = vec_splat(Vb[16], 3); + VbS4 = vec_splat(Vb[17], 0); + VbS5 = vec_splat(Vb[17], 1); + VbS6 = vec_splat(Vb[17], 2); + VbS7 = vec_splat(Vb[17], 3); + Vc0[0] = vec_nmsub(VbS0, Va[32], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[33], Vc0[1]); + Vc1[0] = vec_nmsub(VbS1, Va[32], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[33], Vc1[1]); + Vc2[0] = vec_nmsub(VbS2, Va[32], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[33], Vc2[1]); + Vc3[0] = vec_nmsub(VbS3, Va[32], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[33], Vc3[1]); + Vc4[0] = vec_nmsub(VbS4, Va[32], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[33], Vc4[1]); + Vc5[0] = vec_nmsub(VbS5, Va[32], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[33], Vc5[1]); + Vc6[0] = vec_nmsub(VbS6, Va[32], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[33], Vc6[1]); + Vc7[0] = vec_nmsub(VbS7, Va[32], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[33], Vc7[1]); + + b[56] = (c0[7] *= a[119]); + b[57] = (c1[7] *= a[119]); + b[58] = (c2[7] *= a[119]); + b[59] = (c3[7] *= a[119]); + b[60] = (c4[7] *= a[119]); + b[61] = (c5[7] *= a[119]); + b[62] = (c6[7] 
*= a[119]); + b[63] = (c7[7] *= a[119]); + VbS0 = vec_splat(Vb[14], 0); + VbS1 = vec_splat(Vb[14], 1); + VbS2 = vec_splat(Vb[14], 2); + VbS3 = vec_splat(Vb[14], 3); + VbS4 = vec_splat(Vb[15], 0); + VbS5 = vec_splat(Vb[15], 1); + VbS6 = vec_splat(Vb[15], 2); + VbS7 = vec_splat(Vb[15], 3); + Vc0[0] = vec_nmsub(VbS0, Va[28], Vc0[0]); + Vc1[0] = vec_nmsub(VbS1, Va[28], Vc1[0]); + Vc2[0] = vec_nmsub(VbS2, Va[28], Vc2[0]); + Vc3[0] = vec_nmsub(VbS3, Va[28], Vc3[0]); + Vc4[0] = vec_nmsub(VbS4, Va[28], Vc4[0]); + Vc5[0] = vec_nmsub(VbS5, Va[28], Vc5[0]); + Vc6[0] = vec_nmsub(VbS6, Va[28], Vc6[0]); + Vc7[0] = vec_nmsub(VbS7, Va[28], Vc7[0]); + c0[4] -= b[56] * a[116]; + c0[5] -= b[56] * a[117]; + c0[6] -= b[56] * a[118]; + c1[4] -= b[57] * a[116]; + c1[5] -= b[57] * a[117]; + c1[6] -= b[57] * a[118]; + c2[4] -= b[58] * a[116]; + c2[5] -= b[58] * a[117]; + c2[6] -= b[58] * a[118]; + c3[4] -= b[59] * a[116]; + c3[5] -= b[59] * a[117]; + c3[6] -= b[59] * a[118]; + c4[4] -= b[60] * a[116]; + c4[5] -= b[60] * a[117]; + c4[6] -= b[60] * a[118]; + c5[4] -= b[61] * a[116]; + c5[5] -= b[61] * a[117]; + c5[6] -= b[61] * a[118]; + c6[4] -= b[62] * a[116]; + c6[5] -= b[62] * a[117]; + c6[6] -= b[62] * a[118]; + c7[4] -= b[63] * a[116]; + c7[5] -= b[63] * a[117]; + c7[6] -= b[63] * a[118]; + + b[48] = (c0[6] *= a[102]); + b[49] = (c1[6] *= a[102]); + b[50] = (c2[6] *= a[102]); + b[51] = (c3[6] *= a[102]); + b[52] = (c4[6] *= a[102]); + b[53] = (c5[6] *= a[102]); + b[54] = (c6[6] *= a[102]); + b[55] = (c7[6] *= a[102]); + VbS0 = vec_splat(Vb[12], 0); + VbS1 = vec_splat(Vb[12], 1); + VbS2 = vec_splat(Vb[12], 2); + VbS3 = vec_splat(Vb[12], 3); + VbS4 = vec_splat(Vb[13], 0); + VbS5 = vec_splat(Vb[13], 1); + VbS6 = vec_splat(Vb[13], 2); + VbS7 = vec_splat(Vb[13], 3); + Vc0[0] = vec_nmsub(VbS0, Va[24], Vc0[0]); + Vc1[0] = vec_nmsub(VbS1, Va[24], Vc1[0]); + Vc2[0] = vec_nmsub(VbS2, Va[24], Vc2[0]); + Vc3[0] = vec_nmsub(VbS3, Va[24], Vc3[0]); + Vc4[0] = vec_nmsub(VbS4, Va[24], Vc4[0]); + Vc5[0] = vec_nmsub(VbS5, Va[24], Vc5[0]); + Vc6[0] = vec_nmsub(VbS6, Va[24], Vc6[0]); + Vc7[0] = vec_nmsub(VbS7, Va[24], Vc7[0]); + c0[4] -= b[48] * a[100]; + c0[5] -= b[48] * a[101]; + c1[4] -= b[49] * a[100]; + c1[5] -= b[49] * a[101]; + c2[4] -= b[50] * a[100]; + c2[5] -= b[50] * a[101]; + c3[4] -= b[51] * a[100]; + c3[5] -= b[51] * a[101]; + c4[4] -= b[52] * a[100]; + c4[5] -= b[52] * a[101]; + c5[4] -= b[53] * a[100]; + c5[5] -= b[53] * a[101]; + c6[4] -= b[54] * a[100]; + c6[5] -= b[54] * a[101]; + c7[4] -= b[55] * a[100]; + c7[5] -= b[55] * a[101]; + + b[40] = (c0[5] *= a[85]); + b[41] = (c1[5] *= a[85]); + b[42] = (c2[5] *= a[85]); + b[43] = (c3[5] *= a[85]); + b[44] = (c4[5] *= a[85]); + b[45] = (c5[5] *= a[85]); + b[46] = (c6[5] *= a[85]); + b[47] = (c7[5] *= a[85]); + VbS0 = vec_splat(Vb[10], 0); + VbS1 = vec_splat(Vb[10], 1); + VbS2 = vec_splat(Vb[10], 2); + VbS3 = vec_splat(Vb[10], 3); + VbS4 = vec_splat(Vb[11], 0); + VbS5 = vec_splat(Vb[11], 1); + VbS6 = vec_splat(Vb[11], 2); + VbS7 = vec_splat(Vb[11], 3); + Vc0[0] = vec_nmsub(VbS0, Va[20], Vc0[0]); + Vc1[0] = vec_nmsub(VbS1, Va[20], Vc1[0]); + Vc2[0] = vec_nmsub(VbS2, Va[20], Vc2[0]); + Vc3[0] = vec_nmsub(VbS3, Va[20], Vc3[0]); + Vc4[0] = vec_nmsub(VbS4, Va[20], Vc4[0]); + Vc5[0] = vec_nmsub(VbS5, Va[20], Vc5[0]); + Vc6[0] = vec_nmsub(VbS6, Va[20], Vc6[0]); + Vc7[0] = vec_nmsub(VbS7, Va[20], Vc7[0]); + c0[4] -= b[40] * a[84]; + c1[4] -= b[41] * a[84]; + c2[4] -= b[42] * a[84]; + c3[4] -= b[43] * a[84]; + c4[4] -= b[44] * a[84]; + c5[4] -= b[45] * a[84]; + c6[4] -= b[46] 
* a[84]; + c7[4] -= b[47] * a[84]; + + b[32] = (c0[4] *= a[68]); + b[33] = (c1[4] *= a[68]); + b[34] = (c2[4] *= a[68]); + b[35] = (c3[4] *= a[68]); + b[36] = (c4[4] *= a[68]); + b[37] = (c5[4] *= a[68]); + b[38] = (c6[4] *= a[68]); + b[39] = (c7[4] *= a[68]); + VbS0 = vec_splat(Vb[8], 0); + VbS1 = vec_splat(Vb[8], 1); + VbS2 = vec_splat(Vb[8], 2); + VbS3 = vec_splat(Vb[8], 3); + VbS4 = vec_splat(Vb[9], 0); + VbS5 = vec_splat(Vb[9], 1); + VbS6 = vec_splat(Vb[9], 2); + VbS7 = vec_splat(Vb[9], 3); + Vc0[0] = vec_nmsub(VbS0, Va[16], Vc0[0]); + Vc1[0] = vec_nmsub(VbS1, Va[16], Vc1[0]); + Vc2[0] = vec_nmsub(VbS2, Va[16], Vc2[0]); + Vc3[0] = vec_nmsub(VbS3, Va[16], Vc3[0]); + Vc4[0] = vec_nmsub(VbS4, Va[16], Vc4[0]); + Vc5[0] = vec_nmsub(VbS5, Va[16], Vc5[0]); + Vc6[0] = vec_nmsub(VbS6, Va[16], Vc6[0]); + Vc7[0] = vec_nmsub(VbS7, Va[16], Vc7[0]); + + b[24] = (c0[3] *= a[51]); + b[25] = (c1[3] *= a[51]); + b[26] = (c2[3] *= a[51]); + b[27] = (c3[3] *= a[51]); + b[28] = (c4[3] *= a[51]); + b[29] = (c5[3] *= a[51]); + b[30] = (c6[3] *= a[51]); + b[31] = (c7[3] *= a[51]); + c0[0] -= b[24] * a[48]; + c0[1] -= b[24] * a[49]; + c0[2] -= b[24] * a[50]; + c1[0] -= b[25] * a[48]; + c1[1] -= b[25] * a[49]; + c1[2] -= b[25] * a[50]; + c2[0] -= b[26] * a[48]; + c2[1] -= b[26] * a[49]; + c2[2] -= b[26] * a[50]; + c3[0] -= b[27] * a[48]; + c3[1] -= b[27] * a[49]; + c3[2] -= b[27] * a[50]; + c4[0] -= b[28] * a[48]; + c4[1] -= b[28] * a[49]; + c4[2] -= b[28] * a[50]; + c5[0] -= b[29] * a[48]; + c5[1] -= b[29] * a[49]; + c5[2] -= b[29] * a[50]; + c6[0] -= b[30] * a[48]; + c6[1] -= b[30] * a[49]; + c6[2] -= b[30] * a[50]; + c7[0] -= b[31] * a[48]; + c7[1] -= b[31] * a[49]; + c7[2] -= b[31] * a[50]; + + b[16] = (c0[2] *= a[34]); + b[17] = (c1[2] *= a[34]); + b[18] = (c2[2] *= a[34]); + b[19] = (c3[2] *= a[34]); + b[20] = (c4[2] *= a[34]); + b[21] = (c5[2] *= a[34]); + b[22] = (c6[2] *= a[34]); + b[23] = (c7[2] *= a[34]); + c0[0] -= b[16] * a[32]; + c0[1] -= b[16] * a[33]; + c1[0] -= b[17] * a[32]; + c1[1] -= b[17] * a[33]; + c2[0] -= b[18] * a[32]; + c2[1] -= b[18] * a[33]; + c3[0] -= b[19] * a[32]; + c3[1] -= b[19] * a[33]; + c4[0] -= b[20] * a[32]; + c4[1] -= b[20] * a[33]; + c5[0] -= b[21] * a[32]; + c5[1] -= b[21] * a[33]; + c6[0] -= b[22] * a[32]; + c6[1] -= b[22] * a[33]; + c7[0] -= b[23] * a[32]; + c7[1] -= b[23] * a[33]; + + b[ 8] = (c0[1] *= a[17]); + b[ 9] = (c1[1] *= a[17]); + b[10] = (c2[1] *= a[17]); + b[11] = (c3[1] *= a[17]); + b[12] = (c4[1] *= a[17]); + b[13] = (c5[1] *= a[17]); + b[14] = (c6[1] *= a[17]); + b[15] = (c7[1] *= a[17]); + c0[0] -= b[ 8] * a[16]; + c1[0] -= b[ 9] * a[16]; + c2[0] -= b[10] * a[16]; + c3[0] -= b[11] * a[16]; + c4[0] -= b[12] * a[16]; + c5[0] -= b[13] * a[16]; + c6[0] -= b[14] * a[16]; + c7[0] -= b[15] * a[16]; + + b[0] = (c0[0] *= a[0]); + b[1] = (c1[0] *= a[0]); + b[2] = (c2[0] *= a[0]); + b[3] = (c3[0] *= a[0]); + b[4] = (c4[0] *= a[0]); + b[5] = (c5[0] *= a[0]); + b[6] = (c6[0] *= a[0]); + b[7] = (c7[0] *= a[0]); +} + +#endif + +static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (m - 1) * m; + b += (m - 1) * n; + + for (i = m - 1; i >= 0; i--) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = 0; k < i; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a -= m; + b -= 2 * n; + } + +} + +#else + +static inline __attribute__ ((always_inline)) void 
solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + a += (m - 1) * m * 2; + b += (m - 1) * n * 2; + + for (i = m - 1; i >= 0; i--) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= - cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a -= m * 2; + b -= 4 * n; + } + +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM KERNEL LN : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + +#ifdef DOUBLE + int well_aligned = (GEMM_UNROLL_M==8) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0); +#else + int well_aligned = (GEMM_UNROLL_M==16) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0); +#endif + + j = (n >> GEMM_UNROLL_N_SHIFT); + + while (j > 0) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + if (well_aligned) { +#ifdef DOUBLE + solve8x8(aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, cc, ldc); +#else + solve16x8(aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, cc, ldc); +#endif + } + else { + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + } + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 
1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * j * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * j * COMPSIZE, + cc, ldc); + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/power/trsm_kernel_LT.S b/kernel/power/trsm_kernel_LT.S index c561fd014..30f25e015 100644 --- a/kernel/power/trsm_kernel_LT.S +++ b/kernel/power/trsm_kernel_LT.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -180,7 +180,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -257,7 +257,7 @@ #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/trsm_kernel_LT_power10.c b/kernel/power/trsm_kernel_LT_power10.c new file mode 100644 index 000000000..14ff12fe4 --- /dev/null +++ b/kernel/power/trsm_kernel_LT_power10.c @@ -0,0 +1,1265 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
+/* POSSIBILITY OF SUCH DAMAGE. */
+/* */
+/* The views and conclusions contained in the software and */
+/* documentation are those of the authors and should not be */
+/* interpreted as representing official policies, either expressed */
+/* or implied, of The University of Texas at Austin. */
+/*********************************************************************/
+
+#include "common.h"
+#include <altivec.h>
+
+static FLOAT dm1 = -1.;
+
+#ifdef CONJ
+#define GEMM_KERNEL GEMM_KERNEL_L
+#else
+#define GEMM_KERNEL GEMM_KERNEL_N
+#endif
+
+#if GEMM_DEFAULT_UNROLL_M == 1
+#define GEMM_UNROLL_M_SHIFT 0
+#endif
+
+#if GEMM_DEFAULT_UNROLL_M == 2
+#define GEMM_UNROLL_M_SHIFT 1
+#endif
+
+#if GEMM_DEFAULT_UNROLL_M == 4
+#define GEMM_UNROLL_M_SHIFT 2
+#endif
+
+#if GEMM_DEFAULT_UNROLL_M == 6
+#define GEMM_UNROLL_M_SHIFT 2
+#endif
+
+#if GEMM_DEFAULT_UNROLL_M == 8
+#define GEMM_UNROLL_M_SHIFT 3
+#endif
+
+#if GEMM_DEFAULT_UNROLL_M == 16
+#define GEMM_UNROLL_M_SHIFT 4
+#endif
+
+#if GEMM_DEFAULT_UNROLL_N == 1
+#define GEMM_UNROLL_N_SHIFT 0
+#endif
+
+#if GEMM_DEFAULT_UNROLL_N == 2
+#define GEMM_UNROLL_N_SHIFT 1
+#endif
+
+#if GEMM_DEFAULT_UNROLL_N == 4
+#define GEMM_UNROLL_N_SHIFT 2
+#endif
+
+#if GEMM_DEFAULT_UNROLL_N == 8
+#define GEMM_UNROLL_N_SHIFT 3
+#endif
+
+#if GEMM_DEFAULT_UNROLL_N == 16
+#define GEMM_UNROLL_N_SHIFT 4
+#endif
+
+#ifndef COMPLEX
+
+#ifdef DOUBLE
+
+static inline __attribute__ ((always_inline)) void solve8x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
+  FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7;
+  c0 = &c[0*ldc];
+  c1 = &c[1*ldc];
+  c2 = &c[2*ldc];
+  c3 = &c[3*ldc];
+  c4 = &c[4*ldc];
+  c5 = &c[5*ldc];
+  c6 = &c[6*ldc];
+  c7 = &c[7*ldc];
+  vector FLOAT *Va = (vector FLOAT *) a;
+  vector FLOAT *Vb = (vector FLOAT *) b;
+  vector FLOAT *Vc0 = (vector FLOAT *) c0;
+  vector FLOAT *Vc1 = (vector FLOAT *) c1;
+  vector FLOAT *Vc2 = (vector FLOAT *) c2;
+  vector FLOAT *Vc3 = (vector FLOAT *) c3;
+  vector FLOAT *Vc4 = (vector FLOAT *) c4;
+  vector FLOAT *Vc5 = (vector FLOAT *) c5;
+  vector FLOAT *Vc6 = (vector FLOAT *) c6;
+  vector FLOAT *Vc7 = (vector FLOAT *) c7;
+  vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7;
+
+  b[0] = (c0[0] *= a[0]);
+  b[1] = (c1[0] *= a[0]);
+  b[2] = (c2[0] *= a[0]);
+  b[3] = (c3[0] *= a[0]);
+  b[4] = (c4[0] *= a[0]);
+  b[5] = (c5[0] *= a[0]);
+  b[6] = (c6[0] *= a[0]);
+  b[7] = (c7[0] *= a[0]);
+  VbS0 = vec_splat(Vb[0], 0);
+  VbS1 = vec_splat(Vb[0], 1);
+  VbS2 = vec_splat(Vb[1], 0);
+  VbS3 = vec_splat(Vb[1], 1);
+  VbS4 = vec_splat(Vb[2], 0);
+  VbS5 = vec_splat(Vb[2], 1);
+  VbS6 = vec_splat(Vb[3], 0);
+  VbS7 = vec_splat(Vb[3], 1);
+  Vc0[1] = vec_nmsub(VbS0, Va[1], Vc0[1]);
+  Vc0[2] = vec_nmsub(VbS0, Va[2], Vc0[2]);
+  Vc0[3] = vec_nmsub(VbS0, Va[3], Vc0[3]);
+  Vc1[1] = vec_nmsub(VbS1, Va[1], Vc1[1]);
+  Vc1[2] = vec_nmsub(VbS1, Va[2], Vc1[2]);
+  Vc1[3] = vec_nmsub(VbS1, Va[3], Vc1[3]);
+  Vc2[1] = vec_nmsub(VbS2, Va[1], Vc2[1]);
+  Vc2[2] = vec_nmsub(VbS2, Va[2], Vc2[2]);
+  Vc2[3] = vec_nmsub(VbS2, Va[3], 
Vc2[3]); + Vc3[1] = vec_nmsub(VbS3, Va[1], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[2], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[3], Vc3[3]); + Vc4[1] = vec_nmsub(VbS4, Va[1], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[2], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[3], Vc4[3]); + Vc5[1] = vec_nmsub(VbS5, Va[1], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[2], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[3], Vc5[3]); + Vc6[1] = vec_nmsub(VbS6, Va[1], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[2], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[3], Vc6[3]); + Vc7[1] = vec_nmsub(VbS7, Va[1], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[2], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[3], Vc7[3]); + c0[1] -= c0[0] * a[1]; + c1[1] -= c1[0] * a[1]; + c2[1] -= c2[0] * a[1]; + c3[1] -= c3[0] * a[1]; + c4[1] -= c4[0] * a[1]; + c5[1] -= c5[0] * a[1]; + c6[1] -= c6[0] * a[1]; + c7[1] -= c7[0] * a[1]; + + b[ 8] = (c0[1] *= a[9]); + b[ 9] = (c1[1] *= a[9]); + b[10] = (c2[1] *= a[9]); + b[11] = (c3[1] *= a[9]); + b[12] = (c4[1] *= a[9]); + b[13] = (c5[1] *= a[9]); + b[14] = (c6[1] *= a[9]); + b[15] = (c7[1] *= a[9]); + VbS0 = vec_splat(Vb[4], 0); + VbS1 = vec_splat(Vb[4], 1); + VbS2 = vec_splat(Vb[5], 0); + VbS3 = vec_splat(Vb[5], 1); + VbS4 = vec_splat(Vb[6], 0); + VbS5 = vec_splat(Vb[6], 1); + VbS6 = vec_splat(Vb[7], 0); + VbS7 = vec_splat(Vb[7], 1); + Vc0[1] = vec_nmsub(VbS0, Va[5], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[6], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[7], Vc0[3]); + Vc1[1] = vec_nmsub(VbS1, Va[5], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[6], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[7], Vc1[3]); + Vc2[1] = vec_nmsub(VbS2, Va[5], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[6], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[7], Vc2[3]); + Vc3[1] = vec_nmsub(VbS3, Va[5], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[6], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[7], Vc3[3]); + Vc4[1] = vec_nmsub(VbS4, Va[5], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[6], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[7], Vc4[3]); + Vc5[1] = vec_nmsub(VbS5, Va[5], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[6], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[7], Vc5[3]); + Vc6[1] = vec_nmsub(VbS6, Va[5], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[6], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[7], Vc6[3]); + Vc7[1] = vec_nmsub(VbS7, Va[5], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[6], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[7], Vc7[3]); + + b[16] = (c0[2] *= a[18]); + b[17] = (c1[2] *= a[18]); + b[18] = (c2[2] *= a[18]); + b[19] = (c3[2] *= a[18]); + b[20] = (c4[2] *= a[18]); + b[21] = (c5[2] *= a[18]); + b[22] = (c6[2] *= a[18]); + b[23] = (c7[2] *= a[18]); + VbS0 = vec_splat(Vb[ 8], 0); + VbS1 = vec_splat(Vb[ 8], 1); + VbS2 = vec_splat(Vb[ 9], 0); + VbS3 = vec_splat(Vb[ 9], 1); + VbS4 = vec_splat(Vb[10], 0); + VbS5 = vec_splat(Vb[10], 1); + VbS6 = vec_splat(Vb[11], 0); + VbS7 = vec_splat(Vb[11], 1); + Vc0[2] = vec_nmsub(VbS0, Va[10], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[11], Vc0[3]); + Vc1[2] = vec_nmsub(VbS1, Va[10], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[11], Vc1[3]); + Vc2[2] = vec_nmsub(VbS2, Va[10], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[11], Vc2[3]); + Vc3[2] = vec_nmsub(VbS3, Va[10], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[11], Vc3[3]); + Vc4[2] = vec_nmsub(VbS4, Va[10], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[11], Vc4[3]); + Vc5[2] = vec_nmsub(VbS5, Va[10], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[11], Vc5[3]); + Vc6[2] = vec_nmsub(VbS6, Va[10], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[11], Vc6[3]); + Vc7[2] = vec_nmsub(VbS7, Va[10], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[11], Vc7[3]); + c0[3] -= c0[2] * a[19]; + 
c1[3] -= c1[2] * a[19]; + c2[3] -= c2[2] * a[19]; + c3[3] -= c3[2] * a[19]; + c4[3] -= c4[2] * a[19]; + c5[3] -= c5[2] * a[19]; + c6[3] -= c6[2] * a[19]; + c7[3] -= c7[2] * a[19]; + + b[24] = (c0[3] *= a[27]); + b[25] = (c1[3] *= a[27]); + b[26] = (c2[3] *= a[27]); + b[27] = (c3[3] *= a[27]); + b[28] = (c4[3] *= a[27]); + b[29] = (c5[3] *= a[27]); + b[30] = (c6[3] *= a[27]); + b[31] = (c7[3] *= a[27]); + VbS0 = vec_splat(Vb[12], 0); + VbS1 = vec_splat(Vb[12], 1); + VbS2 = vec_splat(Vb[13], 0); + VbS3 = vec_splat(Vb[13], 1); + VbS4 = vec_splat(Vb[14], 0); + VbS5 = vec_splat(Vb[14], 1); + VbS6 = vec_splat(Vb[15], 0); + VbS7 = vec_splat(Vb[15], 1); + Vc0[2] = vec_nmsub(VbS0, Va[14], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[15], Vc0[3]); + Vc1[2] = vec_nmsub(VbS1, Va[14], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[15], Vc1[3]); + Vc2[2] = vec_nmsub(VbS2, Va[14], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[15], Vc2[3]); + Vc3[2] = vec_nmsub(VbS3, Va[14], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[15], Vc3[3]); + Vc4[2] = vec_nmsub(VbS4, Va[14], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[15], Vc4[3]); + Vc5[2] = vec_nmsub(VbS5, Va[14], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[15], Vc5[3]); + Vc6[2] = vec_nmsub(VbS6, Va[14], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[15], Vc6[3]); + Vc7[2] = vec_nmsub(VbS7, Va[14], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[15], Vc7[3]); + + b[32] = (c0[4] *= a[36]); + b[33] = (c1[4] *= a[36]); + b[34] = (c2[4] *= a[36]); + b[35] = (c3[4] *= a[36]); + b[36] = (c4[4] *= a[36]); + b[37] = (c5[4] *= a[36]); + b[38] = (c6[4] *= a[36]); + b[39] = (c7[4] *= a[36]); + VbS0 = vec_splat(Vb[16], 0); + VbS1 = vec_splat(Vb[16], 1); + VbS2 = vec_splat(Vb[17], 0); + VbS3 = vec_splat(Vb[17], 1); + VbS4 = vec_splat(Vb[18], 0); + VbS5 = vec_splat(Vb[18], 1); + VbS6 = vec_splat(Vb[19], 0); + VbS7 = vec_splat(Vb[19], 1); + Vc0[3] = vec_nmsub(VbS0, Va[19], Vc0[3]); + Vc1[3] = vec_nmsub(VbS1, Va[19], Vc1[3]); + Vc2[3] = vec_nmsub(VbS2, Va[19], Vc2[3]); + Vc3[3] = vec_nmsub(VbS3, Va[19], Vc3[3]); + Vc4[3] = vec_nmsub(VbS4, Va[19], Vc4[3]); + Vc5[3] = vec_nmsub(VbS5, Va[19], Vc5[3]); + Vc6[3] = vec_nmsub(VbS6, Va[19], Vc6[3]); + Vc7[3] = vec_nmsub(VbS7, Va[19], Vc7[3]); + c0[5] -= c0[4] * a[37]; + c1[5] -= c1[4] * a[37]; + c2[5] -= c2[4] * a[37]; + c3[5] -= c3[4] * a[37]; + c4[5] -= c4[4] * a[37]; + c5[5] -= c5[4] * a[37]; + c6[5] -= c6[4] * a[37]; + c7[5] -= c7[4] * a[37]; + + b[40] = (c0[5] *= a[45]); + b[41] = (c1[5] *= a[45]); + b[42] = (c2[5] *= a[45]); + b[43] = (c3[5] *= a[45]); + b[44] = (c4[5] *= a[45]); + b[45] = (c5[5] *= a[45]); + b[46] = (c6[5] *= a[45]); + b[47] = (c7[5] *= a[45]); + VbS0 = vec_splat(Vb[20], 0); + VbS1 = vec_splat(Vb[20], 1); + VbS2 = vec_splat(Vb[21], 0); + VbS3 = vec_splat(Vb[21], 1); + VbS4 = vec_splat(Vb[22], 0); + VbS5 = vec_splat(Vb[22], 1); + VbS6 = vec_splat(Vb[23], 0); + VbS7 = vec_splat(Vb[23], 1); + Vc0[3] = vec_nmsub(VbS0, Va[23], Vc0[3]); + Vc1[3] = vec_nmsub(VbS1, Va[23], Vc1[3]); + Vc2[3] = vec_nmsub(VbS2, Va[23], Vc2[3]); + Vc3[3] = vec_nmsub(VbS3, Va[23], Vc3[3]); + Vc4[3] = vec_nmsub(VbS4, Va[23], Vc4[3]); + Vc5[3] = vec_nmsub(VbS5, Va[23], Vc5[3]); + Vc6[3] = vec_nmsub(VbS6, Va[23], Vc6[3]); + Vc7[3] = vec_nmsub(VbS7, Va[23], Vc7[3]); + + b[48] = (c0[6] *= a[54]); + b[49] = (c1[6] *= a[54]); + b[50] = (c2[6] *= a[54]); + b[51] = (c3[6] *= a[54]); + b[52] = (c4[6] *= a[54]); + b[53] = (c5[6] *= a[54]); + b[54] = (c6[6] *= a[54]); + b[55] = (c7[6] *= a[54]); + c0[7] -= c0[6] * a[55]; + c1[7] -= c1[6] * a[55]; + c2[7] -= c2[6] * a[55]; + c3[7] -= c3[6] * a[55]; + 
c4[7] -= c4[6] * a[55]; + c5[7] -= c5[6] * a[55]; + c6[7] -= c6[6] * a[55]; + c7[7] -= c7[6] * a[55]; + + b[56] = (c0[7] *= a[63]); + b[57] = (c1[7] *= a[63]); + b[58] = (c2[7] *= a[63]); + b[59] = (c3[7] *= a[63]); + b[60] = (c4[7] *= a[63]); + b[61] = (c5[7] *= a[63]); + b[62] = (c6[7] *= a[63]); + b[63] = (c7[7] *= a[63]); +} + +#else + +static inline __attribute__ ((always_inline)) void solve16x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7; + c0 = &c[0*ldc]; + c1 = &c[1*ldc]; + c2 = &c[2*ldc]; + c3 = &c[3*ldc]; + c4 = &c[4*ldc]; + c5 = &c[5*ldc]; + c6 = &c[6*ldc]; + c7 = &c[7*ldc]; + + vector FLOAT *Va = (vector FLOAT *) a; + vector FLOAT *Vb = (vector FLOAT *) b; + vector FLOAT *Vc0 = (vector FLOAT *) c0; + vector FLOAT *Vc1 = (vector FLOAT *) c1; + vector FLOAT *Vc2 = (vector FLOAT *) c2; + vector FLOAT *Vc3 = (vector FLOAT *) c3; + vector FLOAT *Vc4 = (vector FLOAT *) c4; + vector FLOAT *Vc5 = (vector FLOAT *) c5; + vector FLOAT *Vc6 = (vector FLOAT *) c6; + vector FLOAT *Vc7 = (vector FLOAT *) c7; + vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7; + int j; + + b[0] = (c0[0] *= a[0]); + b[1] = (c1[0] *= a[0]); + b[2] = (c2[0] *= a[0]); + b[3] = (c3[0] *= a[0]); + b[4] = (c4[0] *= a[0]); + b[5] = (c5[0] *= a[0]); + b[6] = (c6[0] *= a[0]); + b[7] = (c7[0] *= a[0]); + VbS0 = vec_splat(Vb[0], 0); + VbS1 = vec_splat(Vb[0], 1); + VbS2 = vec_splat(Vb[0], 2); + VbS3 = vec_splat(Vb[0], 3); + VbS4 = vec_splat(Vb[1], 0); + VbS5 = vec_splat(Vb[1], 1); + VbS6 = vec_splat(Vb[1], 2); + VbS7 = vec_splat(Vb[1], 3); + Vc0[1] = vec_nmsub(VbS0, Va[1], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[2], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[3], Vc0[3]); + Vc1[1] = vec_nmsub(VbS1, Va[1], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[2], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[3], Vc1[3]); + Vc2[1] = vec_nmsub(VbS2, Va[1], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[2], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[3], Vc2[3]); + Vc3[1] = vec_nmsub(VbS3, Va[1], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[2], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[3], Vc3[3]); + Vc4[1] = vec_nmsub(VbS4, Va[1], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[2], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[3], Vc4[3]); + Vc5[1] = vec_nmsub(VbS5, Va[1], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[2], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[3], Vc5[3]); + Vc6[1] = vec_nmsub(VbS6, Va[1], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[2], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[3], Vc6[3]); + Vc7[1] = vec_nmsub(VbS7, Va[1], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[2], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[3], Vc7[3]); + c0[1] -= b[0] * a[ 1]; + c0[2] -= b[0] * a[ 2]; + c0[3] -= b[0] * a[ 3]; + c1[1] -= b[1] * a[ 1]; + c1[2] -= b[1] * a[ 2]; + c1[3] -= b[1] * a[ 3]; + c2[1] -= b[2] * a[ 1]; + c2[2] -= b[2] * a[ 2]; + c2[3] -= b[2] * a[ 3]; + c3[1] -= b[3] * a[ 1]; + c3[2] -= b[3] * a[ 2]; + c3[3] -= b[3] * a[ 3]; + c4[1] -= b[4] * a[ 1]; + c4[2] -= b[4] * a[ 2]; + c4[3] -= b[4] * a[ 3]; + c5[1] -= b[5] * a[ 1]; + c5[2] -= b[5] * a[ 2]; + c5[3] -= b[5] * a[ 3]; + c6[1] -= b[6] * a[ 1]; + c6[2] -= b[6] * a[ 2]; + c6[3] -= b[6] * a[ 3]; + c7[1] -= b[7] * a[ 1]; + c7[2] -= b[7] * a[ 2]; + c7[3] -= b[7] * a[ 3]; + + b[ 8] = (c0[1] *= a[17]); + b[ 9] = (c1[1] *= a[17]); + b[10] = (c2[1] *= a[17]); + b[11] = (c3[1] *= a[17]); + b[12] = (c4[1] *= a[17]); + b[13] = (c5[1] *= a[17]); + b[14] = (c6[1] *= a[17]); + b[15] = (c7[1] *= a[17]); + VbS0 = vec_splat(Vb[2], 0); + VbS1 = vec_splat(Vb[2], 1); + VbS2 = vec_splat(Vb[2], 2); + VbS3 = 
vec_splat(Vb[2], 3); + VbS4 = vec_splat(Vb[3], 0); + VbS5 = vec_splat(Vb[3], 1); + VbS6 = vec_splat(Vb[3], 2); + VbS7 = vec_splat(Vb[3], 3); + Vc0[1] = vec_nmsub(VbS0, Va[5], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[6], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[7], Vc0[3]); + Vc1[1] = vec_nmsub(VbS1, Va[5], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[6], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[7], Vc1[3]); + Vc2[1] = vec_nmsub(VbS2, Va[5], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[6], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[7], Vc2[3]); + Vc3[1] = vec_nmsub(VbS3, Va[5], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[6], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[7], Vc3[3]); + Vc4[1] = vec_nmsub(VbS4, Va[5], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[6], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[7], Vc4[3]); + Vc5[1] = vec_nmsub(VbS5, Va[5], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[6], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[7], Vc5[3]); + Vc6[1] = vec_nmsub(VbS6, Va[5], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[6], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[7], Vc6[3]); + Vc7[1] = vec_nmsub(VbS7, Va[5], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[6], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[7], Vc7[3]); + c0[2] -= b[ 8] * a[18]; + c0[3] -= b[ 8] * a[19]; + c1[2] -= b[ 9] * a[18]; + c1[3] -= b[ 9] * a[19]; + c2[2] -= b[10] * a[18]; + c2[3] -= b[10] * a[19]; + c3[2] -= b[11] * a[18]; + c3[3] -= b[11] * a[19]; + c4[2] -= b[12] * a[18]; + c4[3] -= b[12] * a[19]; + c5[2] -= b[13] * a[18]; + c5[3] -= b[13] * a[19]; + c6[2] -= b[14] * a[18]; + c6[3] -= b[14] * a[19]; + c7[2] -= b[15] * a[18]; + c7[3] -= b[15] * a[19]; + + b[16] = (c0[2] *= a[34]); + b[17] = (c1[2] *= a[34]); + b[18] = (c2[2] *= a[34]); + b[19] = (c3[2] *= a[34]); + b[20] = (c4[2] *= a[34]); + b[21] = (c5[2] *= a[34]); + b[22] = (c6[2] *= a[34]); + b[23] = (c7[2] *= a[34]); + VbS0 = vec_splat(Vb[4], 0); + VbS1 = vec_splat(Vb[4], 1); + VbS2 = vec_splat(Vb[4], 2); + VbS3 = vec_splat(Vb[4], 3); + VbS4 = vec_splat(Vb[5], 0); + VbS5 = vec_splat(Vb[5], 1); + VbS6 = vec_splat(Vb[5], 2); + VbS7 = vec_splat(Vb[5], 3); + Vc0[1] = vec_nmsub(VbS0, Va[ 9], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[10], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[11], Vc0[3]); + Vc1[1] = vec_nmsub(VbS1, Va[ 9], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[10], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[11], Vc1[3]); + Vc2[1] = vec_nmsub(VbS2, Va[ 9], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[10], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[11], Vc2[3]); + Vc3[1] = vec_nmsub(VbS3, Va[ 9], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[10], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[11], Vc3[3]); + Vc4[1] = vec_nmsub(VbS4, Va[ 9], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[10], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[11], Vc4[3]); + Vc5[1] = vec_nmsub(VbS5, Va[ 9], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[10], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[11], Vc5[3]); + Vc6[1] = vec_nmsub(VbS6, Va[ 9], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[10], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[11], Vc6[3]); + Vc7[1] = vec_nmsub(VbS7, Va[ 9], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[10], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[11], Vc7[3]); + c0[3] -= b[16] * a[35]; + c1[3] -= b[17] * a[35]; + c2[3] -= b[18] * a[35]; + c3[3] -= b[19] * a[35]; + c4[3] -= b[20] * a[35]; + c5[3] -= b[21] * a[35]; + c6[3] -= b[22] * a[35]; + c7[3] -= b[23] * a[35]; + + b[24] = (c0[3] *= a[51]); + b[25] = (c1[3] *= a[51]); + b[26] = (c2[3] *= a[51]); + b[27] = (c3[3] *= a[51]); + b[28] = (c4[3] *= a[51]); + b[29] = (c5[3] *= a[51]); + b[30] = (c6[3] *= a[51]); + b[31] = (c7[3] *= a[51]); + VbS0 = 
vec_splat(Vb[6], 0); + VbS1 = vec_splat(Vb[6], 1); + VbS2 = vec_splat(Vb[6], 2); + VbS3 = vec_splat(Vb[6], 3); + VbS4 = vec_splat(Vb[7], 0); + VbS5 = vec_splat(Vb[7], 1); + VbS6 = vec_splat(Vb[7], 2); + VbS7 = vec_splat(Vb[7], 3); + Vc0[1] = vec_nmsub(VbS0, Va[13], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[14], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[15], Vc0[3]); + Vc1[1] = vec_nmsub(VbS1, Va[13], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[14], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[15], Vc1[3]); + Vc2[1] = vec_nmsub(VbS2, Va[13], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[14], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[15], Vc2[3]); + Vc3[1] = vec_nmsub(VbS3, Va[13], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[14], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[15], Vc3[3]); + Vc4[1] = vec_nmsub(VbS4, Va[13], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[14], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[15], Vc4[3]); + Vc5[1] = vec_nmsub(VbS5, Va[13], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[14], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[15], Vc5[3]); + Vc6[1] = vec_nmsub(VbS6, Va[13], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[14], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[15], Vc6[3]); + Vc7[1] = vec_nmsub(VbS7, Va[13], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[14], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[15], Vc7[3]); + + b[32] = (c0[4] *= a[68]); + b[33] = (c1[4] *= a[68]); + b[34] = (c2[4] *= a[68]); + b[35] = (c3[4] *= a[68]); + b[36] = (c4[4] *= a[68]); + b[37] = (c5[4] *= a[68]); + b[38] = (c6[4] *= a[68]); + b[39] = (c7[4] *= a[68]); + VbS0 = vec_splat(Vb[8], 0); + VbS1 = vec_splat(Vb[8], 1); + VbS2 = vec_splat(Vb[8], 2); + VbS3 = vec_splat(Vb[8], 3); + VbS4 = vec_splat(Vb[9], 0); + VbS5 = vec_splat(Vb[9], 1); + VbS6 = vec_splat(Vb[9], 2); + VbS7 = vec_splat(Vb[9], 3); + Vc0[2] = vec_nmsub(VbS0, Va[18], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[19], Vc0[3]); + Vc1[2] = vec_nmsub(VbS1, Va[18], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[19], Vc1[3]); + Vc2[2] = vec_nmsub(VbS2, Va[18], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[19], Vc2[3]); + Vc3[2] = vec_nmsub(VbS3, Va[18], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[19], Vc3[3]); + Vc4[2] = vec_nmsub(VbS4, Va[18], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[19], Vc4[3]); + Vc5[2] = vec_nmsub(VbS5, Va[18], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[19], Vc5[3]); + Vc6[2] = vec_nmsub(VbS6, Va[18], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[19], Vc6[3]); + Vc7[2] = vec_nmsub(VbS7, Va[18], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[19], Vc7[3]); + c0[5] -= b[32] * a[69]; + c0[6] -= b[32] * a[70]; + c0[7] -= b[32] * a[71]; + c1[5] -= b[33] * a[69]; + c1[6] -= b[33] * a[70]; + c1[7] -= b[33] * a[71]; + c2[5] -= b[34] * a[69]; + c2[6] -= b[34] * a[70]; + c2[7] -= b[34] * a[71]; + c3[5] -= b[35] * a[69]; + c3[6] -= b[35] * a[70]; + c3[7] -= b[35] * a[71]; + c4[5] -= b[36] * a[69]; + c4[6] -= b[36] * a[70]; + c4[7] -= b[36] * a[71]; + c5[5] -= b[37] * a[69]; + c5[6] -= b[37] * a[70]; + c5[7] -= b[37] * a[71]; + c6[5] -= b[38] * a[69]; + c6[6] -= b[38] * a[70]; + c6[7] -= b[38] * a[71]; + c7[5] -= b[39] * a[69]; + c7[6] -= b[39] * a[70]; + c7[7] -= b[39] * a[71]; + + b[40] = (c0[5] *= a[85]); + b[41] = (c1[5] *= a[85]); + b[42] = (c2[5] *= a[85]); + b[43] = (c3[5] *= a[85]); + b[44] = (c4[5] *= a[85]); + b[45] = (c5[5] *= a[85]); + b[46] = (c6[5] *= a[85]); + b[47] = (c7[5] *= a[85]); + VbS0 = vec_splat(Vb[10], 0); + VbS1 = vec_splat(Vb[10], 1); + VbS2 = vec_splat(Vb[10], 2); + VbS3 = vec_splat(Vb[10], 3); + VbS4 = vec_splat(Vb[11], 0); + VbS5 = vec_splat(Vb[11], 1); + VbS6 = vec_splat(Vb[11], 2); + VbS7 = vec_splat(Vb[11], 3); 
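+  /* Row 5 of the packed 16x16 triangular factor is now solved: each fresh b value is
+     broadcast across a vector lane with vec_splat, and vec_nmsub subtracts its multiples
+     of that row of A from the still-unsolved C rows 8..15 (c -= b * a, fused). Rows 6
+     and 7 do not fill a whole vector, so they are handled by the scalar updates below. */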
+ Vc0[2] = vec_nmsub(VbS0, Va[22], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[23], Vc0[3]); + Vc1[2] = vec_nmsub(VbS1, Va[22], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[23], Vc1[3]); + Vc2[2] = vec_nmsub(VbS2, Va[22], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[23], Vc2[3]); + Vc3[2] = vec_nmsub(VbS3, Va[22], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[23], Vc3[3]); + Vc4[2] = vec_nmsub(VbS4, Va[22], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[23], Vc4[3]); + Vc5[2] = vec_nmsub(VbS5, Va[22], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[23], Vc5[3]); + Vc6[2] = vec_nmsub(VbS6, Va[22], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[23], Vc6[3]); + Vc7[2] = vec_nmsub(VbS7, Va[22], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[23], Vc7[3]); + c0[6] -= b[40] * a[86]; + c0[7] -= b[40] * a[87]; + c1[6] -= b[41] * a[86]; + c1[7] -= b[41] * a[87]; + c2[6] -= b[42] * a[86]; + c2[7] -= b[42] * a[87]; + c3[6] -= b[43] * a[86]; + c3[7] -= b[43] * a[87]; + c4[6] -= b[44] * a[86]; + c4[7] -= b[44] * a[87]; + c5[6] -= b[45] * a[86]; + c5[7] -= b[45] * a[87]; + c6[6] -= b[46] * a[86]; + c6[7] -= b[46] * a[87]; + c7[6] -= b[47] * a[86]; + c7[7] -= b[47] * a[87]; + + b[48] = (c0[6] *= a[102]); + b[49] = (c1[6] *= a[102]); + b[50] = (c2[6] *= a[102]); + b[51] = (c3[6] *= a[102]); + b[52] = (c4[6] *= a[102]); + b[53] = (c5[6] *= a[102]); + b[54] = (c6[6] *= a[102]); + b[55] = (c7[6] *= a[102]); + VbS0 = vec_splat(Vb[12], 0); + VbS1 = vec_splat(Vb[12], 1); + VbS2 = vec_splat(Vb[12], 2); + VbS3 = vec_splat(Vb[12], 3); + VbS4 = vec_splat(Vb[13], 0); + VbS5 = vec_splat(Vb[13], 1); + VbS6 = vec_splat(Vb[13], 2); + VbS7 = vec_splat(Vb[13], 3); + Vc0[2] = vec_nmsub(VbS0, Va[26], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[27], Vc0[3]); + Vc1[2] = vec_nmsub(VbS1, Va[26], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[27], Vc1[3]); + Vc2[2] = vec_nmsub(VbS2, Va[26], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[27], Vc2[3]); + Vc3[2] = vec_nmsub(VbS3, Va[26], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[27], Vc3[3]); + Vc4[2] = vec_nmsub(VbS4, Va[26], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[27], Vc4[3]); + Vc5[2] = vec_nmsub(VbS5, Va[26], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[27], Vc5[3]); + Vc6[2] = vec_nmsub(VbS6, Va[26], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[27], Vc6[3]); + Vc7[2] = vec_nmsub(VbS7, Va[26], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[27], Vc7[3]); + c0[7] -= b[48] * a[103]; + c1[7] -= b[49] * a[103]; + c2[7] -= b[50] * a[103]; + c3[7] -= b[51] * a[103]; + c4[7] -= b[52] * a[103]; + c5[7] -= b[53] * a[103]; + c6[7] -= b[54] * a[103]; + c7[7] -= b[55] * a[103]; + + b[56] = (c0[7] *= a[119]); + b[57] = (c1[7] *= a[119]); + b[58] = (c2[7] *= a[119]); + b[59] = (c3[7] *= a[119]); + b[60] = (c4[7] *= a[119]); + b[61] = (c5[7] *= a[119]); + b[62] = (c6[7] *= a[119]); + b[63] = (c7[7] *= a[119]); + VbS0 = vec_splat(Vb[14], 0); + VbS1 = vec_splat(Vb[14], 1); + VbS2 = vec_splat(Vb[14], 2); + VbS3 = vec_splat(Vb[14], 3); + VbS4 = vec_splat(Vb[15], 0); + VbS5 = vec_splat(Vb[15], 1); + VbS6 = vec_splat(Vb[15], 2); + VbS7 = vec_splat(Vb[15], 3); + Vc0[2] = vec_nmsub(VbS0, Va[30], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[31], Vc0[3]); + Vc1[2] = vec_nmsub(VbS1, Va[30], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[31], Vc1[3]); + Vc2[2] = vec_nmsub(VbS2, Va[30], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[31], Vc2[3]); + Vc3[2] = vec_nmsub(VbS3, Va[30], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[31], Vc3[3]); + Vc4[2] = vec_nmsub(VbS4, Va[30], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[31], Vc4[3]); + Vc5[2] = vec_nmsub(VbS5, Va[30], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[31], Vc5[3]); + Vc6[2] = 
vec_nmsub(VbS6, Va[30], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[31], Vc6[3]); + Vc7[2] = vec_nmsub(VbS7, Va[30], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[31], Vc7[3]); + + b[64] = (c0[8] *= a[136]); + b[65] = (c1[8] *= a[136]); + b[66] = (c2[8] *= a[136]); + b[67] = (c3[8] *= a[136]); + b[68] = (c4[8] *= a[136]); + b[69] = (c5[8] *= a[136]); + b[70] = (c6[8] *= a[136]); + b[71] = (c7[8] *= a[136]); + VbS0 = vec_splat(Vb[16], 0); + VbS1 = vec_splat(Vb[16], 1); + VbS2 = vec_splat(Vb[16], 2); + VbS3 = vec_splat(Vb[16], 3); + VbS4 = vec_splat(Vb[17], 0); + VbS5 = vec_splat(Vb[17], 1); + VbS6 = vec_splat(Vb[17], 2); + VbS7 = vec_splat(Vb[17], 3); + Vc0[3] = vec_nmsub(VbS0, Va[35], Vc0[3]); + Vc1[3] = vec_nmsub(VbS1, Va[35], Vc1[3]); + Vc2[3] = vec_nmsub(VbS2, Va[35], Vc2[3]); + Vc3[3] = vec_nmsub(VbS3, Va[35], Vc3[3]); + Vc4[3] = vec_nmsub(VbS4, Va[35], Vc4[3]); + Vc5[3] = vec_nmsub(VbS5, Va[35], Vc5[3]); + Vc6[3] = vec_nmsub(VbS6, Va[35], Vc6[3]); + Vc7[3] = vec_nmsub(VbS7, Va[35], Vc7[3]); + c0[ 9] -= b[64] * a[137]; + c0[10] -= b[64] * a[138]; + c0[11] -= b[64] * a[139]; + c1[ 9] -= b[65] * a[137]; + c1[10] -= b[65] * a[138]; + c1[11] -= b[65] * a[139]; + c2[ 9] -= b[66] * a[137]; + c2[10] -= b[66] * a[138]; + c2[11] -= b[66] * a[139]; + c3[ 9] -= b[67] * a[137]; + c3[10] -= b[67] * a[138]; + c3[11] -= b[67] * a[139]; + c4[ 9] -= b[68] * a[137]; + c4[10] -= b[68] * a[138]; + c4[11] -= b[68] * a[139]; + c5[ 9] -= b[69] * a[137]; + c5[10] -= b[69] * a[138]; + c5[11] -= b[69] * a[139]; + c6[ 9] -= b[70] * a[137]; + c6[10] -= b[70] * a[138]; + c6[11] -= b[70] * a[139]; + c7[ 9] -= b[71] * a[137]; + c7[10] -= b[71] * a[138]; + c7[11] -= b[71] * a[139]; + + b[72] = (c0[9] *= a[153]); + b[73] = (c1[9] *= a[153]); + b[74] = (c2[9] *= a[153]); + b[75] = (c3[9] *= a[153]); + b[76] = (c4[9] *= a[153]); + b[77] = (c5[9] *= a[153]); + b[78] = (c6[9] *= a[153]); + b[79] = (c7[9] *= a[153]); + VbS0 = vec_splat(Vb[18], 0); + VbS1 = vec_splat(Vb[18], 1); + VbS2 = vec_splat(Vb[18], 2); + VbS3 = vec_splat(Vb[18], 3); + VbS4 = vec_splat(Vb[19], 0); + VbS5 = vec_splat(Vb[19], 1); + VbS6 = vec_splat(Vb[19], 2); + VbS7 = vec_splat(Vb[19], 3); + Vc0[3] = vec_nmsub(VbS0, Va[39], Vc0[3]); + Vc1[3] = vec_nmsub(VbS1, Va[39], Vc1[3]); + Vc2[3] = vec_nmsub(VbS2, Va[39], Vc2[3]); + Vc3[3] = vec_nmsub(VbS3, Va[39], Vc3[3]); + Vc4[3] = vec_nmsub(VbS4, Va[39], Vc4[3]); + Vc5[3] = vec_nmsub(VbS5, Va[39], Vc5[3]); + Vc6[3] = vec_nmsub(VbS6, Va[39], Vc6[3]); + Vc7[3] = vec_nmsub(VbS7, Va[39], Vc7[3]); + c0[10] -= b[72] * a[154]; + c0[11] -= b[72] * a[155]; + c1[10] -= b[73] * a[154]; + c1[11] -= b[73] * a[155]; + c2[10] -= b[74] * a[154]; + c2[11] -= b[74] * a[155]; + c3[10] -= b[75] * a[154]; + c3[11] -= b[75] * a[155]; + c4[10] -= b[76] * a[154]; + c4[11] -= b[76] * a[155]; + c5[10] -= b[77] * a[154]; + c5[11] -= b[77] * a[155]; + c6[10] -= b[78] * a[154]; + c6[11] -= b[78] * a[155]; + c7[10] -= b[79] * a[154]; + c7[11] -= b[79] * a[155]; + + b[80] = (c0[10] *= a[170]); + b[81] = (c1[10] *= a[170]); + b[82] = (c2[10] *= a[170]); + b[83] = (c3[10] *= a[170]); + b[84] = (c4[10] *= a[170]); + b[85] = (c5[10] *= a[170]); + b[86] = (c6[10] *= a[170]); + b[87] = (c7[10] *= a[170]); + VbS0 = vec_splat(Vb[20], 0); + VbS1 = vec_splat(Vb[20], 1); + VbS2 = vec_splat(Vb[20], 2); + VbS3 = vec_splat(Vb[20], 3); + VbS4 = vec_splat(Vb[21], 0); + VbS5 = vec_splat(Vb[21], 1); + VbS6 = vec_splat(Vb[21], 2); + VbS7 = vec_splat(Vb[21], 3); + Vc0[3] = vec_nmsub(VbS0, Va[43], Vc0[3]); + Vc1[3] = vec_nmsub(VbS1, Va[43], Vc1[3]); + Vc2[3] = 
vec_nmsub(VbS2, Va[43], Vc2[3]); + Vc3[3] = vec_nmsub(VbS3, Va[43], Vc3[3]); + Vc4[3] = vec_nmsub(VbS4, Va[43], Vc4[3]); + Vc5[3] = vec_nmsub(VbS5, Va[43], Vc5[3]); + Vc6[3] = vec_nmsub(VbS6, Va[43], Vc6[3]); + Vc7[3] = vec_nmsub(VbS7, Va[43], Vc7[3]); + c0[11] -= b[80] * a[171]; + c1[11] -= b[81] * a[171]; + c2[11] -= b[82] * a[171]; + c3[11] -= b[83] * a[171]; + c4[11] -= b[84] * a[171]; + c5[11] -= b[85] * a[171]; + c6[11] -= b[86] * a[171]; + c7[11] -= b[87] * a[171]; + + b[88] = (c0[11] *= a[187]); + b[89] = (c1[11] *= a[187]); + b[90] = (c2[11] *= a[187]); + b[91] = (c3[11] *= a[187]); + b[92] = (c4[11] *= a[187]); + b[93] = (c5[11] *= a[187]); + b[94] = (c6[11] *= a[187]); + b[95] = (c7[11] *= a[187]); + VbS0 = vec_splat(Vb[22], 0); + VbS1 = vec_splat(Vb[22], 1); + VbS2 = vec_splat(Vb[22], 2); + VbS3 = vec_splat(Vb[22], 3); + VbS4 = vec_splat(Vb[23], 0); + VbS5 = vec_splat(Vb[23], 1); + VbS6 = vec_splat(Vb[23], 2); + VbS7 = vec_splat(Vb[23], 3); + Vc0[3] = vec_nmsub(VbS0, Va[47], Vc0[3]); + Vc1[3] = vec_nmsub(VbS1, Va[47], Vc1[3]); + Vc2[3] = vec_nmsub(VbS2, Va[47], Vc2[3]); + Vc3[3] = vec_nmsub(VbS3, Va[47], Vc3[3]); + Vc4[3] = vec_nmsub(VbS4, Va[47], Vc4[3]); + Vc5[3] = vec_nmsub(VbS5, Va[47], Vc5[3]); + Vc6[3] = vec_nmsub(VbS6, Va[47], Vc6[3]); + Vc7[3] = vec_nmsub(VbS7, Va[47], Vc7[3]); + + b[ 96] = (c0[12] *= a[204]); + b[ 97] = (c1[12] *= a[204]); + b[ 98] = (c2[12] *= a[204]); + b[ 99] = (c3[12] *= a[204]); + b[100] = (c4[12] *= a[204]); + b[101] = (c5[12] *= a[204]); + b[102] = (c6[12] *= a[204]); + b[103] = (c7[12] *= a[204]); + c0[13] -= b[ 96] * a[205]; + c0[14] -= b[ 96] * a[206]; + c0[15] -= b[ 96] * a[207]; + c1[13] -= b[ 97] * a[205]; + c1[14] -= b[ 97] * a[206]; + c1[15] -= b[ 97] * a[207]; + c2[13] -= b[ 98] * a[205]; + c2[14] -= b[ 98] * a[206]; + c2[15] -= b[ 98] * a[207]; + c3[13] -= b[ 99] * a[205]; + c3[14] -= b[ 99] * a[206]; + c3[15] -= b[ 99] * a[207]; + c4[13] -= b[100] * a[205]; + c4[14] -= b[100] * a[206]; + c4[15] -= b[100] * a[207]; + c5[13] -= b[101] * a[205]; + c5[14] -= b[101] * a[206]; + c5[15] -= b[101] * a[207]; + c6[13] -= b[102] * a[205]; + c6[14] -= b[102] * a[206]; + c6[15] -= b[102] * a[207]; + c7[13] -= b[103] * a[205]; + c7[14] -= b[103] * a[206]; + c7[15] -= b[103] * a[207]; + + b[104] = (c0[13] *= a[221]); + b[105] = (c1[13] *= a[221]); + b[106] = (c2[13] *= a[221]); + b[107] = (c3[13] *= a[221]); + b[108] = (c4[13] *= a[221]); + b[109] = (c5[13] *= a[221]); + b[110] = (c6[13] *= a[221]); + b[111] = (c7[13] *= a[221]); + c0[14] -= b[104] * a[222]; + c0[15] -= b[104] * a[223]; + c1[14] -= b[105] * a[222]; + c1[15] -= b[105] * a[223]; + c2[14] -= b[106] * a[222]; + c2[15] -= b[106] * a[223]; + c3[14] -= b[107] * a[222]; + c3[15] -= b[107] * a[223]; + c4[14] -= b[108] * a[222]; + c4[15] -= b[108] * a[223]; + c5[14] -= b[109] * a[222]; + c5[15] -= b[109] * a[223]; + c6[14] -= b[110] * a[222]; + c6[15] -= b[110] * a[223]; + c7[14] -= b[111] * a[222]; + c7[15] -= b[111] * a[223]; + + b[112] = (c0[14] *= a[238]); + b[113] = (c1[14] *= a[238]); + b[114] = (c2[14] *= a[238]); + b[115] = (c3[14] *= a[238]); + b[116] = (c4[14] *= a[238]); + b[117] = (c5[14] *= a[238]); + b[118] = (c6[14] *= a[238]); + b[119] = (c7[14] *= a[238]); + c0[15] -= b[112] * a[239]; + c1[15] -= b[113] * a[239]; + c2[15] -= b[114] * a[239]; + c3[15] -= b[115] * a[239]; + c4[15] -= b[116] * a[239]; + c5[15] -= b[117] * a[239]; + c6[15] -= b[118] * a[239]; + c7[15] -= b[119] * a[239]; + + b[120] = (c0[15] *= a[255]); + b[121] = (c1[15] *= a[255]); + b[122] = (c2[15] *= 
a[255]); + b[123] = (c3[15] *= a[255]); + b[124] = (c4[15] *= a[255]); + b[125] = (c5[15] *= a[255]); + b[126] = (c6[15] *= a[255]); + b[127] = (c7[15] *= a[255]); +} + +#endif + +static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < m; i++) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = i + 1; k < m; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a += m; + } +} + +#else + +static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < m; i++) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = i + 1; k < m; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= -cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a += m * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM KERNEL LT : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + + j = (n >> GEMM_UNROLL_N_SHIFT); + +#ifdef DOUBLE + int well_aligned = (GEMM_UNROLL_M==8) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0); +#else + int well_aligned = (GEMM_UNROLL_M==16) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0); +#endif + + while (j > 0) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + + if (well_aligned) { +#ifdef DOUBLE + solve8x8(aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, cc, ldc); +#else + solve16x8(aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, cc, ldc); +#endif + } + else { + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + } + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; 
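+      /* done with this panel of GEMM_UNROLL_N columns; b and c already point at the next panel */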
+ j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/power/trsm_kernel_RN_power10.c b/kernel/power/trsm_kernel_RN_power10.c new file mode 100644 index 000000000..92c26fcc3 --- /dev/null +++ b/kernel/power/trsm_kernel_RN_power10.c @@ -0,0 +1,828 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include "common.h" +#include <altivec.h> + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + +#ifndef COMPLEX + +#ifdef DOUBLE + +/* solve one 8x8 diagonal block of the right-side triangular system; each column of c is scaled by its pre-inverted diagonal entry of b, stored back to the packed buffer a, and eliminated from the later columns */ +static inline __attribute__ ((always_inline)) void solve8x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7; + c0 = &c[0*ldc]; + c1 = &c[1*ldc]; + c2 = &c[2*ldc]; + c3 = &c[3*ldc]; + c4 = &c[4*ldc]; + c5 = &c[5*ldc]; + c6 = &c[6*ldc]; + c7 = &c[7*ldc]; + vector FLOAT *Vb = (vector FLOAT *) b; + vector FLOAT *Vc0 = (vector FLOAT *) c0; + vector FLOAT *Vc1 = (vector FLOAT *) c1; + vector FLOAT *Vc2 = (vector FLOAT *) c2; + vector FLOAT *Vc3 = (vector FLOAT *) c3; + vector FLOAT *Vc4 = (vector FLOAT *) c4; + vector FLOAT *Vc5 = (vector FLOAT *) c5; + vector FLOAT *Vc6 = (vector FLOAT *) c6; + vector FLOAT *Vc7 = (vector FLOAT *) c7; + vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6; + + a[0] = (c0[0] *= b[0]); + a[1] = (c0[1] *= b[0]); + a[2] = (c0[2] *= b[0]); + a[3] = (c0[3] *= b[0]); + a[4] = (c0[4] *= b[0]); + a[5] = (c0[5] *= b[0]); + a[6] = (c0[6] *= b[0]); + a[7] = (c0[7] *= b[0]); + VbS0 = vec_splat(Vb[0], 1); + VbS1 = vec_splat(Vb[1], 0); + VbS2 = vec_splat(Vb[1], 1); + VbS3 = vec_splat(Vb[2], 0); + VbS4 = vec_splat(Vb[2], 1); + VbS5 = vec_splat(Vb[3], 0); + VbS6 = vec_splat(Vb[3], 1); + Vc1[0] = vec_nmsub(Vc0[ 0], VbS0, Vc1[0]); + Vc1[1] = vec_nmsub(Vc0[ 1], VbS0, Vc1[1]); + Vc1[2] = vec_nmsub(Vc0[ 2], VbS0, Vc1[2]); + Vc1[3] = vec_nmsub(Vc0[ 3], VbS0, Vc1[3]); + Vc2[0] = vec_nmsub(Vc0[ 0], VbS1, Vc2[0]); + Vc2[1] = vec_nmsub(Vc0[ 1], VbS1, Vc2[1]); + Vc2[2] = vec_nmsub(Vc0[ 2], VbS1, Vc2[2]); + Vc2[3] = vec_nmsub(Vc0[ 3], VbS1, Vc2[3]); + Vc3[0] = vec_nmsub(Vc0[ 0], VbS2, Vc3[0]); + Vc3[1] = vec_nmsub(Vc0[ 1], VbS2, Vc3[1]); + Vc3[2] = vec_nmsub(Vc0[ 2], VbS2, Vc3[2]); + Vc3[3] = vec_nmsub(Vc0[ 3], VbS2, Vc3[3]); + Vc4[0] = vec_nmsub(Vc0[ 0], VbS3, Vc4[0]); + Vc4[1] = vec_nmsub(Vc0[ 1], VbS3, Vc4[1]); + Vc4[2] = vec_nmsub(Vc0[ 2], VbS3, Vc4[2]); + Vc4[3] = vec_nmsub(Vc0[ 3], VbS3, Vc4[3]); + Vc5[0] = vec_nmsub(Vc0[ 0], VbS4, Vc5[0]); + Vc5[1] = vec_nmsub(Vc0[ 1], VbS4, Vc5[1]); + Vc5[2] = vec_nmsub(Vc0[ 2], VbS4, Vc5[2]); + Vc5[3] = vec_nmsub(Vc0[ 3], VbS4, Vc5[3]); + Vc6[0] = vec_nmsub(Vc0[ 0], VbS5, Vc6[0]); + Vc6[1] = vec_nmsub(Vc0[ 1], VbS5, Vc6[1]); + Vc6[2] = vec_nmsub(Vc0[ 2], VbS5, Vc6[2]); + Vc6[3] = vec_nmsub(Vc0[ 3], VbS5, Vc6[3]); + Vc7[0] = vec_nmsub(Vc0[ 0], VbS6, Vc7[0]); + Vc7[1] = vec_nmsub(Vc0[ 1], VbS6, Vc7[1]); + Vc7[2] = vec_nmsub(Vc0[ 2], VbS6, Vc7[2]); + Vc7[3] = vec_nmsub(Vc0[ 3], VbS6, Vc7[3]); + + a[ 8] = (c1[0] *= b[9]); + a[ 9] = (c1[1] *=
b[9]); + a[10] = (c1[2] *= b[9]); + a[11] = (c1[3] *= b[9]); + a[12] = (c1[4] *= b[9]); + a[13] = (c1[5] *= b[9]); + a[14] = (c1[6] *= b[9]); + a[15] = (c1[7] *= b[9]); + VbS0 = vec_splat(Vb[5], 0); + VbS1 = vec_splat(Vb[5], 1); + VbS2 = vec_splat(Vb[6], 0); + VbS3 = vec_splat(Vb[6], 1); + VbS4 = vec_splat(Vb[7], 0); + VbS5 = vec_splat(Vb[7], 1); + Vc2[0] = vec_nmsub(Vc1[0], VbS0, Vc2[0]); + Vc2[1] = vec_nmsub(Vc1[1], VbS0, Vc2[1]); + Vc2[2] = vec_nmsub(Vc1[2], VbS0, Vc2[2]); + Vc2[3] = vec_nmsub(Vc1[3], VbS0, Vc2[3]); + Vc3[0] = vec_nmsub(Vc1[0], VbS1, Vc3[0]); + Vc3[1] = vec_nmsub(Vc1[1], VbS1, Vc3[1]); + Vc3[2] = vec_nmsub(Vc1[2], VbS1, Vc3[2]); + Vc3[3] = vec_nmsub(Vc1[3], VbS1, Vc3[3]); + Vc4[0] = vec_nmsub(Vc1[0], VbS2, Vc4[0]); + Vc4[1] = vec_nmsub(Vc1[1], VbS2, Vc4[1]); + Vc4[2] = vec_nmsub(Vc1[2], VbS2, Vc4[2]); + Vc4[3] = vec_nmsub(Vc1[3], VbS2, Vc4[3]); + Vc5[0] = vec_nmsub(Vc1[0], VbS3, Vc5[0]); + Vc5[1] = vec_nmsub(Vc1[1], VbS3, Vc5[1]); + Vc5[2] = vec_nmsub(Vc1[2], VbS3, Vc5[2]); + Vc5[3] = vec_nmsub(Vc1[3], VbS3, Vc5[3]); + Vc6[0] = vec_nmsub(Vc1[0], VbS4, Vc6[0]); + Vc6[1] = vec_nmsub(Vc1[1], VbS4, Vc6[1]); + Vc6[2] = vec_nmsub(Vc1[2], VbS4, Vc6[2]); + Vc6[3] = vec_nmsub(Vc1[3], VbS4, Vc6[3]); + Vc7[0] = vec_nmsub(Vc1[0], VbS5, Vc7[0]); + Vc7[1] = vec_nmsub(Vc1[1], VbS5, Vc7[1]); + Vc7[2] = vec_nmsub(Vc1[2], VbS5, Vc7[2]); + Vc7[3] = vec_nmsub(Vc1[3], VbS5, Vc7[3]); + + a[16] = (c2[0] *= b[18]); + a[17] = (c2[1] *= b[18]); + a[18] = (c2[2] *= b[18]); + a[19] = (c2[3] *= b[18]); + a[20] = (c2[4] *= b[18]); + a[21] = (c2[5] *= b[18]); + a[22] = (c2[6] *= b[18]); + a[23] = (c2[7] *= b[18]); + VbS0 = vec_splat(Vb[ 9], 1); + VbS1 = vec_splat(Vb[10], 0); + VbS2 = vec_splat(Vb[10], 1); + VbS3 = vec_splat(Vb[11], 0); + VbS4 = vec_splat(Vb[11], 1); + Vc3[0] = vec_nmsub(Vc2[0], VbS0, Vc3[0]); + Vc3[1] = vec_nmsub(Vc2[1], VbS0, Vc3[1]); + Vc3[2] = vec_nmsub(Vc2[2], VbS0, Vc3[2]); + Vc3[3] = vec_nmsub(Vc2[3], VbS0, Vc3[3]); + Vc4[0] = vec_nmsub(Vc2[0], VbS1, Vc4[0]); + Vc4[1] = vec_nmsub(Vc2[1], VbS1, Vc4[1]); + Vc4[2] = vec_nmsub(Vc2[2], VbS1, Vc4[2]); + Vc4[3] = vec_nmsub(Vc2[3], VbS1, Vc4[3]); + Vc5[0] = vec_nmsub(Vc2[0], VbS2, Vc5[0]); + Vc5[1] = vec_nmsub(Vc2[1], VbS2, Vc5[1]); + Vc5[2] = vec_nmsub(Vc2[2], VbS2, Vc5[2]); + Vc5[3] = vec_nmsub(Vc2[3], VbS2, Vc5[3]); + Vc6[0] = vec_nmsub(Vc2[0], VbS3, Vc6[0]); + Vc6[1] = vec_nmsub(Vc2[1], VbS3, Vc6[1]); + Vc6[2] = vec_nmsub(Vc2[2], VbS3, Vc6[2]); + Vc6[3] = vec_nmsub(Vc2[3], VbS3, Vc6[3]); + Vc7[0] = vec_nmsub(Vc2[0], VbS4, Vc7[0]); + Vc7[1] = vec_nmsub(Vc2[1], VbS4, Vc7[1]); + Vc7[2] = vec_nmsub(Vc2[2], VbS4, Vc7[2]); + Vc7[3] = vec_nmsub(Vc2[3], VbS4, Vc7[3]); + + a[24] = (c3[0] *= b[27]); + a[25] = (c3[1] *= b[27]); + a[26] = (c3[2] *= b[27]); + a[27] = (c3[3] *= b[27]); + a[28] = (c3[4] *= b[27]); + a[29] = (c3[5] *= b[27]); + a[30] = (c3[6] *= b[27]); + a[31] = (c3[7] *= b[27]); + VbS0 = vec_splat(Vb[14], 0); + VbS1 = vec_splat(Vb[14], 1); + VbS2 = vec_splat(Vb[15], 0); + VbS3 = vec_splat(Vb[15], 1); + Vc4[0] = vec_nmsub(Vc3[0], VbS0, Vc4[0]); + Vc4[1] = vec_nmsub(Vc3[1], VbS0, Vc4[1]); + Vc4[2] = vec_nmsub(Vc3[2], VbS0, Vc4[2]); + Vc4[3] = vec_nmsub(Vc3[3], VbS0, Vc4[3]); + Vc5[0] = vec_nmsub(Vc3[0], VbS1, Vc5[0]); + Vc5[1] = vec_nmsub(Vc3[1], VbS1, Vc5[1]); + Vc5[2] = vec_nmsub(Vc3[2], VbS1, Vc5[2]); + Vc5[3] = vec_nmsub(Vc3[3], VbS1, Vc5[3]); + Vc6[0] = vec_nmsub(Vc3[0], VbS2, Vc6[0]); + Vc6[1] = vec_nmsub(Vc3[1], VbS2, Vc6[1]); + Vc6[2] = vec_nmsub(Vc3[2], VbS2, Vc6[2]); + Vc6[3] = vec_nmsub(Vc3[3], VbS2, Vc6[3]); + Vc7[0] = 
vec_nmsub(Vc3[0], VbS3, Vc7[0]); + Vc7[1] = vec_nmsub(Vc3[1], VbS3, Vc7[1]); + Vc7[2] = vec_nmsub(Vc3[2], VbS3, Vc7[2]); + Vc7[3] = vec_nmsub(Vc3[3], VbS3, Vc7[3]); + + a[32] = (c4[0] *= b[36]); + a[33] = (c4[1] *= b[36]); + a[34] = (c4[2] *= b[36]); + a[35] = (c4[3] *= b[36]); + a[36] = (c4[4] *= b[36]); + a[37] = (c4[5] *= b[36]); + a[38] = (c4[6] *= b[36]); + a[39] = (c4[7] *= b[36]); + VbS0 = vec_splat(Vb[18], 1); + VbS1 = vec_splat(Vb[19], 0); + VbS2 = vec_splat(Vb[19], 1); + Vc5[0] = vec_nmsub(Vc4[0], VbS0, Vc5[0]); + Vc5[1] = vec_nmsub(Vc4[1], VbS0, Vc5[1]); + Vc5[2] = vec_nmsub(Vc4[2], VbS0, Vc5[2]); + Vc5[3] = vec_nmsub(Vc4[3], VbS0, Vc5[3]); + Vc6[0] = vec_nmsub(Vc4[0], VbS1, Vc6[0]); + Vc6[1] = vec_nmsub(Vc4[1], VbS1, Vc6[1]); + Vc6[2] = vec_nmsub(Vc4[2], VbS1, Vc6[2]); + Vc6[3] = vec_nmsub(Vc4[3], VbS1, Vc6[3]); + Vc7[0] = vec_nmsub(Vc4[0], VbS2, Vc7[0]); + Vc7[1] = vec_nmsub(Vc4[1], VbS2, Vc7[1]); + Vc7[2] = vec_nmsub(Vc4[2], VbS2, Vc7[2]); + Vc7[3] = vec_nmsub(Vc4[3], VbS2, Vc7[3]); + + a[40] = (c5[0] *= b[45]); + a[41] = (c5[1] *= b[45]); + a[42] = (c5[2] *= b[45]); + a[43] = (c5[3] *= b[45]); + a[44] = (c5[4] *= b[45]); + a[45] = (c5[5] *= b[45]); + a[46] = (c5[6] *= b[45]); + a[47] = (c5[7] *= b[45]); + VbS0 = vec_splat(Vb[23], 0); + VbS1 = vec_splat(Vb[23], 1); + Vc6[0] = vec_nmsub(Vc5[0], VbS0, Vc6[0]); + Vc6[1] = vec_nmsub(Vc5[1], VbS0, Vc6[1]); + Vc6[2] = vec_nmsub(Vc5[2], VbS0, Vc6[2]); + Vc6[3] = vec_nmsub(Vc5[3], VbS0, Vc6[3]); + Vc7[0] = vec_nmsub(Vc5[0], VbS1, Vc7[0]); + Vc7[1] = vec_nmsub(Vc5[1], VbS1, Vc7[1]); + Vc7[2] = vec_nmsub(Vc5[2], VbS1, Vc7[2]); + Vc7[3] = vec_nmsub(Vc5[3], VbS1, Vc7[3]); + + a[48] = (c6[0] *= b[54]); + a[49] = (c6[1] *= b[54]); + a[50] = (c6[2] *= b[54]); + a[51] = (c6[3] *= b[54]); + a[52] = (c6[4] *= b[54]); + a[53] = (c6[5] *= b[54]); + a[54] = (c6[6] *= b[54]); + a[55] = (c6[7] *= b[54]); + VbS0 = vec_splat(Vb[27], 1); + Vc7[0] = vec_nmsub(Vc6[0], VbS0, Vc7[0]); + Vc7[1] = vec_nmsub(Vc6[1], VbS0, Vc7[1]); + Vc7[2] = vec_nmsub(Vc6[2], VbS0, Vc7[2]); + Vc7[3] = vec_nmsub(Vc6[3], VbS0, Vc7[3]); + + a[56] = (c7[0] *= b[63]); + a[57] = (c7[1] *= b[63]); + a[58] = (c7[2] *= b[63]); + a[59] = (c7[3] *= b[63]); + a[60] = (c7[4] *= b[63]); + a[61] = (c7[5] *= b[63]); + a[62] = (c7[6] *= b[63]); + a[63] = (c7[7] *= b[63]); +} + +#else + +static inline __attribute__ ((always_inline)) void solve16x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7; + c0 = &c[0*ldc]; + c1 = &c[1*ldc]; + c2 = &c[2*ldc]; + c3 = &c[3*ldc]; + c4 = &c[4*ldc]; + c5 = &c[5*ldc]; + c6 = &c[6*ldc]; + c7 = &c[7*ldc]; + vector FLOAT *Va = (vector FLOAT *) a; + vector FLOAT *Vb = (vector FLOAT *) b; + vector FLOAT *Vc0 = (vector FLOAT *) c0; + vector FLOAT *Vc1 = (vector FLOAT *) c1; + vector FLOAT *Vc2 = (vector FLOAT *) c2; + vector FLOAT *Vc3 = (vector FLOAT *) c3; + vector FLOAT *Vc4 = (vector FLOAT *) c4; + vector FLOAT *Vc5 = (vector FLOAT *) c5; + vector FLOAT *Vc6 = (vector FLOAT *) c6; + vector FLOAT *Vc7 = (vector FLOAT *) c7; + vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7; + + VbS0 = vec_splat(Vb[0], 0); + VbS1 = vec_splat(Vb[0], 1); + VbS2 = vec_splat(Vb[0], 2); + VbS3 = vec_splat(Vb[0], 3); + VbS4 = vec_splat(Vb[1], 0); + VbS5 = vec_splat(Vb[1], 1); + VbS6 = vec_splat(Vb[1], 2); + VbS7 = vec_splat(Vb[1], 3); + + Vc0[ 0] = vec_mul(VbS0, Vc0[ 0]); + Vc0[ 1] = vec_mul(VbS0, Vc0[ 1]); + Vc0[ 2] = vec_mul(VbS0, Vc0[ 2]); + Vc0[ 3] = vec_mul(VbS0, Vc0[ 3]); + Va[0] = Vc0[0]; + Va[1] = Vc0[1]; + Va[2] = 
Vc0[2]; + Va[3] = Vc0[3]; + Vc1[0] = vec_nmsub(VbS1, Va[0], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[1], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[2], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[3], Vc1[3]); + Vc2[0] = vec_nmsub(VbS2, Va[0], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[1], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[2], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[3], Vc2[3]); + Vc3[0] = vec_nmsub(VbS3, Va[0], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[1], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[2], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[3], Vc3[3]); + Vc4[0] = vec_nmsub(VbS4, Va[0], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[1], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[2], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[3], Vc4[3]); + Vc5[0] = vec_nmsub(VbS5, Va[0], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[1], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[2], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[3], Vc5[3]); + Vc6[0] = vec_nmsub(VbS6, Va[0], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[1], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[2], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[3], Vc6[3]); + Vc7[0] = vec_nmsub(VbS7, Va[0], Vc7[0]); + Vc7[1] = vec_nmsub(VbS7, Va[1], Vc7[1]); + Vc7[2] = vec_nmsub(VbS7, Va[2], Vc7[2]); + Vc7[3] = vec_nmsub(VbS7, Va[3], Vc7[3]); + + VbS0 = vec_splat(Vb[2], 1); + VbS1 = vec_splat(Vb[2], 2); + VbS2 = vec_splat(Vb[2], 3); + VbS3 = vec_splat(Vb[3], 0); + VbS4 = vec_splat(Vb[3], 1); + VbS5 = vec_splat(Vb[3], 2); + VbS6 = vec_splat(Vb[3], 3); + + Vc1[0] = vec_mul(VbS0, Vc1[0]); + Vc1[1] = vec_mul(VbS0, Vc1[1]); + Vc1[2] = vec_mul(VbS0, Vc1[2]); + Vc1[3] = vec_mul(VbS0, Vc1[3]); + Va[4] = Vc1[0]; + Va[5] = Vc1[1]; + Va[6] = Vc1[2]; + Va[7] = Vc1[3]; + Vc2[0] = vec_nmsub(VbS1, Va[4], Vc2[0]); + Vc2[1] = vec_nmsub(VbS1, Va[5], Vc2[1]); + Vc2[2] = vec_nmsub(VbS1, Va[6], Vc2[2]); + Vc2[3] = vec_nmsub(VbS1, Va[7], Vc2[3]); + Vc3[0] = vec_nmsub(VbS2, Va[4], Vc3[0]); + Vc3[1] = vec_nmsub(VbS2, Va[5], Vc3[1]); + Vc3[2] = vec_nmsub(VbS2, Va[6], Vc3[2]); + Vc3[3] = vec_nmsub(VbS2, Va[7], Vc3[3]); + Vc4[0] = vec_nmsub(VbS3, Va[4], Vc4[0]); + Vc4[1] = vec_nmsub(VbS3, Va[5], Vc4[1]); + Vc4[2] = vec_nmsub(VbS3, Va[6], Vc4[2]); + Vc4[3] = vec_nmsub(VbS3, Va[7], Vc4[3]); + Vc5[0] = vec_nmsub(VbS4, Va[4], Vc5[0]); + Vc5[1] = vec_nmsub(VbS4, Va[5], Vc5[1]); + Vc5[2] = vec_nmsub(VbS4, Va[6], Vc5[2]); + Vc5[3] = vec_nmsub(VbS4, Va[7], Vc5[3]); + Vc6[0] = vec_nmsub(VbS5, Va[4], Vc6[0]); + Vc6[1] = vec_nmsub(VbS5, Va[5], Vc6[1]); + Vc6[2] = vec_nmsub(VbS5, Va[6], Vc6[2]); + Vc6[3] = vec_nmsub(VbS5, Va[7], Vc6[3]); + Vc7[0] = vec_nmsub(VbS6, Va[4], Vc7[0]); + Vc7[1] = vec_nmsub(VbS6, Va[5], Vc7[1]); + Vc7[2] = vec_nmsub(VbS6, Va[6], Vc7[2]); + Vc7[3] = vec_nmsub(VbS6, Va[7], Vc7[3]); + + VbS0 = vec_splat(Vb[4], 2); + VbS1 = vec_splat(Vb[4], 3); + VbS2 = vec_splat(Vb[5], 0); + VbS3 = vec_splat(Vb[5], 1); + VbS4 = vec_splat(Vb[5], 2); + VbS5 = vec_splat(Vb[5], 3); + + Vc2[0] = vec_mul(VbS0, Vc2[0]); + Vc2[1] = vec_mul(VbS0, Vc2[1]); + Vc2[2] = vec_mul(VbS0, Vc2[2]); + Vc2[3] = vec_mul(VbS0, Vc2[3]); + Va[ 8] = Vc2[0]; + Va[ 9] = Vc2[1]; + Va[10] = Vc2[2]; + Va[11] = Vc2[3]; + Vc3[0] = vec_nmsub(VbS1, Va[ 8], Vc3[0]); + Vc3[1] = vec_nmsub(VbS1, Va[ 9], Vc3[1]); + Vc3[2] = vec_nmsub(VbS1, Va[10], Vc3[2]); + Vc3[3] = vec_nmsub(VbS1, Va[11], Vc3[3]); + Vc4[0] = vec_nmsub(VbS2, Va[ 8], Vc4[0]); + Vc4[1] = vec_nmsub(VbS2, Va[ 9], Vc4[1]); + Vc4[2] = vec_nmsub(VbS2, Va[10], Vc4[2]); + Vc4[3] = vec_nmsub(VbS2, Va[11], Vc4[3]); + Vc5[0] = vec_nmsub(VbS3, Va[ 8], Vc5[0]); + Vc5[1] = vec_nmsub(VbS3, Va[ 9], Vc5[1]); + Vc5[2] = vec_nmsub(VbS3, Va[10], 
Vc5[2]); + Vc5[3] = vec_nmsub(VbS3, Va[11], Vc5[3]); + Vc6[0] = vec_nmsub(VbS4, Va[ 8], Vc6[0]); + Vc6[1] = vec_nmsub(VbS4, Va[ 9], Vc6[1]); + Vc6[2] = vec_nmsub(VbS4, Va[10], Vc6[2]); + Vc6[3] = vec_nmsub(VbS4, Va[11], Vc6[3]); + Vc7[0] = vec_nmsub(VbS5, Va[ 8], Vc7[0]); + Vc7[1] = vec_nmsub(VbS5, Va[ 9], Vc7[1]); + Vc7[2] = vec_nmsub(VbS5, Va[10], Vc7[2]); + Vc7[3] = vec_nmsub(VbS5, Va[11], Vc7[3]); + + VbS0 = vec_splat(Vb[6], 3); + VbS1 = vec_splat(Vb[7], 0); + VbS2 = vec_splat(Vb[7], 1); + VbS3 = vec_splat(Vb[7], 2); + VbS4 = vec_splat(Vb[7], 3); + + Vc3[0] = vec_mul(VbS0, Vc3[0]); + Vc3[1] = vec_mul(VbS0, Vc3[1]); + Vc3[2] = vec_mul(VbS0, Vc3[2]); + Vc3[3] = vec_mul(VbS0, Vc3[3]); + Va[12] = Vc3[0]; + Va[13] = Vc3[1]; + Va[14] = Vc3[2]; + Va[15] = Vc3[3]; + Vc4[0] = vec_nmsub(VbS1, Va[12], Vc4[0]); + Vc4[1] = vec_nmsub(VbS1, Va[13], Vc4[1]); + Vc4[2] = vec_nmsub(VbS1, Va[14], Vc4[2]); + Vc4[3] = vec_nmsub(VbS1, Va[15], Vc4[3]); + Vc5[0] = vec_nmsub(VbS2, Va[12], Vc5[0]); + Vc5[1] = vec_nmsub(VbS2, Va[13], Vc5[1]); + Vc5[2] = vec_nmsub(VbS2, Va[14], Vc5[2]); + Vc5[3] = vec_nmsub(VbS2, Va[15], Vc5[3]); + Vc6[0] = vec_nmsub(VbS3, Va[12], Vc6[0]); + Vc6[1] = vec_nmsub(VbS3, Va[13], Vc6[1]); + Vc6[2] = vec_nmsub(VbS3, Va[14], Vc6[2]); + Vc6[3] = vec_nmsub(VbS3, Va[15], Vc6[3]); + Vc7[0] = vec_nmsub(VbS4, Va[12], Vc7[0]); + Vc7[1] = vec_nmsub(VbS4, Va[13], Vc7[1]); + Vc7[2] = vec_nmsub(VbS4, Va[14], Vc7[2]); + Vc7[3] = vec_nmsub(VbS4, Va[15], Vc7[3]); + + VbS0 = vec_splat(Vb[9], 0); + VbS1 = vec_splat(Vb[9], 1); + VbS2 = vec_splat(Vb[9], 2); + VbS3 = vec_splat(Vb[9], 3); + + Vc4[0] = vec_mul(VbS0, Vc4[0]); + Vc4[1] = vec_mul(VbS0, Vc4[1]); + Vc4[2] = vec_mul(VbS0, Vc4[2]); + Vc4[3] = vec_mul(VbS0, Vc4[3]); + Va[16] = Vc4[0]; + Va[17] = Vc4[1]; + Va[18] = Vc4[2]; + Va[19] = Vc4[3]; + Vc5[0] = vec_nmsub(VbS1, Va[16], Vc5[0]); + Vc5[1] = vec_nmsub(VbS1, Va[17], Vc5[1]); + Vc5[2] = vec_nmsub(VbS1, Va[18], Vc5[2]); + Vc5[3] = vec_nmsub(VbS1, Va[19], Vc5[3]); + Vc6[0] = vec_nmsub(VbS2, Va[16], Vc6[0]); + Vc6[1] = vec_nmsub(VbS2, Va[17], Vc6[1]); + Vc6[2] = vec_nmsub(VbS2, Va[18], Vc6[2]); + Vc6[3] = vec_nmsub(VbS2, Va[19], Vc6[3]); + Vc7[0] = vec_nmsub(VbS3, Va[16], Vc7[0]); + Vc7[1] = vec_nmsub(VbS3, Va[17], Vc7[1]); + Vc7[2] = vec_nmsub(VbS3, Va[18], Vc7[2]); + Vc7[3] = vec_nmsub(VbS3, Va[19], Vc7[3]); + + VbS0 = vec_splat(Vb[11], 1); + VbS1 = vec_splat(Vb[11], 2); + VbS2 = vec_splat(Vb[11], 3); + + Vc5[0] = vec_mul(VbS0, Vc5[0]); + Vc5[1] = vec_mul(VbS0, Vc5[1]); + Vc5[2] = vec_mul(VbS0, Vc5[2]); + Vc5[3] = vec_mul(VbS0, Vc5[3]); + Va[20] = Vc5[0]; + Va[21] = Vc5[1]; + Va[22] = Vc5[2]; + Va[23] = Vc5[3]; + Vc6[0] = vec_nmsub(VbS1, Va[20], Vc6[0]); + Vc6[1] = vec_nmsub(VbS1, Va[21], Vc6[1]); + Vc6[2] = vec_nmsub(VbS1, Va[22], Vc6[2]); + Vc6[3] = vec_nmsub(VbS1, Va[23], Vc6[3]); + Vc7[0] = vec_nmsub(VbS2, Va[20], Vc7[0]); + Vc7[1] = vec_nmsub(VbS2, Va[21], Vc7[1]); + Vc7[2] = vec_nmsub(VbS2, Va[22], Vc7[2]); + Vc7[3] = vec_nmsub(VbS2, Va[23], Vc7[3]); + + VbS0 = vec_splat(Vb[13], 2); + VbS1 = vec_splat(Vb[13], 3); + + Vc6[0] = vec_mul(VbS0, Vc6[0]); + Vc6[1] = vec_mul(VbS0, Vc6[1]); + Vc6[2] = vec_mul(VbS0, Vc6[2]); + Vc6[3] = vec_mul(VbS0, Vc6[3]); + Va[24] = Vc6[0]; + Va[25] = Vc6[1]; + Va[26] = Vc6[2]; + Va[27] = Vc6[3]; + Vc7[0] = vec_nmsub(VbS1, Va[24], Vc7[0]); + Vc7[1] = vec_nmsub(VbS1, Va[25], Vc7[1]); + Vc7[2] = vec_nmsub(VbS1, Va[26], Vc7[2]); + Vc7[3] = vec_nmsub(VbS1, Va[27], Vc7[3]); + + VbS0 = vec_splat(Vb[15], 3); + + Vc7[0] = vec_mul(VbS0, Vc7[0]); + Vc7[1] = vec_mul(VbS0, 
Vc7[1]); + Vc7[2] = vec_mul(VbS0, Vc7[2]); + Vc7[3] = vec_mul(VbS0, Vc7[3]); + Va[28] = Vc7[0]; + Va[29] = Vc7[1]; + Va[30] = Vc7[2]; + Va[31] = Vc7[3]; +} + +#endif + +static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < n; i++) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = i + 1; k < n; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b += n; + } +} + +#else + +static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < n; i++) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = -aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = i + 1; k < n; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= - cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b += n * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM RN KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + j = (n >> GEMM_UNROLL_N_SHIFT); + kk = -offset; + +#ifdef DOUBLE + int well_aligned = (GEMM_UNROLL_M==8) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0); +#else + int well_aligned = (GEMM_UNROLL_M==16) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0); +#endif + + while (j > 0) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + if (i > 0) { + do { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + + if (well_aligned) { +#ifdef DOUBLE + solve8x8(aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, cc, ldc); +#else + solve16x8(aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, cc, ldc); +#endif + } + else { + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + } + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + kk += GEMM_UNROLL_N; + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; 
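+ /* advance to the next GEMM_UNROLL_N-wide panel: kk now counts the columns of B already solved, so the next panel's GEMM_KERNEL call subtracts their contribution before its own triangular solve */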
+ j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + kk += j; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/power/trsm_kernel_RT.S b/kernel/power/trsm_kernel_RT.S index 07b88402c..d39d3a6e2 100644 --- a/kernel/power/trsm_kernel_RT.S +++ b/kernel/power/trsm_kernel_RT.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -180,7 +180,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -254,7 +254,7 @@ #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/trsm_kernel_RT_power10.c b/kernel/power/trsm_kernel_RT_power10.c new file mode 100644 index 000000000..529590f37 --- /dev/null +++ b/kernel/power/trsm_kernel_RT_power10.c @@ -0,0 +1,855 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. 
*/ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" +#include <altivec.h> + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + +#ifndef COMPLEX + +#ifdef DOUBLE + +/* solve one 8x8 diagonal block of the right-side triangular system, working from the last column back to the first (RT); each solved column is scaled by its pre-inverted diagonal entry of b, stored back to the packed buffer a, and eliminated from the earlier columns */ +static inline __attribute__ ((always_inline)) void solve8x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7; + c0 = &c[0*ldc]; + c1 = &c[1*ldc]; + c2 = &c[2*ldc]; + c3 = &c[3*ldc]; + c4 = &c[4*ldc]; + c5 = &c[5*ldc]; + c6 = &c[6*ldc]; + c7 = &c[7*ldc]; + vector FLOAT *Vb = (vector FLOAT *) b; + vector FLOAT *Vc0 = (vector FLOAT *) c0; + vector FLOAT *Vc1 = (vector FLOAT *) c1; + vector FLOAT *Vc2 = (vector FLOAT *) c2; + vector FLOAT *Vc3 = (vector FLOAT *) c3; + vector FLOAT *Vc4 = (vector FLOAT *) c4; + vector FLOAT *Vc5 = (vector FLOAT *) c5; + vector FLOAT *Vc6 = (vector FLOAT *) c6; + vector FLOAT *Vc7 = (vector FLOAT *) c7; + vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6; + + a[56] = (c7[0] *= b[63]); + a[57] = (c7[1] *= b[63]); + a[58] = (c7[2] *= b[63]); + a[59] = (c7[3] *= b[63]); + a[60] = (c7[4] *= b[63]); + a[61] = (c7[5] *= b[63]); + a[62] = (c7[6] *= b[63]); + a[63] = (c7[7] *= b[63]); + VbS0 = vec_splat(Vb[28], 0); + VbS1 = vec_splat(Vb[28], 1); + VbS2 = vec_splat(Vb[29], 0); + VbS3 = vec_splat(Vb[29], 1); + VbS4 = vec_splat(Vb[30], 0); + VbS5 = vec_splat(Vb[30], 1); + VbS6 = vec_splat(Vb[31], 0); + Vc0[0] = vec_nmsub(Vc7[0], VbS0, Vc0[0]); + Vc0[1] = vec_nmsub(Vc7[1], VbS0, Vc0[1]); + Vc0[2] = vec_nmsub(Vc7[2], VbS0, Vc0[2]); + Vc0[3] = vec_nmsub(Vc7[3], VbS0, Vc0[3]); + Vc1[0] = vec_nmsub(Vc7[0], VbS1, Vc1[0]); + Vc1[1] = vec_nmsub(Vc7[1], VbS1, Vc1[1]); + Vc1[2] = vec_nmsub(Vc7[2], VbS1, Vc1[2]); + Vc1[3] = vec_nmsub(Vc7[3], VbS1, Vc1[3]); + Vc2[0] = vec_nmsub(Vc7[0], VbS2, Vc2[0]); + Vc2[1] = vec_nmsub(Vc7[1], VbS2, Vc2[1]); + Vc2[2] = vec_nmsub(Vc7[2], VbS2, Vc2[2]); + Vc2[3] = vec_nmsub(Vc7[3], VbS2, Vc2[3]); + Vc3[0] = vec_nmsub(Vc7[0], VbS3, Vc3[0]); + Vc3[1] = vec_nmsub(Vc7[1], VbS3, Vc3[1]); + Vc3[2] = vec_nmsub(Vc7[2], VbS3, Vc3[2]); + Vc3[3] = vec_nmsub(Vc7[3], VbS3, Vc3[3]); + Vc4[0] = vec_nmsub(Vc7[0], VbS4, Vc4[0]); + Vc4[1] = vec_nmsub(Vc7[1], VbS4, Vc4[1]); + Vc4[2] = vec_nmsub(Vc7[2], VbS4, Vc4[2]); + Vc4[3] = vec_nmsub(Vc7[3], VbS4, Vc4[3]); + Vc5[0] = vec_nmsub(Vc7[0], VbS5, Vc5[0]); + Vc5[1] = vec_nmsub(Vc7[1], VbS5, Vc5[1]); + Vc5[2] = vec_nmsub(Vc7[2], VbS5, Vc5[2]); + Vc5[3] =
vec_nmsub(Vc7[3], VbS5, Vc5[3]); + Vc6[0] = vec_nmsub(Vc7[0], VbS6, Vc6[0]); + Vc6[1] = vec_nmsub(Vc7[1], VbS6, Vc6[1]); + Vc6[2] = vec_nmsub(Vc7[2], VbS6, Vc6[2]); + Vc6[3] = vec_nmsub(Vc7[3], VbS6, Vc6[3]); + + a[48] = (c6[0] *= b[54]); + a[49] = (c6[1] *= b[54]); + a[50] = (c6[2] *= b[54]); + a[51] = (c6[3] *= b[54]); + a[52] = (c6[4] *= b[54]); + a[53] = (c6[5] *= b[54]); + a[54] = (c6[6] *= b[54]); + a[55] = (c6[7] *= b[54]); + VbS0 = vec_splat(Vb[24], 0); + VbS1 = vec_splat(Vb[24], 1); + VbS2 = vec_splat(Vb[25], 0); + VbS3 = vec_splat(Vb[25], 1); + VbS4 = vec_splat(Vb[26], 0); + VbS5 = vec_splat(Vb[26], 1); + Vc0[0] = vec_nmsub(Vc6[0], VbS0, Vc0[0]); + Vc0[1] = vec_nmsub(Vc6[1], VbS0, Vc0[1]); + Vc0[2] = vec_nmsub(Vc6[2], VbS0, Vc0[2]); + Vc0[3] = vec_nmsub(Vc6[3], VbS0, Vc0[3]); + Vc1[0] = vec_nmsub(Vc6[0], VbS1, Vc1[0]); + Vc1[1] = vec_nmsub(Vc6[1], VbS1, Vc1[1]); + Vc1[2] = vec_nmsub(Vc6[2], VbS1, Vc1[2]); + Vc1[3] = vec_nmsub(Vc6[3], VbS1, Vc1[3]); + Vc2[0] = vec_nmsub(Vc6[0], VbS2, Vc2[0]); + Vc2[1] = vec_nmsub(Vc6[1], VbS2, Vc2[1]); + Vc2[2] = vec_nmsub(Vc6[2], VbS2, Vc2[2]); + Vc2[3] = vec_nmsub(Vc6[3], VbS2, Vc2[3]); + Vc3[0] = vec_nmsub(Vc6[0], VbS3, Vc3[0]); + Vc3[1] = vec_nmsub(Vc6[1], VbS3, Vc3[1]); + Vc3[2] = vec_nmsub(Vc6[2], VbS3, Vc3[2]); + Vc3[3] = vec_nmsub(Vc6[3], VbS3, Vc3[3]); + Vc4[0] = vec_nmsub(Vc6[0], VbS4, Vc4[0]); + Vc4[1] = vec_nmsub(Vc6[1], VbS4, Vc4[1]); + Vc4[2] = vec_nmsub(Vc6[2], VbS4, Vc4[2]); + Vc4[3] = vec_nmsub(Vc6[3], VbS4, Vc4[3]); + Vc5[0] = vec_nmsub(Vc6[0], VbS5, Vc5[0]); + Vc5[1] = vec_nmsub(Vc6[1], VbS5, Vc5[1]); + Vc5[2] = vec_nmsub(Vc6[2], VbS5, Vc5[2]); + Vc5[3] = vec_nmsub(Vc6[3], VbS5, Vc5[3]); + + a[40] = (c5[0] *= b[45]); + a[41] = (c5[1] *= b[45]); + a[42] = (c5[2] *= b[45]); + a[43] = (c5[3] *= b[45]); + a[44] = (c5[4] *= b[45]); + a[45] = (c5[5] *= b[45]); + a[46] = (c5[6] *= b[45]); + a[47] = (c5[7] *= b[45]); + VbS0 = vec_splat(Vb[20], 0); + VbS1 = vec_splat(Vb[20], 1); + VbS2 = vec_splat(Vb[21], 0); + VbS3 = vec_splat(Vb[21], 1); + VbS4 = vec_splat(Vb[22], 0); + Vc0[0] = vec_nmsub(Vc5[0], VbS0, Vc0[0]); + Vc0[1] = vec_nmsub(Vc5[1], VbS0, Vc0[1]); + Vc0[2] = vec_nmsub(Vc5[2], VbS0, Vc0[2]); + Vc0[3] = vec_nmsub(Vc5[3], VbS0, Vc0[3]); + Vc1[0] = vec_nmsub(Vc5[0], VbS1, Vc1[0]); + Vc1[1] = vec_nmsub(Vc5[1], VbS1, Vc1[1]); + Vc1[2] = vec_nmsub(Vc5[2], VbS1, Vc1[2]); + Vc1[3] = vec_nmsub(Vc5[3], VbS1, Vc1[3]); + Vc2[0] = vec_nmsub(Vc5[0], VbS2, Vc2[0]); + Vc2[1] = vec_nmsub(Vc5[1], VbS2, Vc2[1]); + Vc2[2] = vec_nmsub(Vc5[2], VbS2, Vc2[2]); + Vc2[3] = vec_nmsub(Vc5[3], VbS2, Vc2[3]); + Vc3[0] = vec_nmsub(Vc5[0], VbS3, Vc3[0]); + Vc3[1] = vec_nmsub(Vc5[1], VbS3, Vc3[1]); + Vc3[2] = vec_nmsub(Vc5[2], VbS3, Vc3[2]); + Vc3[3] = vec_nmsub(Vc5[3], VbS3, Vc3[3]); + Vc4[0] = vec_nmsub(Vc5[0], VbS4, Vc4[0]); + Vc4[1] = vec_nmsub(Vc5[1], VbS4, Vc4[1]); + Vc4[2] = vec_nmsub(Vc5[2], VbS4, Vc4[2]); + Vc4[3] = vec_nmsub(Vc5[3], VbS4, Vc4[3]); + + a[32] = (c4[0] *= b[36]); + a[33] = (c4[1] *= b[36]); + a[34] = (c4[2] *= b[36]); + a[35] = (c4[3] *= b[36]); + a[36] = (c4[4] *= b[36]); + a[37] = (c4[5] *= b[36]); + a[38] = (c4[6] *= b[36]); + a[39] = (c4[7] *= b[36]); + VbS0 = vec_splat(Vb[16], 0); + VbS1 = vec_splat(Vb[16], 1); + VbS2 = vec_splat(Vb[17], 0); + VbS3 = vec_splat(Vb[17], 1); + Vc0[0] = vec_nmsub(Vc4[0], VbS0, Vc0[0]); + Vc0[1] = vec_nmsub(Vc4[1], VbS0, Vc0[1]); + Vc0[2] = vec_nmsub(Vc4[2], VbS0, Vc0[2]); + Vc0[3] = vec_nmsub(Vc4[3], VbS0, Vc0[3]); + Vc1[0] = vec_nmsub(Vc4[0], VbS1, Vc1[0]); + Vc1[1] = vec_nmsub(Vc4[1], VbS1, Vc1[1]); + 
Vc1[2] = vec_nmsub(Vc4[2], VbS1, Vc1[2]); + Vc1[3] = vec_nmsub(Vc4[3], VbS1, Vc1[3]); + Vc2[0] = vec_nmsub(Vc4[0], VbS2, Vc2[0]); + Vc2[1] = vec_nmsub(Vc4[1], VbS2, Vc2[1]); + Vc2[2] = vec_nmsub(Vc4[2], VbS2, Vc2[2]); + Vc2[3] = vec_nmsub(Vc4[3], VbS2, Vc2[3]); + Vc3[0] = vec_nmsub(Vc4[0], VbS3, Vc3[0]); + Vc3[1] = vec_nmsub(Vc4[1], VbS3, Vc3[1]); + Vc3[2] = vec_nmsub(Vc4[2], VbS3, Vc3[2]); + Vc3[3] = vec_nmsub(Vc4[3], VbS3, Vc3[3]); + + a[24] = (c3[0] *= b[27]); + a[25] = (c3[1] *= b[27]); + a[26] = (c3[2] *= b[27]); + a[27] = (c3[3] *= b[27]); + a[28] = (c3[4] *= b[27]); + a[29] = (c3[5] *= b[27]); + a[30] = (c3[6] *= b[27]); + a[31] = (c3[7] *= b[27]); + VbS0 = vec_splat(Vb[12], 0); + VbS1 = vec_splat(Vb[12], 1); + VbS2 = vec_splat(Vb[13], 0); + Vc0[0] = vec_nmsub(Vc3[0], VbS0, Vc0[0]); + Vc0[1] = vec_nmsub(Vc3[1], VbS0, Vc0[1]); + Vc0[2] = vec_nmsub(Vc3[2], VbS0, Vc0[2]); + Vc0[3] = vec_nmsub(Vc3[3], VbS0, Vc0[3]); + Vc1[0] = vec_nmsub(Vc3[0], VbS1, Vc1[0]); + Vc1[1] = vec_nmsub(Vc3[1], VbS1, Vc1[1]); + Vc1[2] = vec_nmsub(Vc3[2], VbS1, Vc1[2]); + Vc1[3] = vec_nmsub(Vc3[3], VbS1, Vc1[3]); + Vc2[0] = vec_nmsub(Vc3[0], VbS2, Vc2[0]); + Vc2[1] = vec_nmsub(Vc3[1], VbS2, Vc2[1]); + Vc2[2] = vec_nmsub(Vc3[2], VbS2, Vc2[2]); + Vc2[3] = vec_nmsub(Vc3[3], VbS2, Vc2[3]); + + a[16] = (c2[0] *= b[18]); + a[17] = (c2[1] *= b[18]); + a[18] = (c2[2] *= b[18]); + a[19] = (c2[3] *= b[18]); + a[20] = (c2[4] *= b[18]); + a[21] = (c2[5] *= b[18]); + a[22] = (c2[6] *= b[18]); + a[23] = (c2[7] *= b[18]); + VbS0 = vec_splat(Vb[8], 0); + VbS1 = vec_splat(Vb[8], 1); + Vc0[0] = vec_nmsub(Vc2[0], VbS0, Vc0[0]); + Vc0[1] = vec_nmsub(Vc2[1], VbS0, Vc0[1]); + Vc0[2] = vec_nmsub(Vc2[2], VbS0, Vc0[2]); + Vc0[3] = vec_nmsub(Vc2[3], VbS0, Vc0[3]); + Vc1[0] = vec_nmsub(Vc2[0], VbS1, Vc1[0]); + Vc1[1] = vec_nmsub(Vc2[1], VbS1, Vc1[1]); + Vc1[2] = vec_nmsub(Vc2[2], VbS1, Vc1[2]); + Vc1[3] = vec_nmsub(Vc2[3], VbS1, Vc1[3]); + + a[ 8] = (c1[0] *= b[9]); + a[ 9] = (c1[1] *= b[9]); + a[10] = (c1[2] *= b[9]); + a[11] = (c1[3] *= b[9]); + a[12] = (c1[4] *= b[9]); + a[13] = (c1[5] *= b[9]); + a[14] = (c1[6] *= b[9]); + a[15] = (c1[7] *= b[9]); + VbS0 = vec_splat(Vb[4], 0); + Vc0[0] = vec_nmsub(Vc1[0], VbS0, Vc0[0]); + Vc0[1] = vec_nmsub(Vc1[1], VbS0, Vc0[1]); + Vc0[2] = vec_nmsub(Vc1[2], VbS0, Vc0[2]); + Vc0[3] = vec_nmsub(Vc1[3], VbS0, Vc0[3]); + + a[0] = (c0[0] *= b[0]); + a[1] = (c0[1] *= b[0]); + a[2] = (c0[2] *= b[0]); + a[3] = (c0[3] *= b[0]); + a[4] = (c0[4] *= b[0]); + a[5] = (c0[5] *= b[0]); + a[6] = (c0[6] *= b[0]); + a[7] = (c0[7] *= b[0]); +} + +#else + +static inline __attribute__ ((always_inline)) void solve16x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7; + c0 = &c[0*ldc]; + c1 = &c[1*ldc]; + c2 = &c[2*ldc]; + c3 = &c[3*ldc]; + c4 = &c[4*ldc]; + c5 = &c[5*ldc]; + c6 = &c[6*ldc]; + c7 = &c[7*ldc]; + + vector FLOAT *Va = (vector FLOAT *) a; + vector FLOAT *Vb = (vector FLOAT *) b; + vector FLOAT *Vc0 = (vector FLOAT *) c0; + vector FLOAT *Vc1 = (vector FLOAT *) c1; + vector FLOAT *Vc2 = (vector FLOAT *) c2; + vector FLOAT *Vc3 = (vector FLOAT *) c3; + vector FLOAT *Vc4 = (vector FLOAT *) c4; + vector FLOAT *Vc5 = (vector FLOAT *) c5; + vector FLOAT *Vc6 = (vector FLOAT *) c6; + vector FLOAT *Vc7 = (vector FLOAT *) c7; + vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7; + + VbS0 = vec_splat(Vb[14], 0); + VbS1 = vec_splat(Vb[14], 1); + VbS2 = vec_splat(Vb[14], 2); + VbS3 = vec_splat(Vb[14], 3); + VbS4 = vec_splat(Vb[15], 0); + VbS5 = vec_splat(Vb[15], 1); + 
VbS6 = vec_splat(Vb[15], 2); + VbS7 = vec_splat(Vb[15], 3); + + Vc7[0] = vec_mul(VbS7, Vc7[0]); + Vc7[1] = vec_mul(VbS7, Vc7[1]); + Vc7[2] = vec_mul(VbS7, Vc7[2]); + Vc7[3] = vec_mul(VbS7, Vc7[3]); + Va[28] = Vc7[0]; + Va[29] = Vc7[1]; + Va[30] = Vc7[2]; + Va[31] = Vc7[3]; + Vc0[0] = vec_nmsub(VbS0, Va[28], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[29], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[30], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[31], Vc0[3]); + Vc1[0] = vec_nmsub(VbS1, Va[28], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[29], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[30], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[31], Vc1[3]); + Vc2[0] = vec_nmsub(VbS2, Va[28], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[29], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[30], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[31], Vc2[3]); + Vc3[0] = vec_nmsub(VbS3, Va[28], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[29], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[30], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[31], Vc3[3]); + Vc4[0] = vec_nmsub(VbS4, Va[28], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[29], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[30], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[31], Vc4[3]); + Vc5[0] = vec_nmsub(VbS5, Va[28], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[29], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[30], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[31], Vc5[3]); + Vc6[0] = vec_nmsub(VbS6, Va[28], Vc6[0]); + Vc6[1] = vec_nmsub(VbS6, Va[29], Vc6[1]); + Vc6[2] = vec_nmsub(VbS6, Va[30], Vc6[2]); + Vc6[3] = vec_nmsub(VbS6, Va[31], Vc6[3]); + + VbS0 = vec_splat(Vb[12], 0); + VbS1 = vec_splat(Vb[12], 1); + VbS2 = vec_splat(Vb[12], 2); + VbS3 = vec_splat(Vb[12], 3); + VbS4 = vec_splat(Vb[13], 0); + VbS5 = vec_splat(Vb[13], 1); + VbS6 = vec_splat(Vb[13], 2); + + Vc6[0] = vec_mul(VbS6, Vc6[0]); + Vc6[1] = vec_mul(VbS6, Vc6[1]); + Vc6[2] = vec_mul(VbS6, Vc6[2]); + Vc6[3] = vec_mul(VbS6, Vc6[3]); + Va[24] = Vc6[0]; + Va[25] = Vc6[1]; + Va[26] = Vc6[2]; + Va[27] = Vc6[3]; + Vc0[0] = vec_nmsub(VbS0, Va[24], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[25], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[26], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[27], Vc0[3]); + Vc1[0] = vec_nmsub(VbS1, Va[24], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[25], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[26], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[27], Vc1[3]); + Vc2[0] = vec_nmsub(VbS2, Va[24], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[25], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[26], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[27], Vc2[3]); + Vc3[0] = vec_nmsub(VbS3, Va[24], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[25], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[26], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[27], Vc3[3]); + Vc4[0] = vec_nmsub(VbS4, Va[24], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[25], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[26], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[27], Vc4[3]); + Vc5[0] = vec_nmsub(VbS5, Va[24], Vc5[0]); + Vc5[1] = vec_nmsub(VbS5, Va[25], Vc5[1]); + Vc5[2] = vec_nmsub(VbS5, Va[26], Vc5[2]); + Vc5[3] = vec_nmsub(VbS5, Va[27], Vc5[3]); + + VbS0 = vec_splat(Vb[10], 0); + VbS1 = vec_splat(Vb[10], 1); + VbS2 = vec_splat(Vb[10], 2); + VbS3 = vec_splat(Vb[10], 3); + VbS4 = vec_splat(Vb[11], 0); + VbS5 = vec_splat(Vb[11], 1); + + Vc5[0] = vec_mul(VbS5, Vc5[0]); + Vc5[1] = vec_mul(VbS5, Vc5[1]); + Vc5[2] = vec_mul(VbS5, Vc5[2]); + Vc5[3] = vec_mul(VbS5, Vc5[3]); + Va[20] = Vc5[0]; + Va[21] = Vc5[1]; + Va[22] = Vc5[2]; + Va[23] = Vc5[3]; + Vc0[0] = vec_nmsub(VbS0, Va[20], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[21], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[22], Vc0[2]); + Vc0[3] = 
vec_nmsub(VbS0, Va[23], Vc0[3]); + Vc1[0] = vec_nmsub(VbS1, Va[20], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[21], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[22], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[23], Vc1[3]); + Vc2[0] = vec_nmsub(VbS2, Va[20], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[21], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[22], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[23], Vc2[3]); + Vc3[0] = vec_nmsub(VbS3, Va[20], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[21], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[22], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[23], Vc3[3]); + Vc4[0] = vec_nmsub(VbS4, Va[20], Vc4[0]); + Vc4[1] = vec_nmsub(VbS4, Va[21], Vc4[1]); + Vc4[2] = vec_nmsub(VbS4, Va[22], Vc4[2]); + Vc4[3] = vec_nmsub(VbS4, Va[23], Vc4[3]); + + VbS0 = vec_splat(Vb[8], 0); + VbS1 = vec_splat(Vb[8], 1); + VbS2 = vec_splat(Vb[8], 2); + VbS3 = vec_splat(Vb[8], 3); + VbS4 = vec_splat(Vb[9], 0); + + Vc4[0] = vec_mul(VbS4, Vc4[0]); + Vc4[1] = vec_mul(VbS4, Vc4[1]); + Vc4[2] = vec_mul(VbS4, Vc4[2]); + Vc4[3] = vec_mul(VbS4, Vc4[3]); + Va[16] = Vc4[0]; + Va[17] = Vc4[1]; + Va[18] = Vc4[2]; + Va[19] = Vc4[3]; + Vc0[0] = vec_nmsub(VbS0, Va[16], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[17], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[18], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[19], Vc0[3]); + Vc1[0] = vec_nmsub(VbS1, Va[16], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[17], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[18], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[19], Vc1[3]); + Vc2[0] = vec_nmsub(VbS2, Va[16], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[17], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[18], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[19], Vc2[3]); + Vc3[0] = vec_nmsub(VbS3, Va[16], Vc3[0]); + Vc3[1] = vec_nmsub(VbS3, Va[17], Vc3[1]); + Vc3[2] = vec_nmsub(VbS3, Va[18], Vc3[2]); + Vc3[3] = vec_nmsub(VbS3, Va[19], Vc3[3]); + + VbS0 = vec_splat(Vb[6], 0); + VbS1 = vec_splat(Vb[6], 1); + VbS2 = vec_splat(Vb[6], 2); + VbS3 = vec_splat(Vb[6], 3); + + Vc3[0] = vec_mul(VbS3, Vc3[0]); + Vc3[1] = vec_mul(VbS3, Vc3[1]); + Vc3[2] = vec_mul(VbS3, Vc3[2]); + Vc3[3] = vec_mul(VbS3, Vc3[3]); + Va[12] = Vc3[0]; + Va[13] = Vc3[1]; + Va[14] = Vc3[2]; + Va[15] = Vc3[3]; + Vc0[0] = vec_nmsub(VbS0, Va[12], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[13], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[14], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[15], Vc0[3]); + Vc1[0] = vec_nmsub(VbS1, Va[12], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[13], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[14], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[15], Vc1[3]); + Vc2[0] = vec_nmsub(VbS2, Va[12], Vc2[0]); + Vc2[1] = vec_nmsub(VbS2, Va[13], Vc2[1]); + Vc2[2] = vec_nmsub(VbS2, Va[14], Vc2[2]); + Vc2[3] = vec_nmsub(VbS2, Va[15], Vc2[3]); + + VbS0 = vec_splat(Vb[4], 0); + VbS1 = vec_splat(Vb[4], 1); + VbS2 = vec_splat(Vb[4], 2); + + Vc2[0] = vec_mul(VbS2, Vc2[0]); + Vc2[1] = vec_mul(VbS2, Vc2[1]); + Vc2[2] = vec_mul(VbS2, Vc2[2]); + Vc2[3] = vec_mul(VbS2, Vc2[3]); + Va[ 8] = Vc2[0]; + Va[ 9] = Vc2[1]; + Va[10] = Vc2[2]; + Va[11] = Vc2[3]; + Vc0[0] = vec_nmsub(VbS0, Va[ 8], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[ 9], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[10], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[11], Vc0[3]); + Vc1[0] = vec_nmsub(VbS1, Va[ 8], Vc1[0]); + Vc1[1] = vec_nmsub(VbS1, Va[ 9], Vc1[1]); + Vc1[2] = vec_nmsub(VbS1, Va[10], Vc1[2]); + Vc1[3] = vec_nmsub(VbS1, Va[11], Vc1[3]); + + VbS0 = vec_splat(Vb[2], 0); + VbS1 = vec_splat(Vb[2], 1); + + Vc1[0] = vec_mul(VbS1, Vc1[0]); + Vc1[1] = vec_mul(VbS1, Vc1[1]); + Vc1[2] = vec_mul(VbS1, Vc1[2]); + Vc1[3] = vec_mul(VbS1, Vc1[3]); + Va[4] = Vc1[0]; + Va[5] 
= Vc1[1]; + Va[6] = Vc1[2]; + Va[7] = Vc1[3]; + Vc0[0] = vec_nmsub(VbS0, Va[4], Vc0[0]); + Vc0[1] = vec_nmsub(VbS0, Va[5], Vc0[1]); + Vc0[2] = vec_nmsub(VbS0, Va[6], Vc0[2]); + Vc0[3] = vec_nmsub(VbS0, Va[7], Vc0[3]); + + VbS0 = vec_splat(Vb[0], 0); + + Vc0[0] = vec_mul(VbS0, Vc0[0]); + Vc0[1] = vec_mul(VbS0, Vc0[1]); + Vc0[2] = vec_mul(VbS0, Vc0[2]); + Vc0[3] = vec_mul(VbS0, Vc0[3]); + Va[0] = Vc0[0]; + Va[1] = Vc0[1]; + Va[2] = Vc0[2]; + Va[3] = Vc0[3]; +} + +#endif + +static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (n - 1) * m; + b += (n - 1) * n; + + for (i = n - 1; i >= 0; i--) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = 0; k < i; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b -= n; + a -= 2 * m; + } + +} + +#else + +static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + a += (n - 1) * m * 2; + b += (n - 1) * n * 2; + + for (i = n - 1; i >= 0; i--) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = - aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= -cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b -= n * 2; + a -= 4 * m; + } + +} + +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM RT KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + +#ifdef DOUBLE + int well_aligned = (GEMM_UNROLL_M==8) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0); +#else + int well_aligned = (GEMM_UNROLL_M==16) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0); +#endif + + kk = n - offset; + c += n * ldc * COMPSIZE; + b += n * k * COMPSIZE; + + if (n & (GEMM_UNROLL_N - 1)) { + + j = 1; + while (j < GEMM_UNROLL_N) { + if (n & j) { + + aa = a; + b -= j * k * COMPSIZE; + c -= j * ldc* COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - j) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, 
dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - j) * i * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + + } + i >>= 1; + } while (i > 0); + } + kk -= j; + } + j <<= 1; + } + } + + j = (n >> GEMM_UNROLL_N_SHIFT); + + if (j > 0) { + + do { + aa = a; + b -= GEMM_UNROLL_N * k * COMPSIZE; + c -= GEMM_UNROLL_N * ldc * COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + if (well_aligned) { +#ifdef DOUBLE + solve8x8(aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, cc, ldc); +#else + solve16x8(aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, cc, ldc); +#endif + } + else { + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + } + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * i * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } while (i > 0); + } + + kk -= GEMM_UNROLL_N; + j --; + } while (j > 0); + } + + return 0; +} + + diff --git a/kernel/power/trsm_kernel_cell_LN.S b/kernel/power/trsm_kernel_cell_LN.S index 803530cbb..f656015a8 100644 --- a/kernel/power/trsm_kernel_cell_LN.S +++ b/kernel/power/trsm_kernel_cell_LN.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -180,7 +180,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -231,7 +231,7 @@ li PREC, -4 * SIZE #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/trsm_kernel_cell_LT.S b/kernel/power/trsm_kernel_cell_LT.S index 105e7d43c..083af7289 100644 --- a/kernel/power/trsm_kernel_cell_LT.S +++ b/kernel/power/trsm_kernel_cell_LT.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -180,7 +180,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -257,7 +257,7 @@ #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/trsm_kernel_cell_RT.S b/kernel/power/trsm_kernel_cell_RT.S index a54a261cb..5a5b67e77 100644 --- a/kernel/power/trsm_kernel_cell_RT.S +++ b/kernel/power/trsm_kernel_cell_RT.S @@ -59,7 +59,7 @@ 
#define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -180,7 +180,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -231,7 +231,7 @@ li PREC, -4 * SIZE #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ mr PREA, r10 lwz PREB, FRAMESLOT(0) + STACKSIZE(SP) diff --git a/kernel/power/trsm_kernel_hummer_LN.S b/kernel/power/trsm_kernel_hummer_LN.S index 109dacb8c..35ffab427 100644 --- a/kernel/power/trsm_kernel_hummer_LN.S +++ b/kernel/power/trsm_kernel_hummer_LN.S @@ -46,7 +46,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #define A r6 #define B r7 #define C r8 diff --git a/kernel/power/trsm_kernel_hummer_LT.S b/kernel/power/trsm_kernel_hummer_LT.S index 1ad062a7c..f7a09dbd8 100644 --- a/kernel/power/trsm_kernel_hummer_LT.S +++ b/kernel/power/trsm_kernel_hummer_LT.S @@ -46,7 +46,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #define A r6 #define B r7 #define C r8 diff --git a/kernel/power/trsm_kernel_hummer_RT.S b/kernel/power/trsm_kernel_hummer_RT.S index 94b3c0c85..0e563e5cc 100644 --- a/kernel/power/trsm_kernel_hummer_RT.S +++ b/kernel/power/trsm_kernel_hummer_RT.S @@ -46,7 +46,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #define A r6 #define B r7 #define C r8 diff --git a/kernel/power/trsm_kernel_power6_LN.S b/kernel/power/trsm_kernel_power6_LN.S index 937a6761a..83594c772 100644 --- a/kernel/power/trsm_kernel_power6_LN.S +++ b/kernel/power/trsm_kernel_power6_LN.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -179,7 +179,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/trsm_kernel_power6_LT.S b/kernel/power/trsm_kernel_power6_LT.S index 924f00ec0..54a8547b0 100644 --- a/kernel/power/trsm_kernel_power6_LT.S +++ b/kernel/power/trsm_kernel_power6_LT.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -180,7 +180,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/trsm_kernel_power6_RT.S b/kernel/power/trsm_kernel_power6_RT.S index 40ee5e28d..b2b27613c 100644 --- a/kernel/power/trsm_kernel_power6_RT.S +++ b/kernel/power/trsm_kernel_power6_RT.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -179,7 +179,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/trsm_kernel_ppc440_LN.S b/kernel/power/trsm_kernel_ppc440_LN.S index 6b7312101..a708a084d 100644 --- a/kernel/power/trsm_kernel_ppc440_LN.S +++ b/kernel/power/trsm_kernel_ppc440_LN.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef 
__64BIT__ #define A r6 #define B r7 @@ -191,7 +191,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/trsm_kernel_ppc440_LT.S b/kernel/power/trsm_kernel_ppc440_LT.S index 28b109b96..31f82de2c 100644 --- a/kernel/power/trsm_kernel_ppc440_LT.S +++ b/kernel/power/trsm_kernel_ppc440_LT.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -176,7 +176,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/trsm_kernel_ppc440_RT.S b/kernel/power/trsm_kernel_ppc440_RT.S index df80cd393..f5005403c 100644 --- a/kernel/power/trsm_kernel_ppc440_RT.S +++ b/kernel/power/trsm_kernel_ppc440_RT.S @@ -59,7 +59,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -191,7 +191,7 @@ slwi LDC, LDC, BASE_SHIFT -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/zasum.c b/kernel/power/zasum.c index f61c62e75..305e50ede 100644 --- a/kernel/power/zasum.c +++ b/kernel/power/zasum.c @@ -46,9 +46,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "zasum_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_8 diff --git a/kernel/power/zasum_microk_power8.c b/kernel/power/zasum_microk_power8.c index 82366902d..3f0af4232 100644 --- a/kernel/power/zasum_microk_power8.c +++ b/kernel/power/zasum_microk_power8.c @@ -68,10 +68,10 @@ static double zasum_kernel_8 (long n, double *x) "addi %2, %2, 128 \n\t" "addic. %1, %1, -8 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvabsdp 48, 40 \n\t" "xvabsdp 49, 41 \n\t" @@ -108,9 +108,9 @@ static double zasum_kernel_8 (long n, double *x) "xvadddp 38, 38, %x5 \n\t" "xvadddp 39, 39, %x6 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvabsdp 48, 40 \n\t" "xvabsdp 49, 41 \n\t" @@ -140,7 +140,7 @@ static double zasum_kernel_8 (long n, double *x) "xvadddp 32, 32, 36 \n\t" - "xxswapd 33, 32 \n\t" + XXSWAPD_S(33,32) "xsadddp %x0, 32, 33 \n" "#n=%1 x=%3=%2 sum=%0 o16=%8 o32=%9 o48=%10 o64=%11 o80=%12 o96=%13 o112=%14\n" diff --git a/kernel/power/zaxpy.S b/kernel/power/zaxpy.S index ac5b249bb..b001f42d1 100644 --- a/kernel/power/zaxpy.S +++ b/kernel/power/zaxpy.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define N r3 #define X r6 @@ -123,7 +123,7 @@ stfd f24, 80(SP) stfd f25, 88(SP) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld INCY, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/zaxpy.c b/kernel/power/zaxpy.c index f0f8c6910..3064d5435 100644 --- a/kernel/power/zaxpy.c +++ b/kernel/power/zaxpy.c @@ -36,9 +36,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "zaxpy_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_4 diff --git a/kernel/power/zaxpy_microk_power10.c b/kernel/power/zaxpy_microk_power10.c new file mode 100644 index 000000000..8e593bbfa --- /dev/null +++ b/kernel/power/zaxpy_microk_power10.c @@ -0,0 +1,200 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#define HAVE_KERNEL_4 1 +static void zaxpy_kernel_4 (long n, double *x, double *y, + double alpha_r, double alpha_i) +{ +#if !defined(CONJ) + static const double mvec[2] = { 1.0, -1.0 }; +#else + static const double mvec[2] = { -1.0, 1.0 }; +#endif + const double *mvecp = mvec; + + __vector double t0; + __vector double t1; + __vector double t2; + __vector double t3; + __vector double t4; + __vector double t5; + __vector double t6; + __vector double t7; + long ytmp; + + __asm__ + ( + XXSPLTD_S(32,%x15,0) // alpha_r + XXSPLTD_S(33,%x16,0) // alpha_i + "lxvd2x 36, 0, %17 \n\t" // mvec + +#if !defined(CONJ) + "xvmuldp 33, 33, 36 \n\t" // alpha_i * mvec +#else + "xvmuldp 32, 32, 36 \n\t" // alpha_r * mvec +#endif + + "mr %12, %3 \n\t" + "dcbt 0, %2 \n\t" + "dcbt 0, %3 \n\t" + + + "lxvp 40, 0(%2) \n\t" // x0 + "lxvp 42, 32(%2) \n\t" // x2 + "lxvp 48, 0(%3) \n\t" // y0 + "lxvp 50, 32(%3) \n\t" // y2 + + XXSWAPD_S(%x4,40) // exchange real and imag part + XXSWAPD_S(%x5,41) // exchange real and imag part + XXSWAPD_S(%x6,42) // exchange real and imag part + XXSWAPD_S(%x7,43) // exchange real and imag part + + "lxvp 44, 64(%2) \n\t" // x4 + "lxvp 46, 96(%2) \n\t" // x6 + "lxvp 34, 64(%3) \n\t" // y4 + "lxvp 38, 96(%3) \n\t" // y6 + + XXSWAPD_S(%x8,44) // exchange real and imag part + XXSWAPD_S(%x9,45) // exchange real and imag part + XXSWAPD_S(%x10,46) // exchange real and imag part + XXSWAPD_S(%x11,47) // exchange real and imag part + + "addi %2, %2, 128 \n\t" + "addi %3, %3, 128 \n\t" + + "addic. %1, %1, -8 \n\t" + "ble two%= \n\t" + + ".align 5 \n" + "one%=: \n\t" + + "xvmaddadp 48, 40, 32 \n\t" // alpha_r * x0_r , alpha_r * x0_i + "xvmaddadp 49, 41, 32 \n\t" + "lxvp 40, 0(%2) \n\t" // x0 + "xvmaddadp 50, 42, 32 \n\t" + "xvmaddadp 51, 43, 32 \n\t" + "lxvp 42, 32(%2) \n\t" // x2 + + "xvmaddadp 34, 44, 32 \n\t" + "xvmaddadp 35, 45, 32 \n\t" + "lxvp 44, 64(%2) \n\t" // x4 + "xvmaddadp 38, 46, 32 \n\t" + "xvmaddadp 39, 47, 32 \n\t" + "lxvp 46, 96(%2) \n\t" // x6 + + "xvmaddadp 48, %x4, 33 \n\t" // alpha_i * x0_i , alpha_i * x0_r + "addi %2, %2, 128 \n\t" + "xvmaddadp 49, %x5, 33 \n\t" + "xvmaddadp 50, %x6, 33 \n\t" + "xvmaddadp 51, %x7, 33 \n\t" + + "xvmaddadp 34, %x8, 33 \n\t" + "xvmaddadp 35, %x9, 33 \n\t" + "xvmaddadp 38, %x10, 33 \n\t" + "xvmaddadp 39, %x11, 33 \n\t" + + "stxvp 48, 0(%12) \n\t" + "stxvp 50, 32(%12) \n\t" + "stxvp 34, 64(%12) \n\t" + "stxvp 38, 96(%12) \n\t" + + "addi %12, %12, 128 \n\t" + + XXSWAPD_S(%x4,40) // exchange real and imag part + XXSWAPD_S(%x5,41) // exchange real and imag part + "lxvp 48, 0(%3) \n\t" // y0 + XXSWAPD_S(%x6,42) // exchange real and imag part + XXSWAPD_S(%x7,43) // exchange real and imag part + "lxvp 50, 32(%3) \n\t" // y2 + + XXSWAPD_S(%x8,44) // exchange real and imag part + XXSWAPD_S(%x9,45) // exchange real and imag part + "lxvp 34, 64(%3) \n\t" // y4 + XXSWAPD_S(%x10,46) // exchange real and imag part + XXSWAPD_S(%x11,47) // exchange real and imag part + "lxvp 38, 96(%3) \n\t" // y6 + + "addi %3, %3, 128 \n\t" + + "addic. 
%1, %1, -8 \n\t" + "bgt one%= \n" + + "two%=: \n\t" + "xvmaddadp 48, 40, 32 \n\t" // alpha_r * x0_r , alpha_r * x0_i + "xvmaddadp 49, 41, 32 \n\t" + "xvmaddadp 50, 42, 32 \n\t" + "xvmaddadp 51, 43, 32 \n\t" + + "xvmaddadp 34, 44, 32 \n\t" + "xvmaddadp 35, 45, 32 \n\t" + "xvmaddadp 38, 46, 32 \n\t" + "xvmaddadp 39, 47, 32 \n\t" + + "xvmaddadp 48, %x4, 33 \n\t" // alpha_i * x0_i , alpha_i * x0_r + "xvmaddadp 49, %x5, 33 \n\t" + "xvmaddadp 50, %x6, 33 \n\t" + "xvmaddadp 51, %x7, 33 \n\t" + + "xvmaddadp 34, %x8, 33 \n\t" + "xvmaddadp 35, %x9, 33 \n\t" + "xvmaddadp 38, %x10, 33 \n\t" + "xvmaddadp 39, %x11, 33 \n\t" + + "stxvp 48, 0(%12) \n\t" + "stxvp 50, 32(%12) \n\t" + "stxvp 34, 64(%12) \n\t" + "stxvp 38, 96(%12) \n\t" + + "#n=%1 x=%13=%2 y=%0=%3 alpha=(%15,%16) mvecp=%14=%17 ytmp=%12\n" + "#t0=%x4 t1=%x5 t2=%x6 t3=%x7 t4=%x8 t5=%x9 t6=%x10 t7=%x11" + : + "+m" (*y), + "+r" (n), // 1 + "+b" (x), // 2 + "+b" (y), // 3 + "=wa" (t0), // 4 + "=wa" (t1), // 5 + "=wa" (t2), // 6 + "=wa" (t3), // 7 + "=wa" (t4), // 8 + "=wa" (t5), // 9 + "=wa" (t6), // 10 + "=wa" (t7), // 11 + "=b" (ytmp) // 12 + : + "m" (*x), + "m" (*mvecp), + "d" (alpha_r), // 15 + "d" (alpha_i), // 16 + "12" (mvecp) // 17 + : + "cr0", + "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", + "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", + "vs48","vs49","vs50","vs51" + ); +} diff --git a/kernel/power/zaxpy_microk_power8.c b/kernel/power/zaxpy_microk_power8.c index 124614f62..959050e5f 100644 --- a/kernel/power/zaxpy_microk_power8.c +++ b/kernel/power/zaxpy_microk_power8.c @@ -61,8 +61,8 @@ static void zaxpy_kernel_4 (long n, double *x, double *y, __asm__ ( - "xxspltd 32, %x19, 0 \n\t" // alpha_r - "xxspltd 33, %x20, 0 \n\t" // alpha_i + XXSPLTD_S(32,%x19,0) // alpha_r + XXSPLTD_S(33,%x20,0) // alpha_i "lxvd2x 36, 0, %21 \n\t" // mvec @@ -87,10 +87,10 @@ static void zaxpy_kernel_4 (long n, double *x, double *y, "lxvd2x 50, %23, %3 \n\t" // y2 "lxvd2x 51, %24, %3 \n\t" // y3 - "xxswapd %x8, 40 \n\t" // exchange real and imag part - "xxswapd %x9, 41 \n\t" // exchange real and imag part - "xxswapd %x10, 42 \n\t" // exchange real and imag part - "xxswapd %x11, 43 \n\t" // exchange real and imag part + XXSWAPD_S(%x8,40) // exchange real and imag part + XXSWAPD_S(%x9,41) // exchange real and imag part + XXSWAPD_S(%x10,42) // exchange real and imag part + XXSWAPD_S(%x11,43) // exchange real and imag part "addi %2, %2, 64 \n\t" "addi %3, %3, 64 \n\t" @@ -105,19 +105,19 @@ static void zaxpy_kernel_4 (long n, double *x, double *y, "lxvd2x %x6, %23, %3 \n\t" // y6 "lxvd2x %x7, %24, %3 \n\t" // y7 - "xxswapd %x12, 44 \n\t" // exchange real and imag part - "xxswapd %x13, 45 \n\t" // exchange real and imag part - "xxswapd %x14, 46 \n\t" // exchange real and imag part - "xxswapd %x15, 47 \n\t" // exchange real and imag part + XXSWAPD_S(%x12,44) // exchange real and imag part + XXSWAPD_S(%x13,45) // exchange real and imag part + XXSWAPD_S(%x14,46) // exchange real and imag part + XXSWAPD_S(%x15,47) // exchange real and imag part "addi %2, %2, 64 \n\t" "addi %3, %3, 64 \n\t" "addic. 
%1, %1, -8 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmaddadp 48, 40, 32 \n\t" // alpha_r * x0_r , alpha_r * x0_i "xvmaddadp 49, 41, 32 \n\t" @@ -163,31 +163,31 @@ static void zaxpy_kernel_4 (long n, double *x, double *y, "addi %16, %16, 64 \n\t" - "xxswapd %x8, 40 \n\t" // exchange real and imag part - "xxswapd %x9, 41 \n\t" // exchange real and imag part + XXSWAPD_S(%x8,40) // exchange real and imag part + XXSWAPD_S(%x9,41) // exchange real and imag part "lxvd2x 48, 0, %3 \n\t" // y0 "lxvd2x 49, %22, %3 \n\t" // y1 - "xxswapd %x10, 42 \n\t" // exchange real and imag part - "xxswapd %x11, 43 \n\t" // exchange real and imag part + XXSWAPD_S(%x10,42) // exchange real and imag part + XXSWAPD_S(%x11,43) // exchange real and imag part "lxvd2x 50, %23, %3 \n\t" // y2 "lxvd2x 51, %24, %3 \n\t" // y3 - "xxswapd %x12, 44 \n\t" // exchange real and imag part + XXSWAPD_S(%x12,44) // exchange real and imag part "addi %3, %3, 64 \n\t" - "xxswapd %x13, 45 \n\t" // exchange real and imag part + XXSWAPD_S(%x13,45) // exchange real and imag part "lxvd2x %x4, 0, %3 \n\t" // y4 "lxvd2x %x5, %22, %3 \n\t" // y5 - "xxswapd %x14, 46 \n\t" // exchange real and imag part - "xxswapd %x15, 47 \n\t" // exchange real and imag part + XXSWAPD_S(%x14,46) // exchange real and imag part + XXSWAPD_S(%x15,47) // exchange real and imag part "lxvd2x %x6, %23, %3 \n\t" // y6 "lxvd2x %x7, %24, %3 \n\t" // y7 "addi %3, %3, 64 \n\t" "addic. %1, %1, -8 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmaddadp 48, 40, 32 \n\t" // alpha_r * x0_r , alpha_r * x0_i "xvmaddadp 49, 41, 32 \n\t" diff --git a/kernel/power/zaxpy_power10.c b/kernel/power/zaxpy_power10.c new file mode 100644 index 000000000..54cfb8fd7 --- /dev/null +++ b/kernel/power/zaxpy_power10.c @@ -0,0 +1,126 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "common.h" + + +#if defined(__VEC__) || defined(__ALTIVEC__) +#include "zaxpy_microk_power10.c" +#endif + + +#ifndef HAVE_KERNEL_4 + +static void zaxpy_kernel_4(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT da_r,FLOAT da_i) +{ + BLASLONG register i = 0; + BLASLONG register ix = 0; + + + + while(i < n) + { +#if !defined(CONJ) + y[ix] += ( da_r * x[ix] - da_i * x[ix+1] ) ; + y[ix+1] += ( da_r * x[ix+1] + da_i * x[ix] ) ; + y[ix+2] += ( da_r * x[ix+2] - da_i * x[ix+3] ) ; + y[ix+3] += ( da_r * x[ix+3] + da_i * x[ix+2] ) ; +#else + y[ix] += ( da_r * x[ix] + da_i * x[ix+1] ) ; + y[ix+1] -= ( da_r * x[ix+1] - da_i * x[ix] ) ; + y[ix+2] += ( da_r * x[ix+2] + da_i * x[ix+3] ) ; + y[ix+3] -= ( da_r * x[ix+3] - da_i * x[ix+2] ) ; +#endif + + ix+=4 ; + i+=2 ; + + } + +} + +#endif + +int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) +{ + BLASLONG i=0; + BLASLONG ix=0,iy=0; + + if ( n <= 0 ) return(0); + + if ( (inc_x == 1) && (inc_y == 1) ) + { + + BLASLONG n1 = n & -16; + + if ( n1 ) + { + zaxpy_kernel_4 (n1, x, y, da_r, da_i); + ix = 2 * n1; + } + i = n1; + while(i < n) + { +#if !defined(CONJ) + y[ix] += ( da_r * x[ix] - da_i * x[ix+1] ) ; + y[ix+1] += ( da_r * x[ix+1] + da_i * x[ix] ) ; +#else + y[ix] += ( da_r * x[ix] + da_i * x[ix+1] ) ; + y[ix+1] -= ( da_r * x[ix+1] - da_i * x[ix] ) ; +#endif + i++ ; + ix += 2; + + } + return(0); + + + } + + inc_x *=2; + inc_y *=2; + + while(i < n) + { + +#if !defined(CONJ) + y[iy] += ( da_r * x[ix] - da_i * x[ix+1] ) ; + y[iy+1] += ( da_r * x[ix+1] + da_i * x[ix] ) ; +#else + y[iy] += ( da_r * x[ix] + da_i * x[ix+1] ) ; + y[iy+1] -= ( da_r * x[ix+1] - da_i * x[ix] ) ; +#endif + ix += inc_x ; + iy += inc_y ; + i++ ; + + } + return(0); + +} + + diff --git a/kernel/power/zaxpy_ppc440.S b/kernel/power/zaxpy_ppc440.S index b5c604e91..848a0135f 100644 --- a/kernel/power/zaxpy_ppc440.S +++ b/kernel/power/zaxpy_ppc440.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define N r3 #define X r6 @@ -112,7 +112,7 @@ stfd f24, 80(SP) stfd f25, 88(SP) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld INCY, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/zcopy.c b/kernel/power/zcopy.c index b21d6ef15..453f4e551 100644 --- a/kernel/power/zcopy.c +++ b/kernel/power/zcopy.c @@ -35,9 +35,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "zcopy_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_16 diff --git a/kernel/power/zcopy_microk_power8.c b/kernel/power/zcopy_microk_power8.c index 5ca34b633..e29547047 100644 --- a/kernel/power/zcopy_microk_power8.c +++ b/kernel/power/zcopy_microk_power8.c @@ -62,10 +62,10 @@ static void zcopy_kernel_16 (long n, FLOAT *x, FLOAT *y) "addi %2, %2, 128 \n\t" "addic. %1, %1, -16 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "stxvd2x 32, 0, %3 \n\t" "stxvd2x 33, %5, %3 \n\t" @@ -108,9 +108,9 @@ static void zcopy_kernel_16 (long n, FLOAT *x, FLOAT *y) "addi %2, %2, 128 \n\t" "addic. 
%1, %1, -16 \n\t"
- "bgt 1b \n"
+ "bgt one%= \n"

- "2: \n\t"
+ "two%=: \n\t"

 "stxvd2x 32, 0, %3 \n\t"
 "stxvd2x 33, %5, %3 \n\t"
diff --git a/kernel/power/zcopy_power10.c b/kernel/power/zcopy_power10.c
new file mode 100644
index 000000000..6b4e7a7d4
--- /dev/null
+++ b/kernel/power/zcopy_power10.c
@@ -0,0 +1,132 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+
+#if defined(__VEC__) || defined(__ALTIVEC__)
+#include "copy_microk_power10.c"
+#endif
+
+#ifndef HAVE_KERNEL
+
+static void copy_kernel(BLASLONG n, FLOAT *x, FLOAT *y)
+{
+
+ BLASLONG i=0;
+ FLOAT f0, f1, f2, f3, f4, f5, f6, f7;
+ FLOAT *x1=x;
+ FLOAT *y1=y;
+
+ while ( i < n )
+ {
+ f0 = x1[0];
+ f1 = x1[1];
+ f2 = x1[2];
+ f3 = x1[3];
+ f4 = x1[4];
+ f5 = x1[5];
+ f6 = x1[6];
+ f7 = x1[7];
+
+ y1[0] = f0;
+ y1[1] = f1;
+ y1[2] = f2;
+ y1[3] = f3;
+ y1[4] = f4;
+ y1[5] = f5;
+ y1[6] = f6;
+ y1[7] = f7;
+
+ x1 += 8;
+ y1 += 8;
+ i += 4;
+ }
+ return;
+
+}
+
+#endif
+
+int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)
+{
+ BLASLONG i=0;
+ BLASLONG ix=0,iy=0;
+
+ if ( n <= 0 ) return(0);
+
+ if ( (inc_x == 1) && (inc_y == 1) )
+ {
+
+ BLASLONG n1 = n & -16;
+ if ( n1 > 0 )
+ {
+ copy_kernel(n1, x, y);
+ i=n1;
+ ix=n1*2;
+ iy=n1*2;
+ }
+
+ while(i < n)
+ {
+ y[iy] = x[iy] ;
+ y[iy+1] = x[ix+1] ;
+ ix+=2;
+ iy+=2;
+ i++ ;
+
+ }
+
+
+ }
+ else
+ {
+
+ BLASLONG inc_x2 = 2 * inc_x;
+ BLASLONG inc_y2 = 2 * inc_y;
+
+ while(i < n)
+ {
+ y[iy] = x[ix] ;
+ y[iy+1] = x[ix+1] ;
+ ix += inc_x2 ;
+ iy += inc_y2 ;
+ i++ ;
+
+ }
+
+ }
+ return(0);
+
+
+}
+
+
diff --git a/kernel/power/zdot.c b/kernel/power/zdot.c
index fd36c7f44..fe0e9284e 100644
--- a/kernel/power/zdot.c
+++ b/kernel/power/zdot.c
@@ -36,9 +36,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #include "zdot_microk_power8.c" #endif +#endif #ifndef HAVE_KERNEL_8 @@ -93,9 +95,11 @@ FLOAT _Complex CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG in FLOAT dot[4] = { 0.0, 0.0, 0.0 , 0.0 } ; if ( n <= 0 ) - { + { /* __real__ result = 0.0 ; __imag__ result = 0.0 ; + */ + result = OPENBLAS_MAKE_COMPLEX_FLOAT(0.0,0.0); return(result); } @@ -149,11 +153,17 @@ FLOAT _Complex CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG in } #if !defined(CONJ) + /* __real__ result = dot[0] - dot[1]; __imag__ result = dot[2] + dot[3]; + */ + result = OPENBLAS_MAKE_COMPLEX_FLOAT(dot[0]-dot[1],dot[2]+dot[3]); #else + /* __real__ result = dot[0] + dot[1]; __imag__ result = dot[2] - dot[3]; + */ + result = OPENBLAS_MAKE_COMPLEX_FLOAT(dot[0]+dot[1],dot[2]-dot[3]); #endif diff --git a/kernel/power/zdot_microk_power8.c b/kernel/power/zdot_microk_power8.c index 71078b66c..dcde82433 100644 --- a/kernel/power/zdot_microk_power8.c +++ b/kernel/power/zdot_microk_power8.c @@ -60,10 +60,10 @@ static void zdot_kernel_8 (long n, double *x, double *y, double *dot) "lxvd2x 43, %9, %2 \n\t" // x3_r, x3_i "lxvd2x 51, %9, %3 \n\t" // y3_r, y3_i - "xxswapd 0, 48 \n\t" // y0_i, y0_r - "xxswapd 1, 49 \n\t" // y1_i, y1_r - "xxswapd 2, 50 \n\t" // y2_i, y2_r - "xxswapd 3, 51 \n\t" // y3_i, y3_r + XXSWAPD_S(0,48) // y0_i, y0_r + XXSWAPD_S(1,49) // y1_i, y1_r + XXSWAPD_S(2,50) // y2_i, y2_r + XXSWAPD_S(3,51) // y3_i, y3_r "addi %2, %2, 64 \n\t" "addi %3, %3, 64 \n\t" @@ -77,19 +77,19 @@ static void zdot_kernel_8 (long n, double *x, double *y, double *dot) "lxvd2x 47, %9, %2 \n\t" // x3_r, x3_i "lxvd2x 7, %9, %3 \n\t" // y3_r, y3_i - "xxswapd 8, 4 \n\t" // y0_i, y0_r - "xxswapd 9, 5 \n\t" // y1_i, y1_r - "xxswapd 10, 6 \n\t" // y2_i, y2_r - "xxswapd 11, 7 \n\t" // y3_i, y3_r + XXSWAPD_S(8,4) // y0_i, y0_r + XXSWAPD_S(9,5) // y1_i, y1_r + XXSWAPD_S(10,6) // y2_i, y2_r + XXSWAPD_S(11,7) // y3_i, y3_r "addi %2, %2, 64 \n\t" "addi %3, %3, 64 \n\t" "addic. %1, %1, -8 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmaddadp 32, 40, 48 \n\t" // x0_r * y0_r , x0_i * y0_i "lxvd2x 48, 0, %3 \n\t" // y0_r, y0_i @@ -111,14 +111,14 @@ static void zdot_kernel_8 (long n, double *x, double *y, double *dot) "xvmaddadp 39, 43, 3 \n\t" // x3_r * y3_i , x3_i * y3_r "lxvd2x 43, %9, %2 \n\t" // x3_r, x3_i - "xxswapd 0,48 \n\t" // y0_i, y0_r - "xxswapd 1,49 \n\t" // y1_i, y1_r + XXSWAPD_S(0,48) // y0_i, y0_r + XXSWAPD_S(1,49) // y1_i, y1_r "addi %2, %2, 64 \n\t" "addi %3, %3, 64 \n\t" - "xxswapd 2,50 \n\t" // y2_i, y2_r - "xxswapd 3,51 \n\t" // y3_i, y3_r + XXSWAPD_S(2,50) // y2_i, y2_r + XXSWAPD_S(3,51) // y3_i, y3_r "xvmaddadp 32, 44, 4 \n\t" // x0_r * y0_r , x0_i * y0_i "lxvd2x 4, 0, %3 \n\t" // y0_r, y0_i @@ -138,19 +138,19 @@ static void zdot_kernel_8 (long n, double *x, double *y, double *dot) "xvmaddadp 39, 47, 11 \n\t" // x3_r * y3_i , x3_i * y3_r "lxvd2x 47, %9, %2 \n\t" // x3_r, x3_i - "xxswapd 8,4 \n\t" // y0_i, y0_r - "xxswapd 9,5 \n\t" // y1_i, y1_r + XXSWAPD_S(8,4) // y0_i, y0_r + XXSWAPD_S(9,5) // y1_i, y1_r "addi %2, %2, 64 \n\t" "addi %3, %3, 64 \n\t" - "xxswapd 10,6 \n\t" // y2_i, y2_r - "xxswapd 11,7 \n\t" // y3_i, y3_r + XXSWAPD_S(10,6) // y2_i, y2_r + XXSWAPD_S(11,7) // y3_i, y3_r "addic. 
%1, %1, -8 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmaddadp 32, 40, 48 \n\t" // x0_r * y0_r , x0_i * y0_i "xvmaddadp 34, 41, 49 \n\t" // x1_r * y1_r , x1_i * y1_i diff --git a/kernel/power/zgemm_beta.S b/kernel/power/zgemm_beta.S index 1f4c29210..57c3bed50 100644 --- a/kernel/power/zgemm_beta.S +++ b/kernel/power/zgemm_beta.S @@ -62,7 +62,7 @@ stfd f31, 8(SP) stw r0, 16(SP) -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz LDC, FRAMESLOT(0) + STACKSIZE(SP) #else diff --git a/kernel/power/zgemm_kernel.S b/kernel/power/zgemm_kernel.S index 8ec8b674a..ae8a93e89 100644 --- a/kernel/power/zgemm_kernel.S +++ b/kernel/power/zgemm_kernel.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -169,7 +169,7 @@ stfd f2, ALPHA_I stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -190,7 +190,7 @@ #endif #ifdef TRMMKERNEL -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif @@ -231,7 +231,7 @@ #endif #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREA, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/zgemm_kernel_8x2_power8.S b/kernel/power/zgemm_kernel_8x2_power8.S index 5526b91c9..dfe2d9dc6 100644 --- a/kernel/power/zgemm_kernel_8x2_power8.S +++ b/kernel/power/zgemm_kernel_8x2_power8.S @@ -132,7 +132,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -296,7 +296,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stfd f2, ALPHA_I_SP stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + 0(FRAMEPOINTER) #endif @@ -317,7 +317,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endif #ifdef TRMMKERNEL -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + 0(FRAMEPOINTER) #endif diff --git a/kernel/power/zgemm_kernel_altivec.S b/kernel/power/zgemm_kernel_altivec.S index 2b650cd02..2525a8e58 100644 --- a/kernel/power/zgemm_kernel_altivec.S +++ b/kernel/power/zgemm_kernel_altivec.S @@ -62,7 +62,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -238,7 +238,7 @@ #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -264,7 +264,7 @@ #endif #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREB, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/zgemm_kernel_altivec_cell.S b/kernel/power/zgemm_kernel_altivec_cell.S index 642d1f2e7..47a79064d 100644 --- a/kernel/power/zgemm_kernel_altivec_cell.S +++ b/kernel/power/zgemm_kernel_altivec_cell.S @@ -62,7 +62,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -244,7 +244,7 @@ #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -270,7 +270,7 @@ #endif #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREB, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/zgemm_kernel_altivec_g4.S b/kernel/power/zgemm_kernel_altivec_g4.S index 0f7a6f9aa..c305270bd 100644 --- a/kernel/power/zgemm_kernel_altivec_g4.S +++ b/kernel/power/zgemm_kernel_altivec_g4.S @@ -62,7 +62,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -238,7 +238,7 @@ #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/zgemm_kernel_cell.S b/kernel/power/zgemm_kernel_cell.S index 8fd6b0afb..3d179378b 100644 --- a/kernel/power/zgemm_kernel_cell.S +++ b/kernel/power/zgemm_kernel_cell.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -175,7 +175,7 @@ stfd f2, ALPHA_I stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -196,7 +196,7 @@ #endif #ifdef TRMMKERNEL -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif @@ -230,7 +230,7 @@ li PREA, 16 * 12 * SIZE #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREA, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/zgemm_kernel_g4.S b/kernel/power/zgemm_kernel_g4.S index bf6bf77e8..b92fb4225 100644 --- a/kernel/power/zgemm_kernel_g4.S +++ b/kernel/power/zgemm_kernel_g4.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -185,7 +185,7 @@ stfd f2, ALPHA_I stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -206,7 +206,7 @@ #endif #ifdef TRMMKERNEL -#if 
defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/zgemm_kernel_hummer.S b/kernel/power/zgemm_kernel_hummer.S index 991a64373..5546dd2f6 100644 --- a/kernel/power/zgemm_kernel_hummer.S +++ b/kernel/power/zgemm_kernel_hummer.S @@ -48,7 +48,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #define A r6 #define B r7 #define C r8 diff --git a/kernel/power/zgemm_kernel_power10.S b/kernel/power/zgemm_kernel_power10.S new file mode 100644 index 000000000..fca389e69 --- /dev/null +++ b/kernel/power/zgemm_kernel_power10.S @@ -0,0 +1,245 @@ +/*************************************************************************** +Copyright (c) 2013-2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ +#define ASSEMBLER +#include "common.h" +#include "def_vsx.h" + +#define LOAD ld + +#define STACKSIZE 512 + +#define FZERO 312+192(SP) + +#define FLINK_SAVE (STACKSIZE+16) /* 16($r12) */ + +#define M r3 +#define N r4 +#define K r5 + + +#define A r8 +#define B r9 +#define C r10 +#define LDC r6 +#define OFFSET r7 + + + +#define o0 0 +#define alpha_r vs62 +#define alpha_i vs63 + +#define VECSAVE r11 + +#define FRAMEPOINTER r12 + +#define T10 r14 + +#define L r15 +#define T8 r16 +#define T5 r17 +#define T2 r19 +#define TEMP_REG r20 +#define T6 r21 +#define I r22 +#define J r23 +#define AO r24 +#define BO r25 +#define CO r26 +#define T7 r27 +#define T3 r28 +#define T4 r29 + +#define PRE r30 +#define T1 r31 + +#ifndef NEEDPARAM + + PROLOGUE + PROFCODE + + mr FRAMEPOINTER, SP + addi SP, SP, -STACKSIZE + mflr r0 + stfd f14, 0(SP) + stfd f15, 8(SP) + stfd f16, 16(SP) + stfd f17, 24(SP) + + stfd f18, 32(SP) + stfd f19, 40(SP) + stfd f20, 48(SP) + stfd f21, 56(SP) + + stfd f22, 64(SP) + stfd f23, 72(SP) + stfd f24, 80(SP) + stfd f25, 88(SP) + + stfd f26, 96(SP) + stfd f27, 104(SP) + stfd f28, 112(SP) + stfd f29, 120(SP) + + stfd f30, 128(SP) + stfd f31, 136(SP) + + xxspltd alpha_r,vs1,0 /*copy from register f1 */ + xxspltd alpha_i,vs2,0 /*copy from register f2 */ + + std r31, 144(SP) + std r30, 152(SP) + std r29, 160(SP) + std r28, 168(SP) + std r27, 176(SP) + std r26, 184(SP) + std r25, 192(SP) + std r24, 200(SP) + std r23, 208(SP) + std r22, 216(SP) + std r21, 224(SP) + std r20, 232(SP) + std r19, 240(SP) + std r18, 248(SP) + std r17, 256(SP) + std r16, 264(SP) + std r15, 272(SP) + std r14, 280(SP) + + + stxv vs20, 288(SP) + stxv vs21, 304(SP) + stxv vs22, 320(SP) + stxv vs23, 336(SP) + stxv vs24, 352(SP) + stxv vs25, 368(SP) + stxv vs26, 384(SP) + stxv vs27, 400(SP) + stxv vs28, 416(SP) + stxv vs29, 432(SP) + stxv vs30, 448(SP) + stxv vs31, 464(SP) + + std r0, FLINK_SAVE(SP) + + +#if defined(linux) || defined(__FreeBSD__) + ld LDC, FRAMESLOT(0) + 0(FRAMEPOINTER) +#endif + + +#ifdef TRMMKERNEL +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) + ld OFFSET, FRAMESLOT(1) + 0(FRAMEPOINTER) +#endif +#endif + + +#include "zgemm_macros_power10.S" + + + + slwi LDC, LDC, ZBASE_SHIFT + li PRE, 512 + li r0, 0 + + +#if defined(CC) || defined(CR) || defined(RC) || defined(RR) +/*negate for this case as we will use addition -1*(a+b) */ + xvnegdp alpha_r,alpha_r + xvnegdp alpha_i,alpha_i +#endif + .align 4 + +#include "zgemm_logic_power10.S" + +L999: + + lfd f14, 0(SP) + lfd f15, 8(SP) + lfd f16, 16(SP) + lfd f17, 24(SP) + + lfd f18, 32(SP) + lfd f19, 40(SP) + lfd f20, 48(SP) + lfd f21, 56(SP) + + lfd f22, 64(SP) + lfd f23, 72(SP) + lfd f24, 80(SP) + lfd f25, 88(SP) + + lfd f26, 96(SP) + lfd f27, 104(SP) + lfd f28, 112(SP) + lfd f29, 120(SP) + + lfd f30, 128(SP) + lfd f31, 136(SP) + + + ld r31, 144(SP) + ld r30, 152(SP) + ld r29, 160(SP) + ld r28, 168(SP) + ld r27, 176(SP) + ld r26, 184(SP) + ld r25, 192(SP) + ld r24, 200(SP) + ld r23, 208(SP) + ld r22, 216(SP) + ld r21, 224(SP) + ld r20, 232(SP) + ld r19, 240(SP) + ld r18, 248(SP) + ld r17, 256(SP) + ld r16, 264(SP) + ld r15, 272(SP) + ld r14, 280(SP) + + ld r0, FLINK_SAVE(SP) + + lxv vs20, 288(SP) + lxv vs21, 304(SP) + lxv vs22, 320(SP) + lxv vs23, 336(SP) + lxv vs24, 352(SP) + lxv vs25, 368(SP) + lxv vs26, 384(SP) + lxv vs27, 400(SP) + mtlr r0 + lxv vs28, 416(SP) + lxv vs29, 432(SP) + lxv vs30, 448(SP) + lxv vs31, 464(SP) + + addi SP, SP, STACKSIZE + blr + + 
EPILOGUE +#endif diff --git a/kernel/power/zgemm_kernel_power3.S b/kernel/power/zgemm_kernel_power3.S index 471d3b9ae..d14cb1cd9 100644 --- a/kernel/power/zgemm_kernel_power3.S +++ b/kernel/power/zgemm_kernel_power3.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -161,7 +161,7 @@ stfd f2, ALPHA_I stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -202,7 +202,7 @@ #endif #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREA, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/zgemm_kernel_power6.S b/kernel/power/zgemm_kernel_power6.S index 3c28649bc..9b47b9fc1 100644 --- a/kernel/power/zgemm_kernel_power6.S +++ b/kernel/power/zgemm_kernel_power6.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -199,7 +199,7 @@ stfd f2, ALPHA_I stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -220,7 +220,7 @@ #endif #ifdef TRMMKERNEL -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/zgemm_kernel_power9.S b/kernel/power/zgemm_kernel_power9.S new file mode 100644 index 000000000..d1e60da6c --- /dev/null +++ b/kernel/power/zgemm_kernel_power9.S @@ -0,0 +1,245 @@ +/*************************************************************************** +Copyright (c) 2013-2019, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ +#define ASSEMBLER +#include "common.h" +#include "def_vsx.h" + +#define LOAD ld + +#define STACKSIZE 512 + +#define FZERO 312+192(SP) + +#define FLINK_SAVE (STACKSIZE+16) /* 16($r12) */ + +#define M r3 +#define N r4 +#define K r5 + + +#define A r8 +#define B r9 +#define C r10 +#define LDC r6 +#define OFFSET r7 + + + +#define o0 0 +#define alpha_r vs30 +#define alpha_i vs31 + +#define VECSAVE r11 + +#define FRAMEPOINTER r12 + +#define T10 r14 + +#define L r15 +#define T8 r16 +#define T5 r17 +#define T2 r19 +#define TEMP_REG r20 +#define T6 r21 +#define I r22 +#define J r23 +#define AO r24 +#define BO r25 +#define CO r26 +#define T7 r27 +#define T3 r28 +#define T4 r29 + +#define PRE r30 +#define T1 r31 + +#ifndef NEEDPARAM + + PROLOGUE + PROFCODE + + mr FRAMEPOINTER, SP + addi SP, SP, -STACKSIZE + mflr r0 + stfd f14, 0(SP) + stfd f15, 8(SP) + stfd f16, 16(SP) + stfd f17, 24(SP) + + stfd f18, 32(SP) + stfd f19, 40(SP) + stfd f20, 48(SP) + stfd f21, 56(SP) + + stfd f22, 64(SP) + stfd f23, 72(SP) + stfd f24, 80(SP) + stfd f25, 88(SP) + + stfd f26, 96(SP) + stfd f27, 104(SP) + stfd f28, 112(SP) + stfd f29, 120(SP) + + stfd f30, 128(SP) + stfd f31, 136(SP) + + xxspltd alpha_r,vs1,0 /*copy from register f1 */ + xxspltd alpha_i,vs2,0 /*copy from register f2 */ + + std r31, 144(SP) + std r30, 152(SP) + std r29, 160(SP) + std r28, 168(SP) + std r27, 176(SP) + std r26, 184(SP) + std r25, 192(SP) + std r24, 200(SP) + std r23, 208(SP) + std r22, 216(SP) + std r21, 224(SP) + std r20, 232(SP) + std r19, 240(SP) + std r18, 248(SP) + std r17, 256(SP) + std r16, 264(SP) + std r15, 272(SP) + std r14, 280(SP) + + + stxv vs52, 288(SP) + stxv vs53, 304(SP) + stxv vs54, 320(SP) + stxv vs55, 336(SP) + stxv vs56, 352(SP) + stxv vs57, 368(SP) + stxv vs58, 384(SP) + stxv vs59, 400(SP) + stxv vs60, 416(SP) + stxv vs61, 432(SP) + stxv vs62, 448(SP) + stxv vs63, 464(SP) + + std r0, FLINK_SAVE(SP) + + +#if defined(linux) || defined(__FreeBSD__) + ld LDC, FRAMESLOT(0) + 0(FRAMEPOINTER) +#endif + + +#ifdef TRMMKERNEL +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) + ld OFFSET, FRAMESLOT(1) + 0(FRAMEPOINTER) +#endif +#endif + + +#include "zgemm_macros_power9.S" + + + + slwi LDC, LDC, ZBASE_SHIFT + li PRE, 512 + li r0, 0 + + +#if defined(CC) || defined(CR) || defined(RC) || defined(RR) +/*negate for this case as we will use addition -1*(a+b) */ + xvnegdp alpha_r,alpha_r + xvnegdp alpha_i,alpha_i +#endif + .align 4 + +#include "zgemm_logic_power9.S" + +L999: + + lfd f14, 0(SP) + lfd f15, 8(SP) + lfd f16, 16(SP) + lfd f17, 24(SP) + + lfd f18, 32(SP) + lfd f19, 40(SP) + lfd f20, 48(SP) + lfd f21, 56(SP) + + lfd f22, 64(SP) + lfd f23, 72(SP) + lfd f24, 80(SP) + lfd f25, 88(SP) + + lfd f26, 96(SP) + lfd f27, 104(SP) + lfd f28, 112(SP) + lfd f29, 120(SP) + + lfd f30, 128(SP) + lfd f31, 136(SP) + + + ld r31, 144(SP) + ld r30, 152(SP) + ld r29, 160(SP) + ld r28, 168(SP) + ld r27, 176(SP) + ld r26, 184(SP) + ld r25, 192(SP) + ld r24, 200(SP) + ld r23, 208(SP) + ld r22, 216(SP) + ld r21, 224(SP) + ld r20, 232(SP) + ld r19, 240(SP) + ld r18, 248(SP) + ld r17, 256(SP) + ld r16, 264(SP) + ld r15, 272(SP) + ld r14, 280(SP) + + ld r0, FLINK_SAVE(SP) + + lxv vs52, 288(SP) + lxv vs53, 304(SP) + lxv vs54, 320(SP) + lxv vs55, 336(SP) + lxv vs56, 352(SP) + lxv vs57, 368(SP) + lxv vs58, 384(SP) + lxv vs59, 400(SP) + mtlr r0 + lxv vs60, 416(SP) + lxv vs61, 432(SP) + lxv vs62, 448(SP) + lxv vs63, 464(SP) + + addi SP, SP, STACKSIZE + blr + + 
EPILOGUE +#endif \ No newline at end of file diff --git a/kernel/power/zgemm_kernel_ppc440.S b/kernel/power/zgemm_kernel_ppc440.S index 748b69a0c..ba99a21c5 100644 --- a/kernel/power/zgemm_kernel_ppc440.S +++ b/kernel/power/zgemm_kernel_ppc440.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -182,7 +182,7 @@ stfd f2, ALPHA_I stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -203,7 +203,7 @@ #endif #ifdef TRMMKERNEL -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/zgemm_logic_power10.S b/kernel/power/zgemm_logic_power10.S new file mode 100644 index 000000000..1143733e0 --- /dev/null +++ b/kernel/power/zgemm_logic_power10.S @@ -0,0 +1,1735 @@ +/*************************************************************************** +Copyright (c) 2013-2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ +#define MY_ALIGN .align 3 +b ZGEMM_L2 +/* MINI SUBROUTINES */ +/* 2x8 MAIN 128x+2 LOOP */ + + +ZGEMM_L2x8_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + MY_ALIGN +ZGEMM_L2x8_LOOP: +/*----------------------------------------*/ + dcbt AO, PRE + dcbt BO, PRE + KERNEL2x8_2 0, 0 +ZGEMM_L2x8_K128: +/*----------------------------------------*/ + KERNEL2x8_2 1, 0 + dcbt AO, T2 + KERNEL2x8_2 2, 0 + KERNEL2x8_2 3, 0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL2x8_2 4, 0 + KERNEL2x8_2 5, 0 + dcbt AO, T4 + KERNEL2x8_2 6, 0 + KERNEL2x8_2 7, 0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL2x8_2 8, 0 + KERNEL2x8_2 9, 0 + KERNEL2x8_2 10, 0 + KERNEL2x8_2 11, 0 + dcbt BO, T4 + KERNEL2x8_2 12, 0 + KERNEL2x8_2 13, 0 + KERNEL2x8_2 14, 0 + KERNEL2x8_2 15, 0 + KERNEL2x8_2 16, 0 + KERNEL2x8_2 17, 0 + KERNEL2x8_2 18, 0 + KERNEL2x8_2 19, 0 + KERNEL2x8_2 20, 0 + KERNEL2x8_2 21, 0 + KERNEL2x8_2 22, 0 + KERNEL2x8_2 23, 0 + KERNEL2x8_2 24, 0 + KERNEL2x8_2 25, 0 + KERNEL2x8_2 26, 0 + KERNEL2x8_2 27, 0 + KERNEL2x8_2 28, 0 + KERNEL2x8_2 29, 0 + KERNEL2x8_2 30, 0 + KERNEL2x8_2 31, 0 + KERNEL2x8_2 32, 0 + KERNEL2x8_2 33, 0 + KERNEL2x8_2 34, 0 + KERNEL2x8_2 35, 0 + KERNEL2x8_2 36, 0 + KERNEL2x8_2 37, 0 + KERNEL2x8_2 38, 0 + KERNEL2x8_2 39, 0 + KERNEL2x8_2 40, 0 + KERNEL2x8_2 41, 0 + KERNEL2x8_2 42, 0 + KERNEL2x8_2 43, 0 + KERNEL2x8_2 44, 0 + KERNEL2x8_2 45, 0 + KERNEL2x8_2 46, 0 + KERNEL2x8_2 47, 0 + KERNEL2x8_2 48, 0 + KERNEL2x8_2 49, 0 + KERNEL2x8_2 50, 0 + KERNEL2x8_2 51, 0 + KERNEL2x8_2 52, 0 + KERNEL2x8_2 53, 0 + KERNEL2x8_2 54, 0 + KERNEL2x8_2 55, 0 + KERNEL2x8_2 56, 0 + KERNEL2x8_2 57, 0 + KERNEL2x8_2 58, 0 + KERNEL2x8_2 59, 0 + KERNEL2x8_2 60, 0 + KERNEL2x8_2 61, 0 + KERNEL2x8_2 62, 0 + KERNEL2x8_2 63, 1 + bdz ZGEMM_L2x8_LOOP_END + b ZGEMM_L2x8_LOOP + MY_ALIGN + +ZGEMM_L2x8_LOOP_END: +/*----------------------------------------*/ + KERNEL2x8_2 0, 1 + blr + MY_ALIGN + + +ZGEMM_2x4_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + MY_ALIGN +ZGEMM_L2x4_LOOP: +/*----------------------------------------*/ + KERNEL2x4_2 0, 0 +ZGEMM_L2x4_K32: +/*----------------------------------------*/ + KERNEL2x4_2 1, 0 + KERNEL2x4_2 2, 0 + KERNEL2x4_2 3, 0 + KERNEL2x4_2 4, 0 + KERNEL2x4_2 5, 0 + KERNEL2x4_2 6, 0 + KERNEL2x4_2 7, 0 + KERNEL2x4_2 8, 0 + KERNEL2x4_2 9, 0 + KERNEL2x4_2 10, 0 + KERNEL2x4_2 11, 0 + KERNEL2x4_2 12, 0 + KERNEL2x4_2 13, 0 + KERNEL2x4_2 14, 0 + KERNEL2x4_2 15, 1 + bdnz ZGEMM_L2x4_LOOP + MY_ALIGN +ZGEMM_L2x4_LOOP_END: +/*----------------------------------------*/ + KERNEL2x4_2 0, 1 + blr + MY_ALIGN + + +ZGEMM_2x2_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + MY_ALIGN +ZGEMM_L2x2_LOOP: +/*----------------------------------------*/ + KERNEL2x2_2 0, 0 +ZGEMM_L2x2_K32: +/*----------------------------------------*/ + KERNEL2x2_2 1, 0 + KERNEL2x2_2 2, 0 + KERNEL2x2_2 3, 0 + KERNEL2x2_2 4, 0 + KERNEL2x2_2 5, 0 + KERNEL2x2_2 6, 0 + KERNEL2x2_2 7, 0 + KERNEL2x2_2 8, 0 + KERNEL2x2_2 9, 0 + KERNEL2x2_2 10, 0 + KERNEL2x2_2 11, 0 + KERNEL2x2_2 12, 0 + KERNEL2x2_2 13, 0 + KERNEL2x2_2 14, 0 + KERNEL2x2_2 15, 1 + bdnz ZGEMM_L2x2_LOOP + MY_ALIGN + + +ZGEMM_L2x2_LOOP_END: +/*----------------------------------------*/ + KERNEL2x2_2 0, 1 + blr + MY_ALIGN + +ZGEMM_2x1_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD2x1_2 + MY_ALIGN +ZGEMM_L2x1_LOOP: +/*----------------------------------------*/ + KERNEL2x1_L2 32, 64, 0, 0 +ZGEMM_L2x1_K32: 
+/*----------------------------------------*/ + KERNEL2x1_L2 32, 64, 1, 0 + KERNEL2x1_L2 32, 64, 2, 0 + KERNEL2x1_L2 32, 64, 3, 0 + KERNEL2x1_L2 32, 64, 4, 0 + KERNEL2x1_L2 32, 64, 5, 0 + KERNEL2x1_L2 32, 64, 6, 0 + KERNEL2x1_L2 32, 64, 7, 0 + KERNEL2x1_L2 32, 64, 8, 0 + KERNEL2x1_L2 32, 64, 9, 0 + KERNEL2x1_L2 32, 64, 10, 0 + KERNEL2x1_L2 32, 64, 11, 0 + KERNEL2x1_L2 32, 64, 12, 0 + KERNEL2x1_L2 32, 64, 13, 0 + KERNEL2x1_L2 32, 64, 14, 0 + KERNEL2x1_L2 32, 64, 15, 1 + bdnz ZGEMM_L2x1_LOOP + MY_ALIGN +ZGEMM_L2x1_LOOP_END: +/*----------------------------------------*/ + END2x1_2 + blr + + MY_ALIGN + + +/* MAIN LOOP BEGINS */ + MY_ALIGN + + +ZGEMM_L2: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) && !defined(LEFT) + neg TEMP_REG, OFFSET +#endif + srawi. J, N, 1 + bgt ZGEMM_L2_BEGIN + b ZGEMM_L2_END + +ZGEMM_L2_BEGIN: +/*----------------------------------------*/ + mr CO, C + slwi T1, LDC, 1 + add T2,C,LDC + mr AO, A + add C, C, T1 +#if defined(TRMMKERNEL) && defined(LEFT) + mr TEMP_REG, OFFSET /*off = offset;*/ +#endif + srawi. I, M, 3 + bgt ZGEMM_L2_BEGIN_CONTINUE + b ZGEMM_L2x8_END + +ZGEMM_L2_BEGIN_CONTINUE: + dcbt CO,r0 /*just prefetch*/ + dcbt T2,r0 + + +ZGEMM_L2x8_BEGIN: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO, BO,TEMP_REG, B, 8, 2 +#else + mr BO, B + dcbt B, r0 +#endif + dcbt AO, r0 +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG, 8, 2 + mr T1, T6 +#else + mr T1, K +#endif +/* TEMPS FOR PREFETCH */ + li T2, 1024 + li T3, 1024+512 + addi T1,T1, -2 +/* TEMPS FOR PREFETCH */ + li T4, 2048 + li T5, 2048+512 + srawi. T8, T1, 7 /* T8 <- T1 % 128 */ + + KERNEL2x8_PRELOAD + KERNEL2x8_ZERO_AND_PRIME_MMA + ble ZGEMM_L2x8_SUB0 + bl ZGEMM_L2x8_LMAIN_SUB + andi. L, T1, 127 + + bgt ZGEMM_L2x8_BEGIN_CONTINUE + b ZGEMM_L2x8_SAVE + +ZGEMM_L2x8_BEGIN_CONTINUE: + b ZGEMM_L2x8_SUB2 + + +ZGEMM_L2x8_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 255 + cmpwi T6, 129 +#else + andi. L, K, 255 + cmpwi K, 129 +#endif + li T8, 1 + bne CMP2x8_128K + LOAD_END_2x8 128, 32 + KERNEL2x8_PRELOAD + addi BO, BO, -64 + addi AO,AO, -256 + mtctr T8 + bl ZGEMM_L2x8_K128 + b ZGEMM_L2x8_SAVE + +CMP2x8_128K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6, 128 +#else + cmpwi K, 128 +#endif + bne ZGEMM_L2x8_SUB2 + MY_ALIGN + mtctr T8 + addi BO, BO, -64 + addi AO,AO, -256 + bl ZGEMM_L2x8_K128 + b ZGEMM_L2x8_SAVE + MY_ALIGN + + +ZGEMM_L2x8_SUB2: +/*----------------------------------------*/ + andi. T1,L, 64 + ble ZGEMM_L2x8_SUB2_32 + dcbt AO, PRE + dcbt BO, PRE + KERNEL2x8_2 0, 0 + KERNEL2x8_2 1, 0 + dcbt AO, T2 + KERNEL2x8_2 2, 0 + KERNEL2x8_2 3, 0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL2x8_2 4, 0 + KERNEL2x8_2 5, 0 + dcbt AO, T4 + KERNEL2x8_2 6, 0 + KERNEL2x8_2 7, 0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL2x8_2 8, 0 + KERNEL2x8_2 9, 0 + KERNEL2x8_2 10, 0 + KERNEL2x8_2 11, 0 + dcbt BO, T4 + KERNEL2x8_2 12, 0 + KERNEL2x8_2 13, 0 + KERNEL2x8_2 14, 0 + KERNEL2x8_2 15, 0 + KERNEL2x8_2 16, 0 + KERNEL2x8_2 17, 0 + KERNEL2x8_2 18, 0 + KERNEL2x8_2 19, 0 + KERNEL2x8_2 20, 0 + KERNEL2x8_2 21, 0 + KERNEL2x8_2 22, 0 + KERNEL2x8_2 23, 0 + KERNEL2x8_2 24, 0 + KERNEL2x8_2 25, 0 + KERNEL2x8_2 26, 0 + KERNEL2x8_2 27, 0 + KERNEL2x8_2 28, 0 + KERNEL2x8_2 29, 0 + KERNEL2x8_2 30, 0 + KERNEL2x8_2 31, 1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_32: +/*----------------------------------------*/ + andi. 
T1,L, 32 + ble ZGEMM_L2x8_SUB2_16 + dcbt AO, PRE + dcbt BO, PRE + KERNEL2x8_2 0, 0 + KERNEL2x8_2 1, 0 + dcbt AO, T2 + KERNEL2x8_2 2, 0 + KERNEL2x8_2 3, 0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL2x8_2 4, 0 + KERNEL2x8_2 5, 0 + dcbt AO, T4 + KERNEL2x8_2 6, 0 + KERNEL2x8_2 7, 0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL2x8_2 8, 0 + KERNEL2x8_2 9, 0 + KERNEL2x8_2 10, 0 + KERNEL2x8_2 11, 0 + dcbt BO, T4 + KERNEL2x8_2 12, 0 + KERNEL2x8_2 13, 0 + KERNEL2x8_2 14, 0 + KERNEL2x8_2 15, 1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_16: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L2x8_SUB2_8 + dcbt AO, PRE + dcbt BO, PRE + KERNEL2x8_2 0, 0 + KERNEL2x8_2 1, 0 + dcbt AO, T2 + KERNEL2x8_2 2, 0 + KERNEL2x8_2 3, 0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL2x8_2 4, 0 + KERNEL2x8_2 5, 0 + dcbt AO, T4 + KERNEL2x8_2 6, 0 + KERNEL2x8_2 7, 1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L2x8_SUB2_4 + KERNEL2x8_2 0, 0 + KERNEL2x8_2 1, 0 + KERNEL2x8_2 2, 0 + KERNEL2x8_2 3, 1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L2x8_SUB2_2 + KERNEL2x8_2 0, 0 + KERNEL2x8_2 1, 1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L2x8_SUB2_1 + KERNEL2x8_2 0, 1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L2x8_SAVE + LOAD_END_2x8 128, 32 + + +ZGEMM_L2x8_SAVE: +/*----------------------------------------*/ + addic. I, I, -1 + KERNEL2x8_UNPRIME_MMA + SAVE2x8 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG, BO,AO, 8, 2 +#endif + + ble ZGEMM_L2x8_SAVE_CONTINUE + b ZGEMM_L2x8_BEGIN + +ZGEMM_L2x8_SAVE_CONTINUE: + andi. T2, M, 7 + ble ZGEMM_L2x1_END + andi. T1, M, 4 + ble ZGEMM_L2x4_END + b ZGEMM_L2x4_BEGIN + MY_ALIGN + + +ZGEMM_L2x8_END: +/*----------------------------------------*/ + + +ZGEMM_L2x4_BEGIN: +/*----------------------------------------*/ + andi. T2, M, 7 + ble ZGEMM_L2x1_END + andi. T1, M, 4 + ble ZGEMM_L2x4_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO, BO,TEMP_REG, B, 4, 2 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG, 4, 2 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + KERNEL2x4_PRELOAD + KERNEL2x4_ZERO_AND_PRIME_MMA + ble ZGEMM_L2x4_SUB0 + bl ZGEMM_2x4_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L2x4_SAVE + b ZGEMM_L2x4_SUB2 + + +ZGEMM_L2x4_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6, 33 +#else + andi. L, K, 63 + cmpwi K, 33 +#endif + li T8, 1 + bne CMP2x4_32K + LOAD_END_2x4 64, 32 + KERNEL2x4_PRELOAD + addi BO, BO, -64 + addi AO,AO, -128 + mtctr T8 + bl ZGEMM_L2x4_K32 + b ZGEMM_L2x4_SAVE + CMP2x4_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6, 32 +#else + cmpwi K, 32 +#endif + bne ZGEMM_L2x4_SUB2 + MY_ALIGN + mtctr T8 + addi BO, BO, -64 + addi AO,AO, -128 + bl ZGEMM_L2x4_K32 + b ZGEMM_L2x4_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L2x4_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L2x4_SUB2_8 + KERNEL2x4_2 0, 0 + KERNEL2x4_2 1, 0 + KERNEL2x4_2 2, 0 + KERNEL2x4_2 3, 0 + KERNEL2x4_2 4, 0 + KERNEL2x4_2 5, 0 + KERNEL2x4_2 6, 0 + KERNEL2x4_2 7, 1 + MY_ALIGN + + +ZGEMM_L2x4_SUB2_8: +/*----------------------------------------*/ + andi. 
T1,L, 8 + ble ZGEMM_L2x4_SUB2_4 + KERNEL2x4_2 0, 0 + KERNEL2x4_2 1, 0 + KERNEL2x4_2 2, 0 + KERNEL2x4_2 3, 1 + MY_ALIGN + + +ZGEMM_L2x4_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L2x4_SUB2_2 + KERNEL2x4_2 0, 0 + KERNEL2x4_2 1, 1 + MY_ALIGN + + +ZGEMM_L2x4_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L2x4_SUB2_1 + KERNEL2x4_2 0, 1 + MY_ALIGN + + +ZGEMM_L2x4_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L2x4_SAVE + LOAD_END_2x4 64, 32 + + +ZGEMM_L2x4_SAVE: +/*----------------------------------------*/ + KERNEL2x4_UNPRIME_MMA + SAVE2x4 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG, BO,AO, 4, 2 +#endif + + +ZGEMM_L2x4_END: +/*----------------------------------------*/ + + +ZGEMM_L2x2_BEGIN: +/*----------------------------------------*/ + andi. T1, M, 2 + ble ZGEMM_L2x2_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO, BO,TEMP_REG, B, 2, 2 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG, 2, 2 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + KERNEL2x2_PRELOAD + KERNEL2x2_ZERO_AND_PRIME_MMA + ble ZGEMM_L2x2_SUB0 + bl ZGEMM_2x2_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L2x2_SAVE + b ZGEMM_L2x2_SUB2 + + +ZGEMM_L2x2_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6, 33 +#else + andi. L, K, 63 + cmpwi K, 33 +#endif + li T8, 1 + bne CMP2x2_32K + LOAD_END_2x2 32, 32 + KERNEL2x2_PRELOAD + addi BO, BO, -64 + addi AO,AO, -64 + mtctr T8 + bl ZGEMM_L2x2_K32 + b ZGEMM_L2x2_SAVE + CMP2x2_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6, 32 +#else + cmpwi K, 32 +#endif + bne ZGEMM_L2x2_SUB2 + MY_ALIGN + mtctr T8 + addi BO, BO, -64 + addi AO,AO, -64 + bl ZGEMM_L2x2_K32 + b ZGEMM_L2x2_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L2x2_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L2x2_SUB2_8 + KERNEL2x2_2 0, 0 + KERNEL2x2_2 1, 0 + KERNEL2x2_2 2, 0 + KERNEL2x2_2 3, 0 + KERNEL2x2_2 4, 0 + KERNEL2x2_2 5, 0 + KERNEL2x2_2 6, 0 + KERNEL2x2_2 7, 1 + MY_ALIGN + + +ZGEMM_L2x2_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L2x2_SUB2_4 + KERNEL2x2_2 0, 0 + KERNEL2x2_2 1, 0 + KERNEL2x2_2 2, 0 + KERNEL2x2_2 3, 1 + MY_ALIGN + + +ZGEMM_L2x2_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L2x2_SUB2_2 + KERNEL2x2_2 0, 0 + KERNEL2x2_2 1, 1 + MY_ALIGN + + +ZGEMM_L2x2_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L2x2_SUB2_1 + KERNEL2x2_2 0, 1 + MY_ALIGN + + +ZGEMM_L2x2_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L2x2_SAVE + LOAD_END_2x2 32, 32 + + +ZGEMM_L2x2_SAVE: +/*----------------------------------------*/ + KERNEL2x2_UNPRIME_MMA + SAVE2x2 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG, BO,AO, 2, 2 +#endif + + +ZGEMM_L2x2_END: +/*----------------------------------------*/ + + +ZGEMM_L2x1_BEGIN: +/*----------------------------------------*/ + andi. T1, M, 1 + ble ZGEMM_L2x1_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO, BO,TEMP_REG, B, 1, 2 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG, 1, 2 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. 
T8, T1, 5 /**(K-2) % 32x */ +#endif + ZERO2x1 + ble ZGEMM_L2x1_SUB0 + bl ZGEMM_2x1_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L2x1_SAVE + b ZGEMM_L2x1_SUB2 + + +ZGEMM_L2x1_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6, 33 +#else + andi. L, K, 63 + cmpwi K, 33 +#endif + li T8, 1 + bne CMP2x1_32K + addi BO, BO, -32 + addi AO,AO, -16 + LOAD2x1O 16, 32 + END2x1_WITHOUT_ADD + LOAD2x1_2O 32, 64 + mtctr T8 + bl ZGEMM_L2x1_K32 + b ZGEMM_L2x1_SAVE + CMP2x1_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6, 32 +#else + cmpwi K, 32 +#endif + bne ZGEMM_L2x1_SUB2 + MY_ALIGN + mtctr T8 + addi BO, BO, -64 + addi AO,AO, -32 + LOAD2x1_2O 32, 64 + bl ZGEMM_L2x1_K32 + b ZGEMM_L2x1_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L2x1_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L2x1_SUB2_8 + LOAD2x1_2 + KERNEL2x1_L2 32, 64, 0, 0 + KERNEL2x1_L2 32, 64, 1, 0 + KERNEL2x1_L2 32, 64, 2, 0 + KERNEL2x1_L2 32, 64, 3, 0 + KERNEL2x1_L2 32, 64, 4, 0 + KERNEL2x1_L2 32, 64, 5, 0 + KERNEL2x1_L2 32, 64, 6, 0 + KERNEL2x1_E2 32, 64, 7, 1 + MY_ALIGN + + +ZGEMM_L2x1_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L2x1_SUB2_4 + LOAD2x1_2 + KERNEL2x1_L2 32, 64, 0, 0 + KERNEL2x1_L2 32, 64, 1, 0 + KERNEL2x1_L2 32, 64, 2, 0 + KERNEL2x1_E2 32, 64, 3, 1 + MY_ALIGN + + +ZGEMM_L2x1_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L2x1_SUB2_2 + LOAD2x1_2 + KERNEL2x1_L2 32, 64, 0, 0 + KERNEL2x1_E2 32, 64, 1, 1 + MY_ALIGN + + +ZGEMM_L2x1_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L2x1_SUB2_1 + LOAD2x1_2 + KERNEL2x1_E2 32, 64, 0, 1 + MY_ALIGN + + +ZGEMM_L2x1_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L2x1_SAVE + KERNEL2x1 + + +ZGEMM_L2x1_SAVE: +/*----------------------------------------*/ + SAVE2x1 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG, BO,AO, 1, 2 +#endif + + +ZGEMM_L2x1_END: +/*----------------------------------------*/ + slwi T1, K, 5 + addic. 
J, J, -1 + add B, B, T1 +#if defined(TRMMKERNEL) && !defined(LEFT) + addi TEMP_REG, TEMP_REG, 2 +#endif + ble ZGEMM_L2_END + b ZGEMM_L2_BEGIN + +ZGEMM_L2_END: + +b ZGEMM_L1 +/* MINI SUBROUTINES */ +/* 1x8 MAIN 128x+2 LOOP */ + + +ZGEMM_L1x8_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + MY_ALIGN +ZGEMM_L1x8_LOOP: +/*----------------------------------------*/ + dcbt AO, PRE + dcbt BO, PRE + KERNEL1x8_2 0, 0 +ZGEMM_L1x8_K128: +/*----------------------------------------*/ + KERNEL1x8_2 1, 0 + dcbt AO, T2 + KERNEL1x8_2 2, 0 + KERNEL1x8_2 3, 0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL1x8_2 4, 0 + KERNEL1x8_2 5, 0 + dcbt AO, T4 + KERNEL1x8_2 6, 0 + KERNEL1x8_2 7, 0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL1x8_2 8, 0 + KERNEL1x8_2 9, 0 + KERNEL1x8_2 10, 0 + KERNEL1x8_2 11, 0 + dcbt BO, T4 + KERNEL1x8_2 12, 0 + KERNEL1x8_2 13, 0 + KERNEL1x8_2 14, 0 + KERNEL1x8_2 15, 0 + KERNEL1x8_2 16, 0 + KERNEL1x8_2 17, 0 + KERNEL1x8_2 18, 0 + KERNEL1x8_2 19, 0 + KERNEL1x8_2 20, 0 + KERNEL1x8_2 21, 0 + KERNEL1x8_2 22, 0 + KERNEL1x8_2 23, 0 + KERNEL1x8_2 24, 0 + KERNEL1x8_2 25, 0 + KERNEL1x8_2 26, 0 + KERNEL1x8_2 27, 0 + KERNEL1x8_2 28, 0 + KERNEL1x8_2 29, 0 + KERNEL1x8_2 30, 0 + KERNEL1x8_2 31, 0 + KERNEL1x8_2 32, 0 + KERNEL1x8_2 33, 0 + KERNEL1x8_2 34, 0 + KERNEL1x8_2 35, 0 + KERNEL1x8_2 36, 0 + KERNEL1x8_2 37, 0 + KERNEL1x8_2 38, 0 + KERNEL1x8_2 39, 0 + KERNEL1x8_2 40, 0 + KERNEL1x8_2 41, 0 + KERNEL1x8_2 42, 0 + KERNEL1x8_2 43, 0 + KERNEL1x8_2 44, 0 + KERNEL1x8_2 45, 0 + KERNEL1x8_2 46, 0 + KERNEL1x8_2 47, 0 + KERNEL1x8_2 48, 0 + KERNEL1x8_2 49, 0 + KERNEL1x8_2 50, 0 + KERNEL1x8_2 51, 0 + KERNEL1x8_2 52, 0 + KERNEL1x8_2 53, 0 + KERNEL1x8_2 54, 0 + KERNEL1x8_2 55, 0 + KERNEL1x8_2 56, 0 + KERNEL1x8_2 57, 0 + KERNEL1x8_2 58, 0 + KERNEL1x8_2 59, 0 + KERNEL1x8_2 60, 0 + KERNEL1x8_2 61, 0 + KERNEL1x8_2 62, 0 + KERNEL1x8_2 63, 1 + bdnz ZGEMM_L1x8_LOOP + MY_ALIGN +ZGEMM_L1x8_LOOP_END: +/*----------------------------------------*/ + KERNEL1x8_2 0, 1 + blr + MY_ALIGN + + +ZGEMM_1x4_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + MY_ALIGN + + +ZGEMM_L1x4_LOOP: +/*----------------------------------------*/ + KERNEL1x4_2 0, 0 + + +ZGEMM_L1x4_K32: +/*----------------------------------------*/ + KERNEL1x4_2 1, 0 + KERNEL1x4_2 2, 0 + KERNEL1x4_2 3, 0 + KERNEL1x4_2 4, 0 + KERNEL1x4_2 5, 0 + KERNEL1x4_2 6, 0 + KERNEL1x4_2 7, 0 + KERNEL1x4_2 8, 0 + KERNEL1x4_2 9, 0 + KERNEL1x4_2 10, 0 + KERNEL1x4_2 11, 0 + KERNEL1x4_2 12, 0 + KERNEL1x4_2 13, 0 + KERNEL1x4_2 14, 0 + KERNEL1x4_2 15, 1 + bdnz ZGEMM_L1x4_LOOP + MY_ALIGN + + +ZGEMM_L1x4_LOOP_END: +/*----------------------------------------*/ + KERNEL1x4_2 0, 1 + blr + MY_ALIGN + + +ZGEMM_1x2_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + MY_ALIGN + + +ZGEMM_L1x2_LOOP: +/*----------------------------------------*/ + KERNEL1x2_2 0, 0 + + +ZGEMM_L1x2_K32: +/*----------------------------------------*/ + KERNEL1x2_2 1, 0 + KERNEL1x2_2 2, 0 + KERNEL1x2_2 3, 0 + KERNEL1x2_2 4, 0 + KERNEL1x2_2 5, 0 + KERNEL1x2_2 6, 0 + KERNEL1x2_2 7, 0 + KERNEL1x2_2 8, 0 + KERNEL1x2_2 9, 0 + KERNEL1x2_2 10, 0 + KERNEL1x2_2 11, 0 + KERNEL1x2_2 12, 0 + KERNEL1x2_2 13, 0 + KERNEL1x2_2 14, 0 + KERNEL1x2_2 15, 1 + bdnz ZGEMM_L1x2_LOOP + MY_ALIGN + + +ZGEMM_L1x2_LOOP_END: +/*----------------------------------------*/ + KERNEL1x2_2 0, 1 + blr + MY_ALIGN + + +ZGEMM_1x1_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD1x1_2 + MY_ALIGN + + +ZGEMM_L1x1_LOOP: +/*----------------------------------------*/ + KERNEL1x1_L2 32, 32, 0, 0 + + 
+ZGEMM_L1x1_K32: +/*----------------------------------------*/ + KERNEL1x1_L2 32, 32, 1, 0 + KERNEL1x1_L2 32, 32, 2, 0 + KERNEL1x1_L2 32, 32, 3, 0 + KERNEL1x1_L2 32, 32, 4, 0 + KERNEL1x1_L2 32, 32, 5, 0 + KERNEL1x1_L2 32, 32, 6, 0 + KERNEL1x1_L2 32, 32, 7, 0 + KERNEL1x1_L2 32, 32, 8, 0 + KERNEL1x1_L2 32, 32, 9, 0 + KERNEL1x1_L2 32, 32, 10, 0 + KERNEL1x1_L2 32, 32, 11, 0 + KERNEL1x1_L2 32, 32, 12, 0 + KERNEL1x1_L2 32, 32, 13, 0 + KERNEL1x1_L2 32, 32, 14, 0 + KERNEL1x1_L2 32, 32, 15, 1 + bdnz ZGEMM_L1x1_LOOP + MY_ALIGN + + +ZGEMM_L1x1_LOOP_END: +/*----------------------------------------*/ + END1x1_2 + blr + MY_ALIGN + + +/*----------------------N1 BEGINS---------*/ +ZGEMM_L1: +/*----------------------------------------*/ + andi. T1, N, 1 + ble ZGEMM_L1_END + +ZGEMM_L1_BEGIN: +/*----------------------------------------*/ + mr CO, C + + add T2,C,LDC + mr AO, A + add C, C, T1 +#if defined(TRMMKERNEL) && defined(LEFT) + mr TEMP_REG, OFFSET /*off = offset;*/ +#endif + srawi. I, M, 3 + ble ZGEMM_L1x8_END + dcbt CO,r0 /*just prefetch*/ + dcbt T2,r0 + + +ZGEMM_L1x8_BEGIN: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO, BO,TEMP_REG, B, 8, 1 +#else + mr BO, B + dcbt B, r0 +#endif + dcbt AO, r0 +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG, 8, 1 + mr T1, T6 +/* TEMPS FOR PREFETCH */ + li T2, 1024 + li T3, 1024+512 + addi T1,T1, -2 +/* TEMPS FOR PREFETCH */ + li T4, 2048 + li T5, 2048+512 + srawi. T8, T1, 7 /**(T11-2) % 128x */ +#else + mr T1, K +/* TEMPS FOR PREFETCH */ + li T2, 1024 + li T3, 1024+512 + addi T1,T1, -2 +/* TEMPS FOR PREFETCH */ + li T4, 2048 + li T5, 2048+512 + srawi. T8, T1, 7 /**(K-2) % 128x */ +#endif + KERNEL1x8_ZERO_AND_PRIME_MMA + ble ZGEMM_L1x8_SUB0 + bl ZGEMM_L1x8_LMAIN_SUB + andi. L, T1, 127 + ble ZGEMM_L1x8_SAVE + b ZGEMM_L1x8_SUB2 + + +ZGEMM_L1x8_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 255 + cmpwi T6, 129 +#else + andi. L, K, 255 + cmpwi K, 129 +#endif + li T8, 1 + bne CMP1x8_128K + LOAD_END_1x8 -128, -16 + mtctr T8 + bl ZGEMM_L1x8_K128 + b ZGEMM_L1x8_SAVE + CMP1x8_128K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6, 128 +#else + cmpwi K, 128 +#endif + bne ZGEMM_L1x8_SUB2 + MY_ALIGN + mtctr T8 + addi BO, BO, -32 + addi AO,AO, -256 + bl ZGEMM_L1x8_K128 + b ZGEMM_L1x8_SAVE + MY_ALIGN + + +ZGEMM_L1x8_SUB2: +/*----------------------------------------*/ + andi. T1,L, 64 + ble ZGEMM_L1x8_SUB2_32 + dcbt AO, PRE + dcbt BO, PRE + KERNEL1x8_2 0, 0 + KERNEL1x8_2 1, 0 + dcbt AO, T2 + KERNEL1x8_2 2, 0 + KERNEL1x8_2 3, 0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL1x8_2 4, 0 + KERNEL1x8_2 5, 0 + dcbt AO, T4 + KERNEL1x8_2 6, 0 + KERNEL1x8_2 7, 0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL1x8_2 8, 0 + KERNEL1x8_2 9, 0 + KERNEL1x8_2 10, 0 + KERNEL1x8_2 11, 0 + dcbt BO, T4 + KERNEL1x8_2 12, 0 + KERNEL1x8_2 13, 0 + KERNEL1x8_2 14, 0 + KERNEL1x8_2 15, 0 + KERNEL1x8_2 16, 0 + KERNEL1x8_2 17, 0 + KERNEL1x8_2 18, 0 + KERNEL1x8_2 19, 0 + KERNEL1x8_2 20, 0 + KERNEL1x8_2 21, 0 + KERNEL1x8_2 22, 0 + KERNEL1x8_2 23, 0 + KERNEL1x8_2 24, 0 + KERNEL1x8_2 25, 0 + KERNEL1x8_2 26, 0 + KERNEL1x8_2 27, 0 + KERNEL1x8_2 28, 0 + KERNEL1x8_2 29, 0 + KERNEL1x8_2 30, 0 + KERNEL1x8_2 31, 1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_32: +/*----------------------------------------*/ + andi. 
T1,L, 32 + ble ZGEMM_L1x8_SUB2_16 + dcbt AO, PRE + dcbt BO, PRE + KERNEL1x8_2 0, 0 + KERNEL1x8_2 1, 0 + dcbt AO, T2 + KERNEL1x8_2 2, 0 + KERNEL1x8_2 3, 0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL1x8_2 4, 0 + KERNEL1x8_2 5, 0 + dcbt AO, T4 + KERNEL1x8_2 6, 0 + KERNEL1x8_2 7, 0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL1x8_2 8, 0 + KERNEL1x8_2 9, 0 + KERNEL1x8_2 10, 0 + KERNEL1x8_2 11, 0 + dcbt BO, T4 + KERNEL1x8_2 12, 0 + KERNEL1x8_2 13, 0 + KERNEL1x8_2 14, 0 + KERNEL1x8_2 15, 1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_16: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L1x8_SUB2_8 + dcbt AO, PRE + dcbt BO, PRE + KERNEL1x8_2 0, 0 + KERNEL1x8_2 1, 0 + dcbt AO, T2 + KERNEL1x8_2 2, 0 + KERNEL1x8_2 3, 0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL1x8_2 4, 0 + KERNEL1x8_2 5, 0 + dcbt AO, T4 + KERNEL1x8_2 6, 0 + KERNEL1x8_2 7, 1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L1x8_SUB2_4 + KERNEL1x8_2 0, 0 + KERNEL1x8_2 1, 0 + KERNEL1x8_2 2, 0 + KERNEL1x8_2 3, 1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L1x8_SUB2_2 + KERNEL1x8_2 0, 0 + KERNEL1x8_2 1, 1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L1x8_SUB2_1 + KERNEL1x8_2 0, 1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L1x8_SAVE + LOAD_END_1x8 128, 16 + + +ZGEMM_L1x8_SAVE: +/*----------------------------------------*/ + addic. I, I, -1 + KERNEL1x8_UNPRIME_MMA + SAVE1x8 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG, BO,AO, 8, 1 +#endif + bgt ZGEMM_L1x8_BEGIN + andi. T2, M, 7 + ble ZGEMM_L1x1_END + andi. T1, M, 4 + ble ZGEMM_L1x4_END + b ZGEMM_L1x4_BEGIN + MY_ALIGN + + +ZGEMM_L1x8_END: +/*----------------------------------------*/ + + +ZGEMM_L1x4_BEGIN: +/*----------------------------------------*/ + andi. T2, M, 7 + ble ZGEMM_L1x1_END + andi. T1, M, 4 + ble ZGEMM_L1x4_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO, BO,TEMP_REG, B, 4, 1 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG, 4, 1 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + KERNEL1x4_ZERO_AND_PRIME_MMA + ble ZGEMM_L1x4_SUB0 + bl ZGEMM_1x4_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L1x4_SAVE + b ZGEMM_L1x4_SUB2 + + +ZGEMM_L1x4_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6, 33 +#else + andi. L, K, 63 + cmpwi K, 33 +#endif + li T8, 1 + bne CMP1x4_32K + LOAD_END_1x4 -64, -16 + mtctr T8 + bl ZGEMM_L1x4_K32 + b ZGEMM_L1x4_SAVE + CMP1x4_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6, 32 +#else + cmpwi K, 32 +#endif + bne ZGEMM_L1x4_SUB2 + MY_ALIGN + mtctr T8 + addi BO, BO, -32 + addi AO,AO, -128 + bl ZGEMM_L1x4_K32 + b ZGEMM_L1x4_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L1x4_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L1x4_SUB2_8 + KERNEL1x4_2 0, 0 + KERNEL1x4_2 1, 0 + KERNEL1x4_2 2, 0 + KERNEL1x4_2 3, 0 + KERNEL1x4_2 4, 0 + KERNEL1x4_2 5, 0 + KERNEL1x4_2 6, 0 + KERNEL1x4_2 7, 1 + MY_ALIGN + + +ZGEMM_L1x4_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L1x4_SUB2_4 + KERNEL1x4_2 0, 0 + KERNEL1x4_2 1, 0 + KERNEL1x4_2 2, 0 + KERNEL1x4_2 3, 1 + MY_ALIGN + + +ZGEMM_L1x4_SUB2_4: +/*----------------------------------------*/ + andi. 
T1,L, 4 + ble ZGEMM_L1x4_SUB2_2 + KERNEL1x4_2 0, 0 + KERNEL1x4_2 1, 1 + MY_ALIGN + + +ZGEMM_L1x4_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L1x4_SUB2_1 + KERNEL1x4_2 0, 1 + MY_ALIGN + + +ZGEMM_L1x4_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L1x4_SAVE + LOAD_END_1x4 64,16 + + + +ZGEMM_L1x4_SAVE: +/*----------------------------------------*/ + KERNEL1x4_UNPRIME_MMA + SAVE1x4 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG, BO,AO, 4, 1 +#endif + + +ZGEMM_L1x4_END: +/*----------------------------------------*/ + + +ZGEMM_L1x2_BEGIN: +/*----------------------------------------*/ + andi. T1, M, 2 + ble ZGEMM_L1x2_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO, BO,TEMP_REG, B, 2, 1 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG, 2, 1 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + KERNEL1x2_ZERO_AND_PRIME_MMA + ble ZGEMM_L1x2_SUB0 + bl ZGEMM_1x2_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L1x2_SAVE + b ZGEMM_L1x2_SUB2 + + +ZGEMM_L1x2_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6, 33 +#else + andi. L, K, 63 + cmpwi K, 33 +#endif + li T8, 1 + bne CMP1x2_32K + LOAD_END_1x2 -32, -16 + mtctr T8 + bl ZGEMM_L1x2_K32 + b ZGEMM_L1x2_SAVE + CMP1x2_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6, 32 +#else + cmpwi K, 32 +#endif + bne ZGEMM_L1x2_SUB2 + MY_ALIGN + mtctr T8 + addi BO, BO, -32 + addi AO,AO, -64 + bl ZGEMM_L1x2_K32 + b ZGEMM_L1x2_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L1x2_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L1x2_SUB2_8 + KERNEL1x2_2 0, 0 + KERNEL1x2_2 1, 0 + KERNEL1x2_2 2, 0 + KERNEL1x2_2 3, 0 + KERNEL1x2_2 4, 0 + KERNEL1x2_2 5, 0 + KERNEL1x2_2 6, 0 + KERNEL1x2_2 7, 1 + MY_ALIGN + + +ZGEMM_L1x2_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L1x2_SUB2_4 + KERNEL1x2_2 0, 0 + KERNEL1x2_2 1, 0 + KERNEL1x2_2 2, 0 + KERNEL1x2_2 3, 1 + MY_ALIGN + + +ZGEMM_L1x2_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L1x2_SUB2_2 + KERNEL1x2_2 0, 0 + KERNEL1x2_2 1, 1 + MY_ALIGN + + +ZGEMM_L1x2_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L1x2_SUB2_1 + KERNEL1x2_2 0, 1 + MY_ALIGN + + +ZGEMM_L1x2_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L1x2_SAVE + LOAD_END_1x2 32,16 + + +ZGEMM_L1x2_SAVE: +/*----------------------------------------*/ + KERNEL1x2_UNPRIME_MMA + SAVE1x2 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG, BO,AO, 2, 1 +#endif + + +ZGEMM_L1x2_END: +/*----------------------------------------*/ + + +ZGEMM_L1x1_BEGIN: +/*----------------------------------------*/ + andi. T1, M, 1 + ble ZGEMM_L1x1_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO, BO,TEMP_REG, B, 1, 1 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG, 1, 1 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + ZERO1x1 + ble ZGEMM_L1x1_SUB0 + bl ZGEMM_1x1_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L1x1_SAVE + b ZGEMM_L1x1_SUB2 + + +ZGEMM_L1x1_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6, 33 +#else + andi. 
L, K, 63 + cmpwi K, 33 +#endif + li T8, 1 + bne CMP1x1_32K + addi BO, BO, -16 + addi AO,AO, -16 + LOAD1x1O 16, 16 + END1x1_WITHOUT_ADD + LOAD1x1_2O 32, 32 + mtctr T8 + bl ZGEMM_L1x1_K32 + b ZGEMM_L1x1_SAVE + CMP1x1_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6, 32 +#else + cmpwi K, 32 +#endif + bne ZGEMM_L1x1_SUB2 + MY_ALIGN + mtctr T8 + addi BO, BO, -32 + addi AO,AO, -32 + LOAD1x1_2O 32, 32 + bl ZGEMM_L1x1_K32 + b ZGEMM_L1x1_SAVE + MY_ALIGN + + +ZGEMM_L1x1_SUB2: +/*----------------------------------------*/ + andi. T1, L, 16 + ble ZGEMM_L1x1_SUB2_8 + LOAD1x1_2 + KERNEL1x1_L2 32, 32, 0, 0 + KERNEL1x1_L2 32, 32, 1, 0 + KERNEL1x1_L2 32, 32, 2, 0 + KERNEL1x1_L2 32, 32, 3, 0 + KERNEL1x1_L2 32, 32, 4, 0 + KERNEL1x1_L2 32, 32, 5, 0 + KERNEL1x1_L2 32, 32, 6, 0 + KERNEL1x1_E2 32, 32, 7, 1 + MY_ALIGN + + +ZGEMM_L1x1_SUB2_8: +/*----------------------------------------*/ + andi. T1, L, 8 + ble ZGEMM_L1x1_SUB2_4 + LOAD1x1_2 + KERNEL1x1_L2 32, 32, 0, 0 + KERNEL1x1_L2 32, 32, 1, 0 + KERNEL1x1_L2 32, 32, 2, 0 + KERNEL1x1_E2 32, 32, 3, 1 + MY_ALIGN + + +ZGEMM_L1x1_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L1x1_SUB2_2 + LOAD1x1_2 + KERNEL1x1_L2 32, 32, 0, 0 + KERNEL1x1_E2 32, 32, 1, 1 + MY_ALIGN + + +ZGEMM_L1x1_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L1x1_SUB2_1 + LOAD1x1_2 + KERNEL1x1_E2 32, 32, 0, 1 + MY_ALIGN + + +ZGEMM_L1x1_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L1x1_SAVE + KERNEL1x1 + + +ZGEMM_L1x1_SAVE: +/*----------------------------------------*/ + SAVE1x1 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG, BO,AO, 1, 1 +#endif + + +ZGEMM_L1x1_END: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) && !defined(LEFT) + addi TEMP_REG, TEMP_REG, 1 +#endif + + +ZGEMM_L1_END: +/*----------------------------------------*/ diff --git a/kernel/power/zgemm_logic_power9.S b/kernel/power/zgemm_logic_power9.S new file mode 100644 index 000000000..fe5d8ade2 --- /dev/null +++ b/kernel/power/zgemm_logic_power9.S @@ -0,0 +1,1891 @@ +/*************************************************************************** +Copyright (c) 2013-2019, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ +#define MY_ALIGN .align 3 +b ZGEMM_L2 +/* MINI SUBROUTINES */ +/* 2x8 MAIN 128x+2 LOOP */ + + +ZGEMM_L2x8_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD2x8_2 + MY_ALIGN +ZGEMM_L2x8_LOOP: +/*----------------------------------------*/ + dcbt AO, PRE + dcbt BO, PRE + KERNEL2x8_L2 256,64,0,0 +ZGEMM_L2x8_K128: +/*----------------------------------------*/ + KERNEL2x8_L2 256,64,1,0 + dcbt AO, T2 + KERNEL2x8_L2 256,64,2,0 + KERNEL2x8_L2 256,64,3,0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL2x8_L2 256,64,4,0 + KERNEL2x8_L2 256,64,5,0 + dcbt AO, T4 + KERNEL2x8_L2 256,64,6,0 + KERNEL2x8_L2 256,64,7,0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL2x8_L2 256,64,8,0 + KERNEL2x8_L2 256,64,9,0 + KERNEL2x8_L2 256,64,10,0 + KERNEL2x8_L2 256,64,11,0 + dcbt BO, T4 + KERNEL2x8_L2 256,64,12,0 + KERNEL2x8_L2 256,64,13,0 + KERNEL2x8_L2 256,64,14,0 + KERNEL2x8_L2 256,64,15,0 + KERNEL2x8_L2 256,64,16,0 + KERNEL2x8_L2 256,64,17,0 + KERNEL2x8_L2 256,64,18,0 + KERNEL2x8_L2 256,64,19,0 + KERNEL2x8_L2 256,64,20,0 + KERNEL2x8_L2 256,64,21,0 + KERNEL2x8_L2 256,64,22,0 + KERNEL2x8_L2 256,64,23,0 + KERNEL2x8_L2 256,64,24,0 + KERNEL2x8_L2 256,64,25,0 + KERNEL2x8_L2 256,64,26,0 + KERNEL2x8_L2 256,64,27,0 + KERNEL2x8_L2 256,64,28,0 + KERNEL2x8_L2 256,64,29,0 + KERNEL2x8_L2 256,64,30,0 + KERNEL2x8_L2 256,64,31,0 + KERNEL2x8_L2 256,64,32,0 + KERNEL2x8_L2 256,64,33,0 + KERNEL2x8_L2 256,64,34,0 + KERNEL2x8_L2 256,64,35,0 + KERNEL2x8_L2 256,64,36,0 + KERNEL2x8_L2 256,64,37,0 + KERNEL2x8_L2 256,64,38,0 + KERNEL2x8_L2 256,64,39,0 + KERNEL2x8_L2 256,64,40,0 + KERNEL2x8_L2 256,64,41,0 + KERNEL2x8_L2 256,64,42,0 + KERNEL2x8_L2 256,64,43,0 + KERNEL2x8_L2 256,64,44,0 + KERNEL2x8_L2 256,64,45,0 + KERNEL2x8_L2 256,64,46,0 + KERNEL2x8_L2 256,64,47,0 + KERNEL2x8_L2 256,64,48,0 + KERNEL2x8_L2 256,64,49,0 + KERNEL2x8_L2 256,64,50,0 + KERNEL2x8_L2 256,64,51,0 + KERNEL2x8_L2 256,64,52,0 + KERNEL2x8_L2 256,64,53,0 + KERNEL2x8_L2 256,64,54,0 + KERNEL2x8_L2 256,64,55,0 + KERNEL2x8_L2 256,64,56,0 + KERNEL2x8_L2 256,64,57,0 + KERNEL2x8_L2 256,64,58,0 + KERNEL2x8_L2 256,64,59,0 + KERNEL2x8_L2 256,64,60,0 + KERNEL2x8_L2 256,64,61,0 + KERNEL2x8_L2 256,64,62,0 + KERNEL2x8_L2 256,64,63,1 + bdnz ZGEMM_L2x8_LOOP + MY_ALIGN +ZGEMM_L2x8_LOOP_END: +/*----------------------------------------*/ + END2x8_2 + blr + MY_ALIGN + + +ZGEMM_2x8_L64_SUB: +/*----------------------------------------*/ + LOAD2x8_2 + dcbt AO, PRE + dcbt BO, PRE + KERNEL2x8_L2 256,64,0,0 + KERNEL2x8_L2 256,64,1,0 + dcbt AO, T2 + KERNEL2x8_L2 256,64,2,0 + KERNEL2x8_L2 256,64,3,0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL2x8_L2 256,64,4,0 + KERNEL2x8_L2 256,64,5,0 + dcbt AO, T4 + KERNEL2x8_L2 256,64,6,0 + KERNEL2x8_L2 256,64,7,0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL2x8_L2 256,64,8,0 + KERNEL2x8_L2 256,64,9,0 + KERNEL2x8_L2 256,64,10,0 + KERNEL2x8_L2 256,64,11,0 + dcbt BO, T4 + KERNEL2x8_L2 256,64,12,0 + KERNEL2x8_L2 256,64,13,0 + KERNEL2x8_L2 256,64,14,0 + KERNEL2x8_L2 256,64,15,0 + KERNEL2x8_L2 
256,64,16,0 + KERNEL2x8_L2 256,64,17,0 + KERNEL2x8_L2 256,64,18,0 + KERNEL2x8_L2 256,64,19,0 + KERNEL2x8_L2 256,64,20,0 + KERNEL2x8_L2 256,64,21,0 + KERNEL2x8_L2 256,64,22,0 + KERNEL2x8_L2 256,64,23,0 + KERNEL2x8_L2 256,64,24,0 + KERNEL2x8_L2 256,64,25,0 + KERNEL2x8_L2 256,64,26,0 + KERNEL2x8_L2 256,64,27,0 + KERNEL2x8_L2 256,64,28,0 + KERNEL2x8_L2 256,64,29,0 + KERNEL2x8_L2 256,64,30,0 + KERNEL2x8_E2 256,64,31,1 + blr + MY_ALIGN + + +ZGEMM_2x8_L32_SUB: +/*----------------------------------------*/ + LOAD2x8_2 + dcbt AO, PRE + dcbt BO, PRE + KERNEL2x8_L2 256,64,0,0 + KERNEL2x8_L2 256,64,1,0 + dcbt AO, T2 + KERNEL2x8_L2 256,64,2,0 + KERNEL2x8_L2 256,64,3,0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL2x8_L2 256,64,4,0 + KERNEL2x8_L2 256,64,5,0 + dcbt AO, T4 + KERNEL2x8_L2 256,64,6,0 + KERNEL2x8_L2 256,64,7,0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL2x8_L2 256,64,8,0 + KERNEL2x8_L2 256,64,9,0 + KERNEL2x8_L2 256,64,10,0 + KERNEL2x8_L2 256,64,11,0 + dcbt BO, T4 + KERNEL2x8_L2 256,64,12,0 + KERNEL2x8_L2 256,64,13,0 + KERNEL2x8_L2 256,64,14,0 + KERNEL2x8_E2 256,64,15,1 + blr + MY_ALIGN + + +ZGEMM_2x8_L16_SUB: +/*----------------------------------------*/ + LOAD2x8_2 + dcbt AO, PRE + dcbt BO, PRE + KERNEL2x8_L2 256,64,0,0 + KERNEL2x8_L2 256,64,1,0 + dcbt AO, T2 + KERNEL2x8_L2 256,64,2,0 + KERNEL2x8_L2 256,64,3,0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL2x8_L2 256,64,4,0 + KERNEL2x8_L2 256,64,5,0 + dcbt AO, T4 + KERNEL2x8_L2 256,64,6,0 + KERNEL2x8_E2 256,64,7,1 + blr + MY_ALIGN + + +ZGEMM_2x4_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD2x4_2 + MY_ALIGN +ZGEMM_L2x4_LOOP: +/*----------------------------------------*/ + KERNEL2x4_L2 128,64,0,0 +ZGEMM_L2x4_K32: +/*----------------------------------------*/ + KERNEL2x4_L2 128,64,1,0 + KERNEL2x4_L2 128,64,2,0 + KERNEL2x4_L2 128,64,3,0 + KERNEL2x4_L2 128,64,4,0 + KERNEL2x4_L2 128,64,5,0 + KERNEL2x4_L2 128,64,6,0 + KERNEL2x4_L2 128,64,7,0 + KERNEL2x4_L2 128,64,8,0 + KERNEL2x4_L2 128,64,9,0 + KERNEL2x4_L2 128,64,10,0 + KERNEL2x4_L2 128,64,11,0 + KERNEL2x4_L2 128,64,12,0 + KERNEL2x4_L2 128,64,13,0 + KERNEL2x4_L2 128,64,14,0 + KERNEL2x4_L2 128,64,15,1 + bdnz ZGEMM_L2x4_LOOP + MY_ALIGN +ZGEMM_L2x4_LOOP_END: +/*----------------------------------------*/ + END2x4_2 + blr + MY_ALIGN + + +ZGEMM_2x4_L16_SUB: +/*----------------------------------------*/ + LOAD2x4_2 + KERNEL2x4_L2 128,64,0,0 + KERNEL2x4_L2 128,64,1,0 + KERNEL2x4_L2 128,64,2,0 + KERNEL2x4_L2 128,64,3,0 + KERNEL2x4_L2 128,64,4,0 + KERNEL2x4_L2 128,64,5,0 + KERNEL2x4_L2 128,64,6,0 + KERNEL2x4_E2 128,64,7,1 + blr + MY_ALIGN + + +ZGEMM_2x4_L8_SUB: +/*----------------------------------------*/ + LOAD2x4_2 + KERNEL2x4_L2 128,64,0,0 + KERNEL2x4_L2 128,64,1,0 + KERNEL2x4_L2 128,64,2,0 + KERNEL2x4_E2 128,64,3,1 + blr + + +ZGEMM_2x2_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD2x2_2 + MY_ALIGN +ZGEMM_L2x2_LOOP: +/*----------------------------------------*/ + KERNEL2x2_L2 64,64,0,0 +ZGEMM_L2x2_K32: +/*----------------------------------------*/ + KERNEL2x2_L2 64,64,1,0 + KERNEL2x2_L2 64,64,2,0 + KERNEL2x2_L2 64,64,3,0 + KERNEL2x2_L2 64,64,4,0 + KERNEL2x2_L2 64,64,5,0 + KERNEL2x2_L2 64,64,6,0 + KERNEL2x2_L2 64,64,7,0 + KERNEL2x2_L2 64,64,8,0 + KERNEL2x2_L2 64,64,9,0 + KERNEL2x2_L2 64,64,10,0 + KERNEL2x2_L2 64,64,11,0 + KERNEL2x2_L2 64,64,12,0 + KERNEL2x2_L2 64,64,13,0 + KERNEL2x2_L2 64,64,14,0 + KERNEL2x2_L2 64,64,15,1 + bdnz ZGEMM_L2x2_LOOP + MY_ALIGN + + +ZGEMM_L2x2_LOOP_END: +/*----------------------------------------*/ + END2x2_2 + blr + MY_ALIGN +ZGEMM_2x2_L16_SUB: 
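+/* note added for readability: 16-iteration K-remainder path for the 2x2
+   tile, reached with "bl ZGEMM_2x2_L16_SUB" from ZGEMM_L2x2_SUB2 */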
+/*----------------------------------------*/ + LOAD2x2_2 + KERNEL2x2_L2 64,64,0,0 + KERNEL2x2_L2 64,64,1,0 + KERNEL2x2_L2 64,64,2,0 + KERNEL2x2_L2 64,64,3,0 + KERNEL2x2_L2 64,64,4,0 + KERNEL2x2_L2 64,64,5,0 + KERNEL2x2_L2 64,64,6,0 + KERNEL2x2_E2 64,64,7,1 + blr + MY_ALIGN +ZGEMM_2x2_L8_SUB: +/*----------------------------------------*/ + LOAD2x2_2 + KERNEL2x2_L2 64,64,0,0 + KERNEL2x2_L2 64,64,1,0 + KERNEL2x2_L2 64,64,2,0 + KERNEL2x2_E2 64,64,3,1 + blr + + +ZGEMM_2x1_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD2x1_2 + MY_ALIGN +ZGEMM_L2x1_LOOP: +/*----------------------------------------*/ + KERNEL2x1_L2 32,64,0,0 +ZGEMM_L2x1_K32: +/*----------------------------------------*/ + KERNEL2x1_L2 32,64,1,0 + KERNEL2x1_L2 32,64,2,0 + KERNEL2x1_L2 32,64,3,0 + KERNEL2x1_L2 32,64,4,0 + KERNEL2x1_L2 32,64,5,0 + KERNEL2x1_L2 32,64,6,0 + KERNEL2x1_L2 32,64,7,0 + KERNEL2x1_L2 32,64,8,0 + KERNEL2x1_L2 32,64,9,0 + KERNEL2x1_L2 32,64,10,0 + KERNEL2x1_L2 32,64,11,0 + KERNEL2x1_L2 32,64,12,0 + KERNEL2x1_L2 32,64,13,0 + KERNEL2x1_L2 32,64,14,0 + KERNEL2x1_L2 32,64,15,1 + bdnz ZGEMM_L2x1_LOOP + MY_ALIGN +ZGEMM_L2x1_LOOP_END: +/*----------------------------------------*/ + END2x1_2 + blr + + MY_ALIGN +ZGEMM_2x1_L16_SUB: +/*----------------------------------------*/ + LOAD2x1_2 + KERNEL2x1_L2 32,64,0,0 + KERNEL2x1_L2 32,64,1,0 + KERNEL2x1_L2 32,64,2,0 + KERNEL2x1_L2 32,64,3,0 + KERNEL2x1_L2 32,64,4,0 + KERNEL2x1_L2 32,64,5,0 + KERNEL2x1_L2 32,64,6,0 + KERNEL2x1_E2 32,64,7,1 + blr + MY_ALIGN + + +ZGEMM_2x1_L8_SUB: +/*----------------------------------------*/ + LOAD2x1_2 + KERNEL2x1_L2 32,64,0,0 + KERNEL2x1_L2 32,64,1,0 + KERNEL2x1_L2 32,64,2,0 + KERNEL2x1_E2 32,64,3,1 + blr + + + +/* MAIN LOOP BEGINS */ + MY_ALIGN + + +ZGEMM_L2: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) && !defined(LEFT) + neg TEMP_REG, OFFSET +#endif + srawi. J, N, 1 + ble ZGEMM_L2_END + + +ZGEMM_L2_BEGIN: +/*----------------------------------------*/ + mr CO, C + slwi T1, LDC , 1 + add T2,C,LDC + mr AO, A + add C, C, T1 +#if defined(TRMMKERNEL) && defined(LEFT) + mr TEMP_REG, OFFSET /*off = offset;*/ +#endif + srawi. I, M, 3 + ble ZGEMM_L2x8_END + dcbt CO,r0 /*just prefetch*/ + dcbt T2,r0 + + +ZGEMM_L2x8_BEGIN: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO,BO,TEMP_REG,B,8,2 +#else + mr BO, B + dcbt B, r0 +#endif + dcbt AO, r0 +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG,8,2 + mr T1, T6 +/* TEMPS FOR PREFETCH */ + li T2, 1024 + li T3, 1024+512 + addi T1,T1, -2 +/* TEMPS FOR PREFETCH */ + li T4, 2048 + li T5, 2048+512 + srawi. T8, T1, 7 /**(T11-2) % 128x */ +#else + mr T1, K +/* TEMPS FOR PREFETCH */ + li T2, 1024 + li T3, 1024+512 + addi T1,T1, -2 +/* TEMPS FOR PREFETCH */ + li T4, 2048 + li T5, 2048+512 + srawi. T8, T1, 7 /**(K-2) % 128x */ +#endif + ZERO2x8 + ble ZGEMM_L2x8_SUB0 + bl ZGEMM_L2x8_LMAIN_SUB + andi. L, T1, 127 + ble ZGEMM_L2x8_SAVE + b ZGEMM_L2x8_SUB2 + + +ZGEMM_L2x8_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 255 + cmpwi T6,129 +#else + andi. 
L, K, 255 + cmpwi K,129 +#endif + li T8,1 + bne CMP2x8_128K + addi BO,BO,-32 + addi AO,AO,-128 + LOAD2x8O 128,32 + END2x8_WITHOUT_ADD + LOAD2x8_2O 256, 64 + mtctr T8 + bl ZGEMM_L2x8_K128 + b ZGEMM_L2x8_SAVE + CMP2x8_128K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6,128 +#else + cmpwi K,128 +#endif + bne ZGEMM_L2x8_SUB2 + MY_ALIGN + mtctr T8 + addi BO,BO,-64 + addi AO,AO,-256 + LOAD2x8_2O 256,64 + bl ZGEMM_L2x8_K128 + b ZGEMM_L2x8_SAVE + MY_ALIGN + + +ZGEMM_L2x8_SUB2: +/*----------------------------------------*/ + andi. T1,L, 64 + ble ZGEMM_L2x8_SUB2_32 + bl ZGEMM_2x8_L64_SUB + MY_ALIGN + + +ZGEMM_L2x8_SUB2_32: +/*----------------------------------------*/ + andi. T1,L, 32 + ble ZGEMM_L2x8_SUB2_16 + bl ZGEMM_2x8_L32_SUB + MY_ALIGN + + +ZGEMM_L2x8_SUB2_16: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L2x8_SUB2_8 + bl ZGEMM_2x8_L16_SUB + MY_ALIGN + + +ZGEMM_L2x8_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L2x8_SUB2_4 + LOAD2x8_2 + KERNEL2x8_L2 256,64, 0,0 + KERNEL2x8_L2 256,64, 1,0 + KERNEL2x8_L2 256,64, 2,0 + KERNEL2x8_E2 256,64, 3,1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L2x8_SUB2_2 + LOAD2x8_2 + KERNEL2x8_L2 256,64, 0,0 + KERNEL2x8_E2 256,64, 1,1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L2x8_SUB2_1 + LOAD2x8_2 + KERNEL2x8_E2 256,64, 0,1 + MY_ALIGN + + +ZGEMM_L2x8_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L2x8_SAVE + KERNEL2x8 + + +ZGEMM_L2x8_SAVE: +/*----------------------------------------*/ + addic. I, I, -1 + SAVE2x8 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG,BO,AO,8,2 +#endif + bgt ZGEMM_L2x8_BEGIN + andi. T2, M, 7 + ble ZGEMM_L2x1_END + andi. T1, M, 4 + ble ZGEMM_L2x4_END + b ZGEMM_L2x4_BEGIN + MY_ALIGN + + +ZGEMM_L2x8_END: +/*----------------------------------------*/ + + +ZGEMM_L2x4_BEGIN: +/*----------------------------------------*/ + andi. T2, M, 7 + ble ZGEMM_L2x1_END + andi. T1, M, 4 + ble ZGEMM_L2x4_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO,BO,TEMP_REG,B,4,2 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG,4,2 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + ZERO2x4 + ble ZGEMM_L2x4_SUB0 + bl ZGEMM_2x4_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L2x4_SAVE + b ZGEMM_L2x4_SUB2 + + +ZGEMM_L2x4_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6,33 +#else + andi. L, K, 63 + cmpwi K,33 +#endif + li T8,1 + bne CMP2x4_32K + addi BO,BO,-32 + addi AO,AO,-64 + LOAD2x4O 64,32 + END2x4_WITHOUT_ADD + LOAD2x4_2O 128, 64 + mtctr T8 + bl ZGEMM_L2x4_K32 + b ZGEMM_L2x4_SAVE + CMP2x4_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6,32 +#else + cmpwi K,32 +#endif + bne ZGEMM_L2x4_SUB2 + MY_ALIGN + mtctr T8 + addi BO,BO,-64 + addi AO,AO,-128 + LOAD2x4_2O 128,64 + bl ZGEMM_L2x4_K32 + b ZGEMM_L2x4_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L2x4_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L2x4_SUB2_8 + bl ZGEMM_2x4_L16_SUB + MY_ALIGN + + +ZGEMM_L2x4_SUB2_8: +/*----------------------------------------*/ + andi. 
T1,L, 8 + ble ZGEMM_L2x4_SUB2_4 + bl ZGEMM_2x4_L8_SUB + MY_ALIGN + + +ZGEMM_L2x4_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L2x4_SUB2_2 + LOAD2x4_2 + KERNEL2x4_L2 128,64, 0,0 + KERNEL2x4_E2 128,64, 1,1 + MY_ALIGN + + +ZGEMM_L2x4_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L2x4_SUB2_1 + LOAD2x4_2 + KERNEL2x4_E2 128,64, 0,1 + MY_ALIGN + + +ZGEMM_L2x4_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L2x4_SAVE + KERNEL2x4 + + +ZGEMM_L2x4_SAVE: +/*----------------------------------------*/ + SAVE2x4 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG,BO,AO,4,2 +#endif + + +ZGEMM_L2x4_END: +/*----------------------------------------*/ + + +ZGEMM_L2x2_BEGIN: +/*----------------------------------------*/ + andi. T1, M, 2 + ble ZGEMM_L2x2_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO,BO,TEMP_REG,B,2,2 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG,2,2 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + ZERO2x2 + ble ZGEMM_L2x2_SUB0 + bl ZGEMM_2x2_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L2x2_SAVE + b ZGEMM_L2x2_SUB2 + + +ZGEMM_L2x2_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6,33 +#else + andi. L, K, 63 + cmpwi K,33 +#endif + li T8,1 + bne CMP2x2_32K + addi BO,BO,-32 + addi AO,AO,-32 + LOAD2x2O 32,32 + END2x2_WITHOUT_ADD + LOAD2x2_2O 64, 64 + mtctr T8 + bl ZGEMM_L2x2_K32 + b ZGEMM_L2x2_SAVE + CMP2x2_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6,32 +#else + cmpwi K,32 +#endif + bne ZGEMM_L2x2_SUB2 + MY_ALIGN + mtctr T8 + addi BO,BO,-64 + addi AO,AO,-64 + LOAD2x2_2O 64,64 + bl ZGEMM_L2x2_K32 + b ZGEMM_L2x2_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L2x2_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L2x2_SUB2_8 + bl ZGEMM_2x2_L16_SUB + MY_ALIGN + + +ZGEMM_L2x2_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L2x2_SUB2_4 + bl ZGEMM_2x2_L8_SUB + MY_ALIGN + + +ZGEMM_L2x2_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L2x2_SUB2_2 + LOAD2x2_2 + KERNEL2x2_L2 64,64, 0,0 + KERNEL2x2_E2 64,64, 1,1 + MY_ALIGN + + +ZGEMM_L2x2_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L2x2_SUB2_1 + LOAD2x2_2 + KERNEL2x2_E2 64,64, 0,1 + MY_ALIGN + + +ZGEMM_L2x2_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L2x2_SAVE + KERNEL2x2 + + +ZGEMM_L2x2_SAVE: +/*----------------------------------------*/ + SAVE2x2 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG,BO,AO,2,2 +#endif + + +ZGEMM_L2x2_END: +/*----------------------------------------*/ + + +ZGEMM_L2x1_BEGIN: +/*----------------------------------------*/ + andi. T1, M, 1 + ble ZGEMM_L2x1_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO,BO,TEMP_REG,B,1,2 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG,1,2 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + ZERO2x1 + ble ZGEMM_L2x1_SUB0 + bl ZGEMM_2x1_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L2x1_SAVE + b ZGEMM_L2x1_SUB2 + + +ZGEMM_L2x1_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6,33 +#else + andi. 
L, K, 63 + cmpwi K,33 +#endif + li T8,1 + bne CMP2x1_32K + addi BO,BO,-32 + addi AO,AO,-16 + LOAD2x1O 16,32 + END2x1_WITHOUT_ADD + LOAD2x1_2O 32, 64 + mtctr T8 + bl ZGEMM_L2x1_K32 + b ZGEMM_L2x1_SAVE + CMP2x1_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6,32 +#else + cmpwi K,32 +#endif + bne ZGEMM_L2x1_SUB2 + MY_ALIGN + mtctr T8 + addi BO,BO,-64 + addi AO,AO,-32 + LOAD2x1_2O 32,64 + bl ZGEMM_L2x1_K32 + b ZGEMM_L2x1_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L2x1_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L2x1_SUB2_8 + bl ZGEMM_2x1_L16_SUB + MY_ALIGN + + +ZGEMM_L2x1_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L2x1_SUB2_4 + bl ZGEMM_2x1_L8_SUB + MY_ALIGN + + +ZGEMM_L2x1_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L2x1_SUB2_2 + LOAD2x1_2 + KERNEL2x1_L2 32,64, 0,0 + KERNEL2x1_E2 32,64, 1,1 + MY_ALIGN + + +ZGEMM_L2x1_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L2x1_SUB2_1 + LOAD2x1_2 + KERNEL2x1_E2 32,64, 0,1 + MY_ALIGN + + +ZGEMM_L2x1_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L2x1_SAVE + KERNEL2x1 + + +ZGEMM_L2x1_SAVE: +/*----------------------------------------*/ + SAVE2x1 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG,BO,AO,1,2 +#endif + + +ZGEMM_L2x1_END: +/*----------------------------------------*/ + slwi T1, K, 5 + addic. J, J, -1 + add B, B, T1 +#if defined(TRMMKERNEL) && !defined(LEFT) + addi TEMP_REG, TEMP_REG, 2 +#endif + bgt ZGEMM_L2_BEGIN + + +ZGEMM_L2_END: + +b ZGEMM_L1 +/* MINI SUBROUTINES */ +/* 1x8 MAIN 128x+2 LOOP */ + + +ZGEMM_L1x8_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD1x8_2 + MY_ALIGN +ZGEMM_L1x8_LOOP: +/*----------------------------------------*/ + dcbt AO, PRE + dcbt BO, PRE + KERNEL1x8_L2 256,32,0,0 +ZGEMM_L1x8_K128: +/*----------------------------------------*/ + KERNEL1x8_L2 256,32,1,0 + dcbt AO, T2 + KERNEL1x8_L2 256,32,2,0 + KERNEL1x8_L2 256,32,3,0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL1x8_L2 256,32,4,0 + KERNEL1x8_L2 256,32,5,0 + dcbt AO, T4 + KERNEL1x8_L2 256,32,6,0 + KERNEL1x8_L2 256,32,7,0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL1x8_L2 256,32,8,0 + KERNEL1x8_L2 256,32,9,0 + KERNEL1x8_L2 256,32,10,0 + KERNEL1x8_L2 256,32,11,0 + dcbt BO, T4 + KERNEL1x8_L2 256,32,12,0 + KERNEL1x8_L2 256,32,13,0 + KERNEL1x8_L2 256,32,14,0 + KERNEL1x8_L2 256,32,15,0 + KERNEL1x8_L2 256,32,16,0 + KERNEL1x8_L2 256,32,17,0 + KERNEL1x8_L2 256,32,18,0 + KERNEL1x8_L2 256,32,19,0 + KERNEL1x8_L2 256,32,20,0 + KERNEL1x8_L2 256,32,21,0 + KERNEL1x8_L2 256,32,22,0 + KERNEL1x8_L2 256,32,23,0 + KERNEL1x8_L2 256,32,24,0 + KERNEL1x8_L2 256,32,25,0 + KERNEL1x8_L2 256,32,26,0 + KERNEL1x8_L2 256,32,27,0 + KERNEL1x8_L2 256,32,28,0 + KERNEL1x8_L2 256,32,29,0 + KERNEL1x8_L2 256,32,30,0 + KERNEL1x8_L2 256,32,31,0 + KERNEL1x8_L2 256,32,32,0 + KERNEL1x8_L2 256,32,33,0 + KERNEL1x8_L2 256,32,34,0 + KERNEL1x8_L2 256,32,35,0 + KERNEL1x8_L2 256,32,36,0 + KERNEL1x8_L2 256,32,37,0 + KERNEL1x8_L2 256,32,38,0 + KERNEL1x8_L2 256,32,39,0 + KERNEL1x8_L2 256,32,40,0 + KERNEL1x8_L2 256,32,41,0 + KERNEL1x8_L2 256,32,42,0 + KERNEL1x8_L2 256,32,43,0 + KERNEL1x8_L2 256,32,44,0 + KERNEL1x8_L2 256,32,45,0 + KERNEL1x8_L2 256,32,46,0 + KERNEL1x8_L2 256,32,47,0 + KERNEL1x8_L2 256,32,48,0 + KERNEL1x8_L2 256,32,49,0 + KERNEL1x8_L2 256,32,50,0 + KERNEL1x8_L2 256,32,51,0 + KERNEL1x8_L2 256,32,52,0 + KERNEL1x8_L2 256,32,53,0 + KERNEL1x8_L2 256,32,54,0 + KERNEL1x8_L2 
256,32,55,0 + KERNEL1x8_L2 256,32,56,0 + KERNEL1x8_L2 256,32,57,0 + KERNEL1x8_L2 256,32,58,0 + KERNEL1x8_L2 256,32,59,0 + KERNEL1x8_L2 256,32,60,0 + KERNEL1x8_L2 256,32,61,0 + KERNEL1x8_L2 256,32,62,0 + KERNEL1x8_L2 256,32,63,1 + bdnz ZGEMM_L1x8_LOOP + MY_ALIGN +ZGEMM_L1x8_LOOP_END: +/*----------------------------------------*/ + END1x8_2 + blr + MY_ALIGN + + +ZGEMM_1x8_L64_SUB: +/*----------------------------------------*/ + LOAD1x8_2 + dcbt AO, PRE + dcbt BO, PRE + KERNEL1x8_L2 256,32,0,0 + KERNEL1x8_L2 256,32,1,0 + dcbt AO, T2 + KERNEL1x8_L2 256,32,2,0 + KERNEL1x8_L2 256,32,3,0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL1x8_L2 256,32,4,0 + KERNEL1x8_L2 256,32,5,0 + dcbt AO, T4 + KERNEL1x8_L2 256,32,6,0 + KERNEL1x8_L2 256,32,7,0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL1x8_L2 256,32,8,0 + KERNEL1x8_L2 256,32,9,0 + KERNEL1x8_L2 256,32,10,0 + KERNEL1x8_L2 256,32,11,0 + dcbt BO, T4 + KERNEL1x8_L2 256,32,12,0 + KERNEL1x8_L2 256,32,13,0 + KERNEL1x8_L2 256,32,14,0 + KERNEL1x8_L2 256,32,15,0 + KERNEL1x8_L2 256,32,16,0 + KERNEL1x8_L2 256,32,17,0 + KERNEL1x8_L2 256,32,18,0 + KERNEL1x8_L2 256,32,19,0 + KERNEL1x8_L2 256,32,20,0 + KERNEL1x8_L2 256,32,21,0 + KERNEL1x8_L2 256,32,22,0 + KERNEL1x8_L2 256,32,23,0 + KERNEL1x8_L2 256,32,24,0 + KERNEL1x8_L2 256,32,25,0 + KERNEL1x8_L2 256,32,26,0 + KERNEL1x8_L2 256,32,27,0 + KERNEL1x8_L2 256,32,28,0 + KERNEL1x8_L2 256,32,29,0 + KERNEL1x8_L2 256,32,30,0 + KERNEL1x8_E2 256,32,31,1 + blr + MY_ALIGN + + +ZGEMM_1x8_L32_SUB: +/*----------------------------------------*/ + LOAD1x8_2 + dcbt AO, PRE + dcbt BO, PRE + KERNEL1x8_L2 256,32,0,0 + KERNEL1x8_L2 256,32,1,0 + dcbt AO, T2 + KERNEL1x8_L2 256,32,2,0 + KERNEL1x8_L2 256,32,3,0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL1x8_L2 256,32,4,0 + KERNEL1x8_L2 256,32,5,0 + dcbt AO, T4 + KERNEL1x8_L2 256,32,6,0 + KERNEL1x8_L2 256,32,7,0 + dcbt AO, T5 + dcbt BO, T3 + KERNEL1x8_L2 256,32,8,0 + KERNEL1x8_L2 256,32,9,0 + KERNEL1x8_L2 256,32,10,0 + KERNEL1x8_L2 256,32,11,0 + dcbt BO, T4 + KERNEL1x8_L2 256,32,12,0 + KERNEL1x8_L2 256,32,13,0 + KERNEL1x8_L2 256,32,14,0 + KERNEL1x8_E2 256,32,15,1 + blr + MY_ALIGN + + +ZGEMM_1x8_L16_SUB: +/*----------------------------------------*/ + LOAD1x8_2 + dcbt AO, PRE + dcbt BO, PRE + KERNEL1x8_L2 256,32,0,0 + KERNEL1x8_L2 256,32,1,0 + dcbt AO, T2 + KERNEL1x8_L2 256,32,2,0 + KERNEL1x8_L2 256,32,3,0 + dcbt AO, T3 + dcbt BO, T2 + KERNEL1x8_L2 256,32,4,0 + KERNEL1x8_L2 256,32,5,0 + dcbt AO, T4 + KERNEL1x8_L2 256,32,6,0 + KERNEL1x8_E2 256,32,7,1 + blr + MY_ALIGN + + +ZGEMM_1x4_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD1x4_2 + MY_ALIGN + + +ZGEMM_L1x4_LOOP: +/*----------------------------------------*/ + KERNEL1x4_L2 128,32,0,0 + + +ZGEMM_L1x4_K32: +/*----------------------------------------*/ + KERNEL1x4_L2 128,32,1,0 + KERNEL1x4_L2 128,32,2,0 + KERNEL1x4_L2 128,32,3,0 + KERNEL1x4_L2 128,32,4,0 + KERNEL1x4_L2 128,32,5,0 + KERNEL1x4_L2 128,32,6,0 + KERNEL1x4_L2 128,32,7,0 + KERNEL1x4_L2 128,32,8,0 + KERNEL1x4_L2 128,32,9,0 + KERNEL1x4_L2 128,32,10,0 + KERNEL1x4_L2 128,32,11,0 + KERNEL1x4_L2 128,32,12,0 + KERNEL1x4_L2 128,32,13,0 + KERNEL1x4_L2 128,32,14,0 + KERNEL1x4_L2 128,32,15,1 + bdnz ZGEMM_L1x4_LOOP + MY_ALIGN + + +ZGEMM_L1x4_LOOP_END: +/*----------------------------------------*/ + END1x4_2 + blr + MY_ALIGN + + +ZGEMM_1x4_L16_SUB: +/*----------------------------------------*/ + LOAD1x4_2 + KERNEL1x4_L2 128,32,0,0 + KERNEL1x4_L2 128,32,1,0 + KERNEL1x4_L2 128,32,2,0 + KERNEL1x4_L2 128,32,3,0 + KERNEL1x4_L2 128,32,4,0 + KERNEL1x4_L2 128,32,5,0 + KERNEL1x4_L2 128,32,6,0 + 
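+/* note added for readability: the final step uses the _E2 ("end") variant,
+   which completes the accumulation without loading operands for a further
+   iteration */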
KERNEL1x4_E2 128,32,7,1 + blr + MY_ALIGN + + +ZGEMM_1x4_L8_SUB: +/*----------------------------------------*/ + LOAD1x4_2 + KERNEL1x4_L2 128,32,0,0 + KERNEL1x4_L2 128,32,1,0 + KERNEL1x4_L2 128,32,2,0 + KERNEL1x4_E2 128,32,3,1 + blr + + +ZGEMM_1x2_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD1x2_2 + MY_ALIGN + + +ZGEMM_L1x2_LOOP: +/*----------------------------------------*/ + KERNEL1x2_L2 64,32,0,0 + + +ZGEMM_L1x2_K32: +/*----------------------------------------*/ + KERNEL1x2_L2 64,32,1,0 + KERNEL1x2_L2 64,32,2,0 + KERNEL1x2_L2 64,32,3,0 + KERNEL1x2_L2 64,32,4,0 + KERNEL1x2_L2 64,32,5,0 + KERNEL1x2_L2 64,32,6,0 + KERNEL1x2_L2 64,32,7,0 + KERNEL1x2_L2 64,32,8,0 + KERNEL1x2_L2 64,32,9,0 + KERNEL1x2_L2 64,32,10,0 + KERNEL1x2_L2 64,32,11,0 + KERNEL1x2_L2 64,32,12,0 + KERNEL1x2_L2 64,32,13,0 + KERNEL1x2_L2 64,32,14,0 + KERNEL1x2_L2 64,32,15,1 + bdnz ZGEMM_L1x2_LOOP + MY_ALIGN + + +ZGEMM_L1x2_LOOP_END: +/*----------------------------------------*/ + END1x2_2 + blr + MY_ALIGN + + +ZGEMM_1x2_L16_SUB: +/*----------------------------------------*/ + LOAD1x2_2 + KERNEL1x2_L2 64,32,0,0 + KERNEL1x2_L2 64,32,1,0 + KERNEL1x2_L2 64,32,2,0 + KERNEL1x2_L2 64,32,3,0 + KERNEL1x2_L2 64,32,4,0 + KERNEL1x2_L2 64,32,5,0 + KERNEL1x2_L2 64,32,6,0 + KERNEL1x2_E2 64,32,7,1 + blr + MY_ALIGN + + +ZGEMM_1x2_L8_SUB: +/*----------------------------------------*/ + LOAD1x2_2 + KERNEL1x2_L2 64,32,0,0 + KERNEL1x2_L2 64,32,1,0 + KERNEL1x2_L2 64,32,2,0 + KERNEL1x2_E2 64,32,3,1 + blr + + +ZGEMM_1x1_LMAIN_SUB: +/*----------------------------------------*/ + mtctr T8 + LOAD1x1_2 + MY_ALIGN + + +ZGEMM_L1x1_LOOP: +/*----------------------------------------*/ + KERNEL1x1_L2 32,32,0,0 + + +ZGEMM_L1x1_K32: +/*----------------------------------------*/ + KERNEL1x1_L2 32,32,1,0 + KERNEL1x1_L2 32,32,2,0 + KERNEL1x1_L2 32,32,3,0 + KERNEL1x1_L2 32,32,4,0 + KERNEL1x1_L2 32,32,5,0 + KERNEL1x1_L2 32,32,6,0 + KERNEL1x1_L2 32,32,7,0 + KERNEL1x1_L2 32,32,8,0 + KERNEL1x1_L2 32,32,9,0 + KERNEL1x1_L2 32,32,10,0 + KERNEL1x1_L2 32,32,11,0 + KERNEL1x1_L2 32,32,12,0 + KERNEL1x1_L2 32,32,13,0 + KERNEL1x1_L2 32,32,14,0 + KERNEL1x1_L2 32,32,15,1 + bdnz ZGEMM_L1x1_LOOP + MY_ALIGN + + +ZGEMM_L1x1_LOOP_END: +/*----------------------------------------*/ + END1x1_2 + blr + MY_ALIGN + + +ZGEMM_1x1_L16_SUB: +/*----------------------------------------*/ + LOAD1x1_2 + KERNEL1x1_L2 32,32,0,0 + KERNEL1x1_L2 32,32,1,0 + KERNEL1x1_L2 32,32,2,0 + KERNEL1x1_L2 32,32,3,0 + KERNEL1x1_L2 32,32,4,0 + KERNEL1x1_L2 32,32,5,0 + KERNEL1x1_L2 32,32,6,0 + KERNEL1x1_E2 32,32,7,1 + blr + MY_ALIGN + + +ZGEMM_1x1_L8_SUB: +/*----------------------------------------*/ + LOAD1x1_2 + KERNEL1x1_L2 32,32,0,0 + KERNEL1x1_L2 32,32,1,0 + KERNEL1x1_L2 32,32,2,0 + KERNEL1x1_E2 32,32,3,1 + blr + + +/*----------------------N1 BEGINS---------*/ +ZGEMM_L1: +/*----------------------------------------*/ + andi. T1, N, 1 + ble ZGEMM_L1_END + +ZGEMM_L1_BEGIN: +/*----------------------------------------*/ + mr CO, C + + add T2,C,LDC + mr AO, A + add C, C, T1 +#if defined(TRMMKERNEL) && defined(LEFT) + mr TEMP_REG, OFFSET /*off = offset;*/ +#endif + srawi. 
I, M, 3 + ble ZGEMM_L1x8_END + dcbt CO,r0 /*just prefetch*/ + dcbt T2,r0 + + +ZGEMM_L1x8_BEGIN: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO,BO,TEMP_REG,B,8,1 +#else + mr BO, B + dcbt B, r0 +#endif + dcbt AO, r0 +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG,8,1 + mr T1, T6 +/* TEMPS FOR PREFETCH */ + li T2, 1024 + li T3, 1024+512 + addi T1,T1, -2 +/* TEMPS FOR PREFETCH */ + li T4, 2048 + li T5, 2048+512 + srawi. T8, T1, 7 /**(T11-2) % 128x */ +#else + mr T1, K +/* TEMPS FOR PREFETCH */ + li T2, 1024 + li T3, 1024+512 + addi T1,T1, -2 +/* TEMPS FOR PREFETCH */ + li T4, 2048 + li T5, 2048+512 + srawi. T8, T1, 7 /**(K-2) % 128x */ +#endif + ZERO1x8 + ble ZGEMM_L1x8_SUB0 + bl ZGEMM_L1x8_LMAIN_SUB + andi. L, T1, 127 + ble ZGEMM_L1x8_SAVE + b ZGEMM_L1x8_SUB2 + + +ZGEMM_L1x8_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 255 + cmpwi T6,129 +#else + andi. L, K, 255 + cmpwi K,129 +#endif + li T8,1 + bne CMP1x8_128K + addi BO,BO,-16 + addi AO,AO,-128 + LOAD1x8O 128,16 + END1x8_WITHOUT_ADD + LOAD1x8_2O 256, 32 + mtctr T8 + bl ZGEMM_L1x8_K128 + b ZGEMM_L1x8_SAVE + CMP1x8_128K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6,128 +#else + cmpwi K,128 +#endif + bne ZGEMM_L1x8_SUB2 + MY_ALIGN + mtctr T8 + addi BO,BO,-32 + addi AO,AO,-256 + LOAD1x8_2O 256,32 + bl ZGEMM_L1x8_K128 + b ZGEMM_L1x8_SAVE + MY_ALIGN + + +ZGEMM_L1x8_SUB2: +/*----------------------------------------*/ + andi. T1,L, 64 + ble ZGEMM_L1x8_SUB2_32 + bl ZGEMM_1x8_L64_SUB + MY_ALIGN + + +ZGEMM_L1x8_SUB2_32: +/*----------------------------------------*/ + andi. T1,L, 32 + ble ZGEMM_L1x8_SUB2_16 + bl ZGEMM_1x8_L32_SUB + MY_ALIGN + + +ZGEMM_L1x8_SUB2_16: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L1x8_SUB2_8 + bl ZGEMM_1x8_L16_SUB + MY_ALIGN + + +ZGEMM_L1x8_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L1x8_SUB2_4 + LOAD1x8_2 + KERNEL1x8_L2 256,32, 0,0 + KERNEL1x8_L2 256,32, 1,0 + KERNEL1x8_L2 256,32, 2,0 + KERNEL1x8_E2 256,32, 3,1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L1x8_SUB2_2 + LOAD1x8_2 + KERNEL1x8_L2 256,32, 0,0 + KERNEL1x8_E2 256,32, 1,1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L1x8_SUB2_1 + LOAD1x8_2 + KERNEL1x8_E2 256,32, 0,1 + MY_ALIGN + + +ZGEMM_L1x8_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L1x8_SAVE + KERNEL1x8 + + +ZGEMM_L1x8_SAVE: +/*----------------------------------------*/ + addic. I, I, -1 + SAVE1x8 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG,BO,AO,8,1 +#endif + bgt ZGEMM_L1x8_BEGIN + andi. T2, M, 7 + ble ZGEMM_L1x1_END + andi. T1, M, 4 + ble ZGEMM_L1x4_END + b ZGEMM_L1x4_BEGIN + MY_ALIGN + + +ZGEMM_L1x8_END: +/*----------------------------------------*/ + + +ZGEMM_L1x4_BEGIN: +/*----------------------------------------*/ + andi. T2, M, 7 + ble ZGEMM_L1x1_END + andi. T1, M, 4 + ble ZGEMM_L1x4_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO,BO,TEMP_REG,B,4,1 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG,4,1 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + ZERO1x4 + ble ZGEMM_L1x4_SUB0 + bl ZGEMM_1x4_LMAIN_SUB + andi. 
L, T1, 31 + ble ZGEMM_L1x4_SAVE + b ZGEMM_L1x4_SUB2 + + +ZGEMM_L1x4_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6,33 +#else + andi. L, K, 63 + cmpwi K,33 +#endif + li T8,1 + bne CMP1x4_32K + addi BO,BO,-16 + addi AO,AO,-64 + LOAD1x4O 64,16 + END1x4_WITHOUT_ADD + LOAD1x4_2O 128, 32 + mtctr T8 + bl ZGEMM_L1x4_K32 + b ZGEMM_L1x4_SAVE + CMP1x4_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6,32 +#else + cmpwi K,32 +#endif + bne ZGEMM_L1x4_SUB2 + MY_ALIGN + mtctr T8 + addi BO,BO,-32 + addi AO,AO,-128 + LOAD1x4_2O 128,32 + bl ZGEMM_L1x4_K32 + b ZGEMM_L1x4_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L1x4_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L1x4_SUB2_8 + bl ZGEMM_1x4_L16_SUB + MY_ALIGN + + +ZGEMM_L1x4_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L1x4_SUB2_4 + bl ZGEMM_1x4_L8_SUB + MY_ALIGN + + +ZGEMM_L1x4_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L1x4_SUB2_2 + LOAD1x4_2 + KERNEL1x4_L2 128,32, 0,0 + KERNEL1x4_E2 128,32, 1,1 + MY_ALIGN + + +ZGEMM_L1x4_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L1x4_SUB2_1 + LOAD1x4_2 + KERNEL1x4_E2 128,32, 0,1 + MY_ALIGN + + +ZGEMM_L1x4_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L1x4_SAVE + KERNEL1x4 + + +ZGEMM_L1x4_SAVE: +/*----------------------------------------*/ + SAVE1x4 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG,BO,AO,4,1 +#endif + + +ZGEMM_L1x4_END: +/*----------------------------------------*/ + + +ZGEMM_L1x2_BEGIN: +/*----------------------------------------*/ + andi. T1, M, 2 + ble ZGEMM_L1x2_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO,BO,TEMP_REG,B,2,1 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG,2,1 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + ZERO1x2 + ble ZGEMM_L1x2_SUB0 + bl ZGEMM_1x2_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L1x2_SAVE + b ZGEMM_L1x2_SUB2 + + +ZGEMM_L1x2_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6,33 +#else + andi. L, K, 63 + cmpwi K,33 +#endif + li T8,1 + bne CMP1x2_32K + addi BO,BO,-16 + addi AO,AO,-32 + LOAD1x2O 32,16 + END1x2_WITHOUT_ADD + LOAD1x2_2O 64, 32 + mtctr T8 + bl ZGEMM_L1x2_K32 + b ZGEMM_L1x2_SAVE + CMP1x2_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6,32 +#else + cmpwi K,32 +#endif + bne ZGEMM_L1x2_SUB2 + MY_ALIGN + mtctr T8 + addi BO,BO,-32 + addi AO,AO,-64 + LOAD1x2_2O 64,32 + bl ZGEMM_L1x2_K32 + b ZGEMM_L1x2_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L1x2_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L1x2_SUB2_8 + bl ZGEMM_1x2_L16_SUB + MY_ALIGN + + +ZGEMM_L1x2_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L1x2_SUB2_4 + bl ZGEMM_1x2_L8_SUB + MY_ALIGN + + +ZGEMM_L1x2_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L1x2_SUB2_2 + LOAD1x2_2 + KERNEL1x2_L2 64,32, 0,0 + KERNEL1x2_E2 64,32, 1,1 + MY_ALIGN + + +ZGEMM_L1x2_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L1x2_SUB2_1 + LOAD1x2_2 + KERNEL1x2_E2 64,32, 0,1 + MY_ALIGN + + +ZGEMM_L1x2_SUB2_1: +/*----------------------------------------*/ + andi. 
T1,L, 1 + ble ZGEMM_L1x2_SAVE + KERNEL1x2 + + +ZGEMM_L1x2_SAVE: +/*----------------------------------------*/ + SAVE1x2 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG,BO,AO,2,1 +#endif + + +ZGEMM_L1x2_END: +/*----------------------------------------*/ + + +ZGEMM_L1x1_BEGIN: +/*----------------------------------------*/ + andi. T1, M, 1 + ble ZGEMM_L1x1_END +#if defined(TRMMKERNEL) + REFRESH_POINTERS AO,BO,TEMP_REG,B,1,1 +#else + mr BO, B +#endif +#if defined(TRMMKERNEL) + REFRESH_TEMP_BK T6,K,TEMP_REG,1,1 + mr T1, T6 + addi T1,T1, -2 + srawi. T8, T1, 5 /**(T11-2) % 32x */ +#else + mr T1, K + addi T1,T1, -2 + srawi. T8, T1, 5 /**(K-2) % 32x */ +#endif + ZERO1x1 + ble ZGEMM_L1x1_SUB0 + bl ZGEMM_1x1_LMAIN_SUB + andi. L, T1, 31 + ble ZGEMM_L1x1_SAVE + b ZGEMM_L1x1_SUB2 + + +ZGEMM_L1x1_SUB0: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + andi. L, T6, 63 + cmpwi T6,33 +#else + andi. L, K, 63 + cmpwi K,33 +#endif + li T8,1 + bne CMP1x1_32K + addi BO,BO,-16 + addi AO,AO,-16 + LOAD1x1O 16,16 + END1x1_WITHOUT_ADD + LOAD1x1_2O 32, 32 + mtctr T8 + bl ZGEMM_L1x1_K32 + b ZGEMM_L1x1_SAVE + CMP1x1_32K: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) + cmpwi T6,32 +#else + cmpwi K,32 +#endif + bne ZGEMM_L1x1_SUB2 + MY_ALIGN + mtctr T8 + addi BO,BO,-32 + addi AO,AO,-32 + LOAD1x1_2O 32,32 + bl ZGEMM_L1x1_K32 + b ZGEMM_L1x1_SAVE + MY_ALIGN + MY_ALIGN + + +ZGEMM_L1x1_SUB2: +/*----------------------------------------*/ + andi. T1,L, 16 + ble ZGEMM_L1x1_SUB2_8 + bl ZGEMM_1x1_L16_SUB + MY_ALIGN + + +ZGEMM_L1x1_SUB2_8: +/*----------------------------------------*/ + andi. T1,L, 8 + ble ZGEMM_L1x1_SUB2_4 + bl ZGEMM_1x1_L8_SUB + MY_ALIGN + + +ZGEMM_L1x1_SUB2_4: +/*----------------------------------------*/ + andi. T1,L, 4 + ble ZGEMM_L1x1_SUB2_2 + LOAD1x1_2 + KERNEL1x1_L2 32,32, 0,0 + KERNEL1x1_E2 32,32, 1,1 + MY_ALIGN + + +ZGEMM_L1x1_SUB2_2: +/*----------------------------------------*/ + andi. T1,L, 2 + ble ZGEMM_L1x1_SUB2_1 + LOAD1x1_2 + KERNEL1x1_E2 32,32, 0,1 + MY_ALIGN + + +ZGEMM_L1x1_SUB2_1: +/*----------------------------------------*/ + andi. T1,L, 1 + ble ZGEMM_L1x1_SAVE + KERNEL1x1 + + +ZGEMM_L1x1_SAVE: +/*----------------------------------------*/ + SAVE1x1 +#if defined(TRMMKERNEL) + REFRESH_AFTER_SAVE T6,K,TEMP_REG,BO,AO,1,1 +#endif + + +ZGEMM_L1x1_END: +/*----------------------------------------*/ +#if defined(TRMMKERNEL) && !defined(LEFT) + addi TEMP_REG, TEMP_REG, 1 +#endif + + +ZGEMM_L1_END: +/*----------------------------------------*/ + \ No newline at end of file diff --git a/kernel/power/zgemm_macros_8x2_power8.S b/kernel/power/zgemm_macros_8x2_power8.S index c43a115b2..24a36470c 100644 --- a/kernel/power/zgemm_macros_8x2_power8.S +++ b/kernel/power/zgemm_macros_8x2_power8.S @@ -67,7 +67,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x8_1', ` +#else .macro LOAD2x8_1 +#endif lxvd2x vs16, o0, BO // load real part from B lxvd2x vs17, o16, BO // load imag part from B @@ -91,9 +95,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_I1', ` +#else .macro KERNEL2x8_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -151,9 +163,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs63, vs7, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_1', ` +#else .macro KERNEL2x8_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -211,9 +231,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs63, vs7, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_2', ` +#else .macro KERNEL2x8_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -271,9 +299,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs63, vs15, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_E2', ` +#else .macro KERNEL2x8_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -311,9 +347,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs63, vs15, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUBI1', ` +#else .macro KERNEL2x8_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -371,9 +415,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs63, vs7, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUB1', ` +#else .macro KERNEL2x8_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -431,9 +483,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs63, vs7, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x8', ` +#else .macro SAVE2x8 +#endif mr T1, CO @@ -455,13 +515,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -479,13 +539,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -503,13 +563,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -527,13 +587,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -551,13 +611,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs41, vs41 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs40 // realA*realB XSFADD_R2 vs0, vs0, vs41 // imagA*imagB - xxswapd vs40, vs40 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs41, vs41 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs40 // realA*imagB XSFADD_I2 vs1, vs1, vs41 // imagA*realB @@ -575,13 +635,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs43, vs43 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs42 // realA*realB XSFADD_R2 vs0, vs0, vs43 // imagA*imagB - xxswapd vs42, vs42 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs43, vs43 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs42 // realA*imagB XSFADD_I2 vs1, vs1, vs43 // imagA*realB @@ -599,13 +659,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs45, vs45 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs44 // realA*realB XSFADD_R2 vs0, vs0, vs45 // imagA*imagB - xxswapd vs44, vs44 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs45, vs45 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs44 // realA*imagB XSFADD_I2 vs1, vs1, vs45 // imagA*realB @@ -623,13 +683,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs47, vs47 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs46 // realA*realB XSFADD_R2 vs0, vs0, vs47 // imagA*imagB - xxswapd vs46, vs46 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs47, vs47 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs46 // realA*imagB XSFADD_I2 vs1, vs1, vs47 // imagA*realB @@ -685,13 +745,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs49, vs49 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs49,vs49) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs48 // realA*realB XSFADD_R2 vs0, vs0, vs49 // imagA*imagB - xxswapd vs48, vs48 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs49, vs49 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs48,vs48) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs49,vs49) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs48 // realA*imagB XSFADD_I2 vs1, vs1, vs49 // imagA*realB @@ -709,13 +769,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
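The same reduction in C on explicit lanes may be easier to follow than its sixteen unrolled copies (NN signs shown; the conjugation variants flip them):

```c
/* One SAVE reduction step in C. vs_even = {ar*br, ai*br} and
 * vs_odd = {ar*bi, ai*bi} for a single complex result (NN case). */
#include <complex.h>

double complex reduce_pair_nn(const double vs_even[2],
                              const double vs_odd[2])
{
    double re = vs_even[0] - vs_odd[1];  /* XSFADD_R1, XSFADD_R2 */
    double im = vs_odd[0] + vs_even[1];  /* XSFADD_I1, XSFADD_I2 */
    return re + im * I;
}
```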
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs51, vs51 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs51,vs51) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs50 // realA*realB XSFADD_R2 vs0, vs0, vs51 // imagA*imagB - xxswapd vs50, vs50 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs51, vs51 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs50,vs50) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs51,vs51) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs50 // realA*imagB XSFADD_I2 vs1, vs1, vs51 // imagA*realB @@ -733,13 +793,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs53, vs53 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs53,vs53) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs52 // realA*realB XSFADD_R2 vs0, vs0, vs53 // imagA*imagB - xxswapd vs52, vs52 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs53, vs53 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs52,vs52) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs53,vs53) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs52 // realA*imagB XSFADD_I2 vs1, vs1, vs53 // imagA*realB @@ -757,13 +817,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs55, vs55 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs55,vs55) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs54 // realA*realB XSFADD_R2 vs0, vs0, vs55 // imagA*imagB - xxswapd vs54, vs54 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs55, vs55 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs54,vs54) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs55,vs55) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs54 // realA*imagB XSFADD_I2 vs1, vs1, vs55 // imagA*realB @@ -781,13 +841,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs57, vs57 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs57,vs57) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs56 // realA*realB XSFADD_R2 vs0, vs0, vs57 // imagA*imagB - xxswapd vs56, vs56 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs57, vs57 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs56,vs56) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs57,vs57) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs56 // realA*imagB XSFADD_I2 vs1, vs1, vs57 // imagA*realB @@ -805,13 +865,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs59, vs59 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs59,vs59) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs58 // realA*realB XSFADD_R2 vs0, vs0, vs59 // imagA*imagB - xxswapd vs58, vs58 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs59, vs59 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs58,vs58) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs59,vs59) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs58 // realA*imagB XSFADD_I2 vs1, vs1, vs59 // imagA*realB @@ -829,13 +889,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs61, vs61 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs61,vs61) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs60 // realA*realB XSFADD_R2 vs0, vs0, vs61 // imagA*imagB - xxswapd vs60, vs60 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs61, vs61 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs60,vs60) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs61,vs61) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs60 // realA*imagB XSFADD_I2 vs1, vs1, vs61 // imagA*realB @@ -853,13 +913,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs63, vs63 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs63,vs63) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs62 // realA*realB XSFADD_R2 vs0, vs0, vs63 // imagA*imagB - xxswapd vs62, vs62 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs63, vs63 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs62,vs62) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs63,vs63) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs62 // realA*imagB XSFADD_I2 vs1, vs1, vs63 // imagA*realB @@ -900,14 +960,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T2, T2, LDC addi CO, CO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x4_1', ` +#else .macro LOAD2x4_1 +#endif lxvd2x vs16, o0, BO // load real part from B lxvd2x vs17, o16, BO // load imag part from B @@ -924,9 +992,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_I1', ` +#else .macro KERNEL2x4_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -961,9 +1037,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs47, vs3, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_1', ` +#else .macro KERNEL2x4_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -998,9 +1082,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
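The N=2/M=4 block repeats the scheme on a narrower tile. As a model, one k-step of KERNEL2x4 computes the following; the asm keeps the real-part and imag-part products in separate registers and leaves sign combination to SAVE2x4:

```c
/* C model of one KERNEL2x4 k-step: 2 columns of B against 4 complex
 * elements of A, i.e. the eight xvmaddadp pairs in the asm above. */
#include <complex.h>

void kernel2x4_step(double complex acc[2][4],
                    const double complex a[4],  /* one k-slice of A */
                    const double complex b[2])  /* one k-slice of B */
{
    for (int j = 0; j < 2; j++)
        for (int i = 0; i < 4; i++)
            acc[j][i] += a[i] * b[j];
}
```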
xvmaddadp vs47, vs3, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_2', ` +#else .macro KERNEL2x4_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1035,9 +1127,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs11, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_E2', ` +#else .macro KERNEL2x4_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -1059,9 +1159,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs11, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUBI1', ` +#else .macro KERNEL2x4_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1096,9 +1204,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs47, vs3, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUB1', ` +#else .macro KERNEL2x4_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1133,9 +1249,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs3, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x4', ` +#else .macro SAVE2x4 +#endif mr T1, CO @@ -1152,13 +1276,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -1176,13 +1300,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -1200,13 +1324,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
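The SUBI1/SUB1 split seen in every size class exists so the accumulators never need an explicit zeroing pass: the first k-step multiplies straight into them with xvmuldp, and every later step fuse-multiply-adds with xvmaddadp. Sketch:

```c
/* Why two "SUB" flavors: initialize on the first step, accumulate after. */
double dot_model(const double *a, const double *b, int K)
{
    double acc = a[0] * b[0];     /* KERNEL..._SUBI1: xvmuldp   */
    for (int k = 1; k < K; k++)
        acc += a[k] * b[k];       /* KERNEL..._SUB1:  xvmaddadp */
    return acc;
}
```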
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -1224,13 +1348,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -1273,13 +1397,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs41, vs41 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs40 // realA*realB XSFADD_R2 vs0, vs0, vs41 // imagA*imagB - xxswapd vs40, vs40 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs41, vs41 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs40 // realA*imagB XSFADD_I2 vs1, vs1, vs41 // imagA*realB @@ -1297,13 +1421,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs43, vs43 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs42 // realA*realB XSFADD_R2 vs0, vs0, vs43 // imagA*imagB - xxswapd vs42, vs42 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs43, vs43 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs42 // realA*imagB XSFADD_I2 vs1, vs1, vs43 // imagA*realB @@ -1321,13 +1445,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs45, vs45 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs44 // realA*realB XSFADD_R2 vs0, vs0, vs45 // imagA*imagB - xxswapd vs44, vs44 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs45, vs45 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs44 // realA*imagB XSFADD_I2 vs1, vs1, vs45 // imagA*realB @@ -1345,13 +1469,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs47, vs47 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs46 // realA*realB XSFADD_R2 vs0, vs0, vs47 // imagA*imagB - xxswapd vs46, vs46 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs47, vs47 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs46 // realA*imagB XSFADD_I2 vs1, vs1, vs47 // imagA*realB @@ -1383,14 +1507,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x2_1', ` +#else .macro LOAD2x2_1 +#endif lxvd2x vs16, o0, BO // load real part from B lxvd2x vs17, o16, BO // load imag part from B @@ -1405,9 +1537,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_I1', ` +#else .macro KERNEL2x2_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -1432,9 +1572,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs39, vs1, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_1', ` +#else .macro KERNEL2x2_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -1459,9 +1607,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs1, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_2', ` +#else .macro KERNEL2x2_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1486,9 +1642,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs9, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_E2', ` +#else .macro KERNEL2x2_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -1502,9 +1666,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddadp vs39, vs9, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUBI1', ` +#else .macro KERNEL2x2_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1529,9 +1701,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs39, vs1, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUB1', ` +#else .macro KERNEL2x2_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1556,9 +1736,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs1, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x2', ` +#else .macro SAVE2x2 +#endif mr T1, CO @@ -1573,13 +1761,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -1597,13 +1785,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -1640,13 +1828,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -1664,13 +1852,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -1698,14 +1886,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x1_1', ` +#else .macro LOAD2x1_1 +#endif lxvd2x vs16, o0, BO // load real part from B lxvd2x vs17, o16, BO // load imag part from B @@ -1719,9 +1915,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_I1', ` +#else .macro KERNEL2x1_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A @@ -1741,9 +1945,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs35, vs0, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_1', ` +#else .macro KERNEL2x1_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A @@ -1763,9 +1975,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs0, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_2', ` +#else .macro KERNEL2x1_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -1785,9 +2005,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs8, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_E2', ` +#else .macro KERNEL2x1_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -1797,9 +2025,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs8, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUBI1', ` +#else .macro KERNEL2x1_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -1819,9 +2055,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs35, vs0, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUB1', ` +#else .macro KERNEL2x1_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -1841,9 +2085,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs0, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x1', ` +#else .macro SAVE2x1 +#endif mr T1, CO @@ -1857,13 +2109,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
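The _I1/_1/_2/_E2 family above is a two-deep software pipeline: _1 issues the FMAs for the data already sitting in vs0.. while loading the next k-step into vs8.., _2 is the mirror image, and _E2 drains the final prefetched set without loading. A C model of the same double buffering, assuming an even K of at least 2, which the unrolled caller is expected to guarantee:

```c
/* Double-buffered C model of LOAD.._1 + KERNEL.._I1/_1/_2/_E2. */
#include <complex.h>

double complex pipeline_model(const double complex *a,
                              const double complex *b, int K)
{
    double complex a0 = a[0], b0 = b[0];       /* LOAD..._1             */
    double complex a1 = a[1], b1 = b[1];       /* load in KERNEL..._I1  */
    double complex acc = a0 * b0;              /* ..._I1: xvmuldp       */
    for (int k = 2; k + 1 < K; k += 2) {
        a0 = a[k];     b0 = b[k];              /* ..._1: load next      */
        acc += a1 * b1;                        /* ..._1: FMA previous   */
        a1 = a[k + 1]; b1 = b[k + 1];          /* ..._2: mirror image   */
        acc += a0 * b0;
    }
    return acc + a1 * b1;                      /* ..._E2: drain, no load */
}
```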
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -1897,13 +2149,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -1929,14 +2181,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x8_1', ` +#else .macro LOAD1x8_1 +#endif lxvd2x vs16, o0, BO // load real part from B lxvd2x vs17, o16, BO // load imag part from B @@ -1958,9 +2218,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_I1', ` +#else .macro KERNEL1x8_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -1999,9 +2267,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs47, vs7, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_1', ` +#else .macro KERNEL1x8_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2040,9 +2316,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs7, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_2', ` +#else .macro KERNEL1x8_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2081,9 +2365,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs15, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_E2', ` +#else .macro KERNEL1x8_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -2104,9 +2396,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddadp vs47, vs15, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUBI1', ` +#else .macro KERNEL1x8_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2145,9 +2445,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs47, vs7, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUB1', ` +#else .macro KERNEL1x8_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2186,9 +2494,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs7, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x8', ` +#else .macro SAVE1x8 +#endif mr T1, CO @@ -2210,13 +2526,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -2234,13 +2550,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -2258,13 +2574,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -2282,13 +2598,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
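The N=1 variants that begin above are the same machinery with a single live column of B (vs16/vs17 only), so the M=8 kernel carries eight accumulator pairs, vs32..vs47, instead of sixteen. As a model:

```c
/* C model of one KERNEL1x8 k-step: one B scalar against 8 complex
 * elements of A. */
#include <complex.h>

void kernel1x8_step(double complex acc[8],
                    const double complex a[8], double complex b0)
{
    for (int i = 0; i < 8; i++)
        acc[i] += a[i] * b0;
}
```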
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -2306,13 +2622,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs41, vs41 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs40 // realA*realB XSFADD_R2 vs0, vs0, vs41 // imagA*imagB - xxswapd vs40, vs40 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs41, vs41 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs40 // realA*imagB XSFADD_I2 vs1, vs1, vs41 // imagA*realB @@ -2330,13 +2646,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs43, vs43 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs42 // realA*realB XSFADD_R2 vs0, vs0, vs43 // imagA*imagB - xxswapd vs42, vs42 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs43, vs43 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs42 // realA*imagB XSFADD_I2 vs1, vs1, vs43 // imagA*realB @@ -2354,13 +2670,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs45, vs45 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs44 // realA*realB XSFADD_R2 vs0, vs0, vs45 // imagA*imagB - xxswapd vs44, vs44 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs45, vs45 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs44 // realA*imagB XSFADD_I2 vs1, vs1, vs45 // imagA*realB @@ -2378,13 +2694,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs47, vs47 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs46 // realA*realB XSFADD_R2 vs0, vs0, vs47 // imagA*imagB - xxswapd vs46, vs46 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs47, vs47 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs46 // realA*imagB XSFADD_I2 vs1, vs1, vs47 // imagA*realB @@ -2425,14 +2741,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T2, T2, LDC addi CO, CO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x4_1', ` +#else .macro LOAD1x4_1 +#endif lxvd2x vs16, o0, BO // load real part from B lxvd2x vs17, o16, BO // load imag part from B @@ -2447,9 +2771,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_I1', ` +#else .macro KERNEL1x4_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2473,9 +2805,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs39, vs3, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_1', ` +#else .macro KERNEL1x4_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2499,9 +2839,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_2', ` +#else .macro KERNEL1x4_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2525,9 +2873,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_E2', ` +#else .macro KERNEL1x4_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -2540,9 +2896,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUBI1', ` +#else .macro KERNEL1x4_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2566,9 +2930,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs39, vs3, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUB1', ` +#else .macro KERNEL1x4_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2592,9 +2964,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x4', ` +#else .macro SAVE1x4 +#endif mr T1, CO @@ -2611,13 +2991,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -2635,13 +3015,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -2659,13 +3039,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -2683,13 +3063,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -2721,14 +3101,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
add T1, T1, LDC addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x2_1', ` +#else .macro LOAD1x2_1 +#endif lxvd2x vs16, o0, BO // load real part from B lxvd2x vs17, o16, BO // load imag part from B @@ -2741,9 +3129,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_I1', ` +#else .macro KERNEL1x2_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2761,9 +3157,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs35, vs1, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_1', ` +#else .macro KERNEL1x2_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2781,9 +3185,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_2', ` +#else .macro KERNEL1x2_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2801,9 +3213,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_E2', ` +#else .macro KERNEL1x2_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -2812,9 +3232,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUBI1', ` +#else .macro KERNEL1x2_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2832,9 +3260,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs35, vs1, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUB1', ` +#else .macro KERNEL1x2_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2852,9 +3288,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x2', ` +#else .macro SAVE1x2 +#endif mr T1, CO @@ -2869,13 +3313,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -2893,13 +3337,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -2927,14 +3371,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x1_1', ` +#else .macro LOAD1x1_1 +#endif lxvd2x vs16, o0, BO // load real part from B lxvd2x vs17, o16, BO // load imag part from B @@ -2946,9 +3398,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_I1', ` +#else .macro KERNEL1x1_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A @@ -2963,9 +3423,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs33, vs0, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_1', ` +#else .macro KERNEL1x1_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A @@ -2980,9 +3448,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_2', ` +#else .macro KERNEL1x1_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -2997,18 +3473,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_E2', ` +#else .macro KERNEL1x1_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUBI1', ` +#else .macro KERNEL1x1_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -3023,9 +3515,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
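The ZCOPYB hunks further down swap the xxspltd mnemonic for the XXSPLTD wrapper; what the copy routine itself does is repack B so each complex scalar becomes a {real,real} vector followed by an {imag,imag} vector, the layout behind the kernels' "load real part from B" / "load imag part from B" comments. A C model of that repacking, with a hypothetical helper name:

```c
/* Model of ZCOPYB_1x1 / ZCOPYB_8x1: each B element {br, bi} is stored
 * splatted as {br, br} then {bi, bi} (XXSPLTD with index 0 and 1),
 * growing the buffer from 16 to 32 bytes per complex element. */
void zcopyb_model(const double *B, double *BB, int n)
{
    for (int j = 0; j < n; j++) {
        BB[4*j + 0] = B[2*j];      /* real, both lanes */
        BB[4*j + 1] = B[2*j];
        BB[4*j + 2] = B[2*j + 1];  /* imag, both lanes */
        BB[4*j + 3] = B[2*j + 1];
    }
}
```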
xvmuldp vs33, vs0, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUB1', ` +#else .macro KERNEL1x1_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -3040,9 +3540,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x1', ` +#else .macro SAVE1x1 +#endif mr T1, CO @@ -3056,13 +3564,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -3088,11 +3596,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`ZCOPYB_1x1', ` +#else .macro ZCOPYB_1x1 +#endif lxvdsx vs4, o0, BO // b0_r lxvdsx vs5, o8, BO // b0_i @@ -3101,10 +3617,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs5, o16, BBO addi BBO, BBO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`ZCOPYB_8x1', ` +#else .macro ZCOPYB_8x1 +#endif lxvd2x vs32, o0, BO lxvd2x vs33, o16, BO @@ -3118,23 +3642,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. lxvd2x vs39, o48, BO addi BO, BO, 64 - xxspltd vs40, vs32, 0 - xxspltd vs41, vs32, 1 - xxspltd vs42, vs33, 0 - xxspltd vs43, vs33, 1 - xxspltd vs44, vs34, 0 - xxspltd vs45, vs34, 1 - xxspltd vs46, vs35, 0 - xxspltd vs47, vs35, 1 - - xxspltd vs48, vs36, 0 - xxspltd vs49, vs36, 1 - xxspltd vs50, vs37, 0 - xxspltd vs51, vs37, 1 - xxspltd vs52, vs38, 0 - xxspltd vs53, vs38, 1 - xxspltd vs54, vs39, 0 - xxspltd vs55, vs39, 1 + XXSPLTD(vs40,vs32,0) + XXSPLTD(vs41,vs32,1) + XXSPLTD(vs42,vs33,0) + XXSPLTD(vs43,vs33,1) + XXSPLTD(vs44,vs34,0) + XXSPLTD(vs45,vs34,1) + XXSPLTD(vs46,vs35,0) + XXSPLTD(vs47,vs35,1) + + XXSPLTD(vs48,vs36,0) + XXSPLTD(vs49,vs36,1) + XXSPLTD(vs50,vs37,0) + XXSPLTD(vs51,vs37,1) + XXSPLTD(vs52,vs38,0) + XXSPLTD(vs53,vs38,1) + XXSPLTD(vs54,vs39,0) + XXSPLTD(vs55,vs39,1) stxvd2x vs40, o0, BBO stxvd2x vs41, o16, BBO @@ -3160,6 +3684,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs55, o48, BBO addi BBO, BBO, 64 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/zgemm_macros_power10.S b/kernel/power/zgemm_macros_power10.S new file mode 100644 index 000000000..42f9c5ad4 --- /dev/null +++ b/kernel/power/zgemm_macros_power10.S @@ -0,0 +1,1138 @@ +/*************************************************************************** +Copyright (c) 2013-2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define unit_size 16 +#define DISP32(ind,disp) (ind*unit_size*32+disp) +#define DISP16(ind,disp) (ind*unit_size*16+disp) +#define DISP8(ind,disp) (ind*unit_size*8+disp) +#define DISP4(ind,disp) (ind*unit_size*4+disp) +#define DISP2(ind,disp) (ind*unit_size*2+disp) +#define DISP1(ind,disp) (ind*unit_size+disp) +#define DISPX(disp) (disp) +/* HELPERS FOR SAVE */ +/* {r0,i0} and {r1,i1} into {r0,r1} {i0,i1} */ + + +.macro LOAD_COUPLE_AS_RR_II VS_OUT1,VS_OUT2,VS_TEMP1,VS_TEMP2,REG,LOFFSET +#ifndef TRMMKERNEL + lxv \VS_TEMP1, DISPX(\LOFFSET)(\REG) + lxv \VS_TEMP2, DISPX(\LOFFSET+16)(\REG) + xxmrgld \VS_OUT1,\VS_TEMP1,\VS_TEMP2 + xxmrghd \VS_OUT2,\VS_TEMP1,\VS_TEMP2 +#endif +.endm +/*from 2 result {a0r*br,a0i*bi} and {a1r*br,a1i*bi} pack into {a0r*br,a1r*br} and {a0i*bi,a1i*bi}*/ + + +.macro RESULT_INTO_REALREAL_IMAGEIMAGE VSIN1,VSIN2,VSOUT1,VSOUT2 + xxmrgld \VSOUT1, \VSIN1,\VSIN2 /* real*real from 2 results*/ + xxmrghd \VSOUT2, \VSIN1,\VSIN2 /* imag*imag from 2 results*/ +.endm +/*from 2 result {a0r*bi,a0i*br} and {a1r*bi,a1i*br} pack into {a0r*bi,a1r*bi} and {a0i*br,a1i*br}*/ + + +.macro RESULT_INTO_REALIMAG_IMAGREAL VSIN1,VSIN2,VSOUT1,VSOUT2 + xxmrgld \VSOUT1, \VSIN1,\VSIN2 /* real*imag */ + xxmrghd \VSOUT2, \VSIN1,\VSIN2 /* imag*real*/ +.endm +/* {a0r*br op a0i*bi ,a1r*br op a1i*bi} ~ {r0,r1}; {a0r*bi op a0i*br ,a1r*bi op a1i*br} ~ {i0,i1}*/ + + +.macro AGGREGATE_REALS_IMAGES VSINR_OUT1,VSINR,VSINI_OUT2,VSINI +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) + xvsubdp \VSINR_OUT1,\VSINR_OUT1,\VSINR + xvadddp \VSINI_OUT2,\VSINI_OUT2,\VSINI +#elif defined(CN) || defined(CT) || defined(RN) || defined(RT) + xvadddp \VSINR_OUT1,\VSINR_OUT1,\VSINR + xvsubdp \VSINI_OUT2,\VSINI_OUT2,\VSINI +#elif defined(NC) || defined(TC) || defined(NR) || defined(TR) + xvadddp \VSINR_OUT1,\VSINR_OUT1,\VSINR + xvsubdp \VSINI_OUT2,\VSINI,\VSINI_OUT2 +#else // CC || CR || RC || RR + /*we will assume {-alpha_r,-alpha_i} for this case */ + /*i1i2-r1r2 so we will negate alpha real instead to fix sign*/ + xvsubdp \VSINR_OUT1,\VSINR,\VSINR_OUT1 + /*we will negate alpha image instead instead to fix sign*/ + xvadddp \VSINI_OUT2,\VSINI_OUT2,\VSINI +#endif 
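+/* Summary of the four cases above, with rr = {ar*br,..}, ii = {ai*bi,..},
+   ri = {ar*bi,..}, ir = {ai*br,..} as packed by the RESULT_INTO_* helpers:
+     NN/NT/TN/TT: Re = rr - ii, Im = ri + ir   -> a * b
+     CN/CT/RN/RT: Re = rr + ii, Im = ri - ir   -> conj(a) * b
+     NC/TC/NR/TR: Re = rr + ii, Im = ir - ri   -> a * conj(b)
+     CC/CR/RC/RR: Re = ii - rr, Im = ri + ir, the signs then being fixed
+                  by the negated alpha_r/alpha_i as the comments above note.
+   The POWER10 kernels below feed these helpers from MMA accumulators:
+   xxsetaccz zeroes and primes a 512-bit accumulator, each xvf64gerpp
+   accumulates the outer product of a 4-doubleword A pair (lxvp) with a
+   2-doubleword B vector, and xxmfacc copies the accumulator back onto
+   its four underlying VSRs; the flow mirrors GCC's documented
+   __builtin_mma_xxsetaccz / __builtin_mma_xvf64gerpp /
+   __builtin_mma_disassemble_acc built-ins. */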
+.endm +/* {i0,i1} * {alpha_i,alpha_i} - VSOUT1 ;VSOUT2 + {r0,r1}*{alpha_i,alpha_i} */ + + +.macro MULT_APLHA_PART1 VSINRR,VSINII,VSOUT1,VSOUT2 +#ifndef TRMMKERNEL + xvmsubadp \VSOUT1,\VSINII, alpha_i + xvmaddadp \VSOUT2,\VSINRR, alpha_i +#else + xvmuldp \VSOUT1,\VSINII, alpha_i + xvmuldp \VSOUT2,\VSINRR, alpha_i +#endif +.endm +/* {r0,r1} * {alpha_r,alpha_r} - VSOUT1 ;VSOUT2 + {i0,i1} * {alpha_r,alpha_r} */ + + +.macro MULT_APLHA_PART2 VSINRR,VSINII,VSOUT1,VSOUT2 + xvmsubadp \VSOUT1,\VSINRR, alpha_r + xvmaddadp \VSOUT2,\VSINII, alpha_r +.endm +/* unpack to store 2{r,r} {i,i} into {r,i} {r,i} (big endian because of stxv) */ + + +.macro UNPACK_FOR_STORE VSIN1,VSIN2,VSOUT1,VSOUT2 + xxmrghd \VSOUT1,\VSIN2,\VSIN1 + xxmrgld \VSOUT2,\VSIN2,\VSIN1 +.endm + + +.macro STORE_COUPLE REG,LOFFSET,VSIN1,VSIN2 + stxv \VSIN1, DISPX(\LOFFSET)(\REG) + stxv \VSIN2, DISPX(\LOFFSET+16)(\REG) +.endm + + +.macro SAVE8 VSRes1,VSRes2,VSRes3,VSRes4,VSRes5,VSRes6,VSRes7,VSRes8,VSRes9,VSRes10,VSRes11,VSRes12,VSRes13,VSRes14,VSRes15,VSRes16,BASE_REG,LOFFSET + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes1,\VSRes3,vs34,vs35 + LOAD_COUPLE_AS_RR_II vs46,vs47,vs50,vs51,\BASE_REG,\LOFFSET + RESULT_INTO_REALIMAG_IMAGREAL \VSRes2,\VSRes4,vs36,vs37 + LOAD_COUPLE_AS_RR_II vs48,vs49,vs52,vs53,\BASE_REG,(\LOFFSET+32) + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes5,\VSRes7,vs38,vs39 + LOAD_COUPLE_AS_RR_II vs56,vs57,vs50,vs51,\BASE_REG,(\LOFFSET +64) + RESULT_INTO_REALIMAG_IMAGREAL \VSRes6,\VSRes8,vs40,vs41 + LOAD_COUPLE_AS_RR_II vs58,vs59,vs52,vs53,\BASE_REG,(\LOFFSET+96) + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes9,\VSRes11,vs42,vs43 + AGGREGATE_REALS_IMAGES vs34,vs35,vs36,vs37 + RESULT_INTO_REALIMAG_IMAGREAL \VSRes10,\VSRes12,vs44,vs45 + AGGREGATE_REALS_IMAGES vs38,vs39,vs40,vs41 + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes13,\VSRes15,\VSRes1,\VSRes2 + MULT_APLHA_PART1 vs34,vs36, vs46,vs47 + RESULT_INTO_REALIMAG_IMAGREAL \VSRes14,\VSRes16,\VSRes3,\VSRes4 + MULT_APLHA_PART1 vs38,vs40,vs48,vs49 + MULT_APLHA_PART2 vs34,vs36,vs46,vs47 + AGGREGATE_REALS_IMAGES vs42,vs43,vs44,vs45 + MULT_APLHA_PART2 vs38,vs40,vs48,vs49 + AGGREGATE_REALS_IMAGES \VSRes1,\VSRes2,\VSRes3,\VSRes4 + UNPACK_FOR_STORE vs46,vs47,vs39,vs41 + MULT_APLHA_PART1 vs42,vs44, vs56,vs57 + UNPACK_FOR_STORE vs48,vs49,vs35,vs37 + MULT_APLHA_PART1 \VSRes1,\VSRes3, vs58,vs59 + STORE_COUPLE \BASE_REG,\LOFFSET,vs39,vs41 + MULT_APLHA_PART2 vs42,vs44,vs56,vs57 + STORE_COUPLE \BASE_REG,(\LOFFSET+32),vs35,vs37 + MULT_APLHA_PART2 \VSRes1,\VSRes3, vs58,vs59 + UNPACK_FOR_STORE vs56,vs57,vs42,vs44 + UNPACK_FOR_STORE vs58,vs59,\VSRes1,\VSRes3 + STORE_COUPLE \BASE_REG,(\LOFFSET +64),vs42,vs44 + STORE_COUPLE \BASE_REG,(\LOFFSET+96),\VSRes1,\VSRes3 +.endm + + +.macro SAVE4 VSRes1,VSRes2,VSRes3,VSRes4,VSRes5,VSRes6,VSRes7,VSRes8,BASE_REG,LOFFSET + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes1,\VSRes3,vs34,vs35 + LOAD_COUPLE_AS_RR_II vs46,vs47,vs50,vs51,\BASE_REG,\LOFFSET + RESULT_INTO_REALIMAG_IMAGREAL \VSRes2,\VSRes4,vs36,vs37 + LOAD_COUPLE_AS_RR_II vs48,vs49,vs52,vs53,\BASE_REG,(\LOFFSET+32) + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes5,\VSRes7,vs38,vs39 + RESULT_INTO_REALIMAG_IMAGREAL \VSRes6,\VSRes8,vs40,vs41 + AGGREGATE_REALS_IMAGES vs34,vs35,vs36,vs37 + AGGREGATE_REALS_IMAGES vs38,vs39,vs40,vs41 + MULT_APLHA_PART1 vs34,vs36, vs46,vs47 + MULT_APLHA_PART1 vs38,vs40, vs48,vs49 + MULT_APLHA_PART2 vs34,vs36, vs46,vs47 + MULT_APLHA_PART2 vs38,vs40,vs48,vs49 + UNPACK_FOR_STORE vs46,vs47,vs39,vs41 + UNPACK_FOR_STORE vs48,vs49,vs35,vs37 + STORE_COUPLE \BASE_REG,\LOFFSET,vs39,vs41 + STORE_COUPLE 
\BASE_REG,(\LOFFSET+32),vs35,vs37 +.endm + + +.macro SAVE2 VSRes1,VSRes2,VSRes3,VSRes4,BASE_REG,LOFFSET + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes1,\VSRes3,vs34,vs35 + LOAD_COUPLE_AS_RR_II vs46,vs47,vs50,vs51,\BASE_REG,\LOFFSET + RESULT_INTO_REALIMAG_IMAGREAL \VSRes2,\VSRes4,vs36,vs37 + AGGREGATE_REALS_IMAGES vs34,vs35,vs36,vs37 + MULT_APLHA_PART1 vs34,vs36, vs46,vs47 + MULT_APLHA_PART2 vs34,vs36, vs46,vs47 + UNPACK_FOR_STORE vs46,vs47,vs39,vs41 + STORE_COUPLE \BASE_REG,\LOFFSET,vs39,vs41 +.endm + + +.macro SAVE1 VSRes1,VSRes2,BASE_REG,LOFFSET + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes1,\VSRes1,vs34,vs35 +#ifndef TRMMKERNEL + lxv vs50, (\LOFFSET)(\BASE_REG) + xxmrgld vs46,vs50,vs50 + xxmrghd vs47,vs50,vs50 +#endif + RESULT_INTO_REALIMAG_IMAGREAL \VSRes2,\VSRes2,vs36,vs37 + AGGREGATE_REALS_IMAGES vs34,vs35,vs36,vs37 + MULT_APLHA_PART1 vs34,vs36, vs46,vs47 + MULT_APLHA_PART2 vs34,vs36, vs46,vs47 + UNPACK_FOR_STORE vs46,vs47,vs39,vs41 + xxmrghd vs39,vs47,vs46 + stxv vs39, (\LOFFSET)(\BASE_REG) +.endm + +/********************************************************************************************** +* + +.macros for N=2 and M=8 +**********************************************************************************************/ + +.macro KERNEL2x8_ZERO_AND_PRIME_MMA + /* zero out and prime the MMA accumulators */ + xxsetaccz 0 + xxsetaccz 1 + xxsetaccz 2 + xxsetaccz 3 + xxsetaccz 4 + xxsetaccz 5 + xxsetaccz 6 + xxsetaccz 7 +.endm + + +.macro KERNEL2x8_PRELOAD + lxvp vs32, 0(AO) // load real,imag from A + lxvp vs34, 32(AO) // load real,imag from A + lxvp vs36, 64(AO) // load real,imag from A + lxvp vs38, 96(AO) // load real,imag from A + lxvp vs48, 0(BO) // load real imag from B +.endm + + +.macro KERNEL2x8_2 Index, IsLast + lxvp vs40, DISP16(\Index,128)(AO) // load real,imag from A + lxvp vs42, DISP16(\Index,160)(AO) // load real,imag from A + lxvp vs44, DISP16(\Index,192)(AO) // load real,imag from A + lxvp vs46, DISP16(\Index,224)(AO) // load real,imag from A + lxvp vs50, DISP4(\Index, 32)(BO) // load real,imag from B + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 1, vs34, vs49 + xvf64gerpp 2, vs36, vs49 + xvf64gerpp 3, vs38, vs49 + xvf64gerpp 4, vs32, vs48 + xvf64gerpp 5, vs34, vs48 + xvf64gerpp 6, vs36, vs48 + xvf64gerpp 7, vs38, vs48 + lxvp vs32, DISP16(\Index, 256)(AO) // load real,imag from A + lxvp vs34, DISP16(\Index, 288)(AO) // load real,imag from A + lxvp vs36, DISP16(\Index, 320)(AO) // load real,imag from A + lxvp vs38, DISP16(\Index, 352)(AO) // load real,imag from A + lxvp vs48, DISP4(\Index, 64)(BO) // load real imag from B + xvf64gerpp 0, vs40, vs51 + xvf64gerpp 1, vs42, vs51 + xvf64gerpp 2, vs44, vs51 + xvf64gerpp 3, vs46, vs51 + xvf64gerpp 4, vs40, vs50 + xvf64gerpp 5, vs42, vs50 + xvf64gerpp 6, vs44, vs50 + xvf64gerpp 7, vs46, vs50 +.if \IsLast==1 + addi AO, AO, DISP16(\Index,256) + addi BO, BO, DISP4(\Index,64) +.endif +.endm + + +.macro LOAD_END_2x8 OffsetA,OffsetB + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 1, vs34, vs49 + xvf64gerpp 2, vs36, vs49 + xvf64gerpp 3, vs38, vs49 + xvf64gerpp 4, vs32, vs48 + xvf64gerpp 5, vs34, vs48 + xvf64gerpp 6, vs36, vs48 + xvf64gerpp 7, vs38, vs48 + addi BO, BO, \OffsetB + addi AO, AO, \OffsetA +.endm + + +.macro KERNEL2x8_UNPRIME_MMA + /* "unprime" MMA accumulators */ + xxmfacc 0 + xxmfacc 1 + xxmfacc 2 + xxmfacc 3 + xxmfacc 4 + xxmfacc 5 + xxmfacc 6 + xxmfacc 7 +.endm + + +.macro SAVE2x8 + add T1, CO ,LDC + xxpermdi vs32, vs0, vs1, 0b01 + xxpermdi vs33, vs0, vs1, 0b10 + xxpermdi vs34, vs2, vs3, 0b01 + xxpermdi vs35, vs2, vs3, 0b10 + xxpermdi vs36, vs4, 
vs5, 0b01 + xxpermdi vs37, vs4, vs5, 0b10 + xxpermdi vs38, vs6, vs7, 0b01 + xxpermdi vs39, vs6, vs7, 0b10 + xxpermdi vs40, vs8, vs9, 0b01 + xxpermdi vs41, vs8, vs9, 0b10 + xxpermdi vs42, vs10, vs11, 0b01 + xxpermdi vs43, vs10, vs11, 0b10 + xxpermdi vs44, vs12, vs13, 0b01 + xxpermdi vs45, vs12, vs13, 0b10 + xxpermdi vs46, vs14, vs15, 0b01 + xxpermdi vs47, vs14, vs15, 0b10 + + xxlor vs2, vs32, vs32 + xxlor vs3, vs33, vs33 + xxlor vs0, vs34, vs34 + xxlor vs1, vs35, vs35 + xxlor vs6, vs36, vs36 + xxlor vs7, vs37, vs37 + xxlor vs4, vs38, vs38 + xxlor vs5, vs39, vs39 + xxlor vs10, vs40, vs40 + xxlor vs11, vs41, vs41 + xxlor vs8, vs42, vs42 + xxlor vs9, vs43, vs43 + xxlor vs14, vs44, vs44 + xxlor vs15, vs45, vs45 + xxlor vs12, vs46, vs46 + xxlor vs13, vs47, vs47 + + xxpermdi vs32, vs16, vs17, 0b01 + xxpermdi vs33, vs16, vs17, 0b10 + xxpermdi vs34, vs18, vs19, 0b01 + xxpermdi vs35, vs18, vs19, 0b10 + xxpermdi vs36, vs20, vs21, 0b01 + xxpermdi vs37, vs20, vs21, 0b10 + xxpermdi vs38, vs22, vs23, 0b01 + xxpermdi vs39, vs22, vs23, 0b10 + xxpermdi vs40, vs24, vs25, 0b01 + xxpermdi vs41, vs24, vs25, 0b10 + xxpermdi vs42, vs26, vs27, 0b01 + xxpermdi vs43, vs26, vs27, 0b10 + xxpermdi vs44, vs28, vs29, 0b01 + xxpermdi vs45, vs28, vs29, 0b10 + xxpermdi vs46, vs30, vs31, 0b01 + xxpermdi vs47, vs30, vs31, 0b10 + + xxlor vs18, vs32, vs32 + xxlor vs19, vs33, vs33 + xxlor vs16, vs34, vs34 + xxlor vs17, vs35, vs35 + xxlor vs22, vs36, vs36 + xxlor vs23, vs37, vs37 + xxlor vs20, vs38, vs38 + xxlor vs21, vs39, vs39 + xxlor vs26, vs40, vs40 + xxlor vs27, vs41, vs41 + xxlor vs24, vs42, vs42 + xxlor vs25, vs43, vs43 + xxlor vs30, vs44, vs44 + xxlor vs31, vs45, vs45 + xxlor vs28, vs46, vs46 + xxlor vs29, vs47, vs47 + + SAVE8 vs0,vs1,vs2,vs3,vs4,vs5,vs6,vs7,vs8,vs9,vs10,vs11,vs12,vs13,vs14,vs15,CO,0 + SAVE8 vs16,vs17,vs18,vs19,vs20,vs21,vs22,vs23,vs24,vs25,vs26,vs27,vs28,vs29,vs30,vs31,T1,0 + addi CO, CO, 128 +.endm + +/********************************************************************************************** +* + +.macros for N=2 and M=4 +**********************************************************************************************/ + +.macro KERNEL2x4_ZERO_AND_PRIME_MMA + /* zero out and prime the MMA accumulators */ + xxsetaccz 0 + xxsetaccz 1 + xxsetaccz 2 + xxsetaccz 3 +.endm + + +.macro KERNEL2x4_PRELOAD + lxvp vs32, 0(AO) // load real,imag from A + lxvp vs34, 32(AO) // load real,imag from A + lxvp vs48, 0(BO) // load real imag from B +.endm + + +.macro KERNEL2x4_2 Index, IsLast + lxvp vs40, DISP8(\Index, 64)(AO) // load real,imag from A + lxvp vs42, DISP8(\Index, 96)(AO) // load real,imag from A + lxvp vs50, DISP4(\Index, 32)(BO) // load real,imag from B + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 1, vs34, vs49 + xvf64gerpp 2, vs32, vs48 + xvf64gerpp 3, vs34, vs48 + lxvp vs32, DISP8(\Index, 128)(AO) // load real,imag from A + lxvp vs34, DISP8(\Index, 160)(AO) // load real,imag from A + lxvp vs48, DISP4(\Index, 64)(BO) // load real,imag from B + xvf64gerpp 0, vs40, vs51 + xvf64gerpp 1, vs42, vs51 + xvf64gerpp 2, vs40, vs50 + xvf64gerpp 3, vs42, vs50 +.if \IsLast==1 + addi AO, AO, DISP8(\Index,128) + addi BO, BO, DISP4(\Index,64) +.endif +.endm + + +.macro LOAD_END_2x4 OffsetA, OffsetB + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 1, vs34, vs49 + xvf64gerpp 2, vs32, vs48 + xvf64gerpp 3, vs34, vs48 + addi BO, BO, \OffsetB + addi AO, AO, \OffsetA +.endm + + +.macro KERNEL2x4_UNPRIME_MMA + /* "unprime" MMA accumulators */ + xxmfacc 0 + xxmfacc 1 + xxmfacc 2 + xxmfacc 3 +.endm + + +.macro SAVE2x4 + add T1, CO ,LDC + xxpermdi 
vs32, vs0, vs1, 0b01 + xxpermdi vs33, vs0, vs1, 0b10 + xxpermdi vs34, vs2, vs3, 0b01 + xxpermdi vs35, vs2, vs3, 0b10 + xxpermdi vs36, vs4, vs5, 0b01 + xxpermdi vs37, vs4, vs5, 0b10 + xxpermdi vs38, vs6, vs7, 0b01 + xxpermdi vs39, vs6, vs7, 0b10 + xxpermdi vs40, vs8, vs9, 0b01 + xxpermdi vs41, vs8, vs9, 0b10 + xxpermdi vs42, vs10, vs11, 0b01 + xxpermdi vs43, vs10, vs11, 0b10 + xxpermdi vs44, vs12, vs13, 0b01 + xxpermdi vs45, vs12, vs13, 0b10 + xxpermdi vs46, vs14, vs15, 0b01 + xxpermdi vs47, vs14, vs15, 0b10 + + xxlor vs2, vs32, vs32 + xxlor vs3, vs33, vs33 + xxlor vs0, vs34, vs34 + xxlor vs1, vs35, vs35 + xxlor vs6, vs36, vs36 + xxlor vs7, vs37, vs37 + xxlor vs4, vs38, vs38 + xxlor vs5, vs39, vs39 + xxlor vs10, vs40, vs40 + xxlor vs11, vs41, vs41 + xxlor vs8, vs42, vs42 + xxlor vs9, vs43, vs43 + xxlor vs14, vs44, vs44 + xxlor vs15, vs45, vs45 + xxlor vs12, vs46, vs46 + xxlor vs13, vs47, vs47 + + SAVE4 vs0,vs1,vs2,vs3,vs4,vs5,vs6,vs7,CO,0 + SAVE4 vs8,vs9,vs10,vs11,vs12,vs13,vs14,vs15,T1,0 + addi CO, CO, 64 +.endm + +/********************************************************************************************** +* + +.macros for N=2 and M=2 +**********************************************************************************************/ + +.macro KERNEL2x2_ZERO_AND_PRIME_MMA + /* zero out and prime the MMA accumulators */ + xxsetaccz 0 + xxsetaccz 1 +.endm + + +.macro KERNEL2x2_PRELOAD + lxvp vs32, 0(AO) // load real,imag from A + lxvp vs48, 0(BO) // load real imag from B +.endm + + +.macro KERNEL2x2_2 Index, IsLast + lxvp vs40, DISP4(\Index, 32)(AO) // load real,imag from A + lxvp vs50, DISP4(\Index, 32)(BO) // load real,imag from B + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 1, vs32, vs48 + lxvp vs32, DISP4(\Index, 64)(AO) // load real,imag from A + lxvp vs48, DISP4(\Index, 64)(BO) // load real imag from B + xvf64gerpp 0, vs40, vs51 + xvf64gerpp 1, vs40, vs50 +.if \IsLast==1 + addi AO, AO, DISP4(\Index,64) + addi BO, BO, DISP4(\Index,64) +.endif +.endm + + +.macro LOAD_END_2x2 OffsetA,OffsetB + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 1, vs32, vs48 + addi BO, BO, \OffsetB + addi AO, AO, \OffsetA +.endm + + +.macro KERNEL2x2_UNPRIME_MMA + /* "unprime" MMA accumulators */ + xxmfacc 0 + xxmfacc 1 +.endm + + +.macro SAVE2x2 + add T1, CO ,LDC + xxpermdi vs32, vs0, vs1, 0b01 + xxpermdi vs33, vs0, vs1, 0b10 + xxpermdi vs34, vs2, vs3, 0b01 + xxpermdi vs35, vs2, vs3, 0b10 + xxpermdi vs36, vs4, vs5, 0b01 + xxpermdi vs37, vs4, vs5, 0b10 + xxpermdi vs38, vs6, vs7, 0b01 + xxpermdi vs39, vs6, vs7, 0b10 + + xxlor vs2, vs32, vs32 + xxlor vs3, vs33, vs33 + xxlor vs0, vs34, vs34 + xxlor vs1, vs35, vs35 + xxlor vs6, vs36, vs36 + xxlor vs7, vs37, vs37 + xxlor vs4, vs38, vs38 + xxlor vs5, vs39, vs39 + + SAVE2 vs0,vs1,vs2,vs3,CO,0 + SAVE2 vs4,vs5,vs6,vs7,T1,0 + addi CO, CO, 32 +.endm + +/********************************************************************************************** +* + +.macros for N=2 and M=1 +**********************************************************************************************/ + +.macro ZERO2x1 + xxlxor vs0, vs0, vs0 + xxlxor vs1, vs1, vs1 + xxlxor vs2, vs2, vs2 + xxlxor vs3, vs3, vs3 + +.endm + + +.macro LOAD2x1 + LOAD2x1O 0,0 +.endm + + +.macro LOAD2x1O OffsetA,OffsetB + lxv vs48,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs50, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs49, vs48 + xxswapd vs51, vs50 + lxv vs32, (0+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END2x1_WITHOUT_ADD + END2x1 AO,BO,0,0 +.endm + + +.macro END2x1 AREG, BREG, OffsetA, OffsetB +.if 
\OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs0, vs32, vs48 + xvmaddadp vs2, vs32, vs50 + xvmaddadp vs1, vs32, vs49 + xvmaddadp vs3, vs32, vs51 +.endm + + +.macro LOAD2x1_2 + LOAD2x1_2O 0,0 +.endm + + +.macro LOAD2x1_2O OffsetA,OffsetB + lxv vs48,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs50, (\OffsetB+16)(BO) // load real,imag from B + lxv vs52, (\OffsetB+32)(BO) // load real,imag from B + lxv vs54, (\OffsetB+48)(BO) // load real,imag from B + xxswapd vs49, vs48 + xxswapd vs51, vs50 + lxv vs32, (0+\OffsetA)(AO) // load real,imag from A + lxv vs40, (16+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END2x1_2 + /*for load2 offset will be 32 and 64*/ + KERNEL2x1_2 AO,BO, 32,64,0 ,1,1 +.endm + + +.macro KERNEL2x1_E2 OffsetA,OffsetB, Index,IsLast + KERNEL2x1_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL2x1_L2 OffsetA,OffsetB, Index,IsLast + KERNEL2x1_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL2x1_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xxswapd vs53, vs52 + xxswapd vs55, vs54 + xvmaddadp vs0, vs32, vs48 + xvmaddadp vs2, vs32, vs50 + xvmaddadp vs1, vs32, vs49 + xvmaddadp vs3, vs32, vs51 +.if \Complete==0 + lxv vs32, DISP2(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A +.endif +.if \Complete==0 + lxv vs48, DISP4(\Index, 0+\OffsetB)(\BREG) // load real imag from B + lxv vs50, DISP4(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \Complete==0 + xxswapd vs49, vs48 + xxswapd vs51, vs50 +.endif + xvmaddadp vs0, vs40, vs52 + xvmaddadp vs2, vs40, vs54 + xvmaddadp vs1, vs40, vs53 + xvmaddadp vs3, vs40, vs55 +.if \Complete==0 + lxv vs40, DISP2(\Index,16+0+ \OffsetA)(\AREG) // load real,imag from A +.endif + +.if \Complete==0 + lxv vs52, DISP4(\Index, 32+\OffsetB)(\BREG) // load real,imag from B + lxv vs54, DISP4(\Index, 48+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP2(\Index,\OffsetA) + addi \BREG, \BREG, DISP4(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP2(\Index,32) + addi \BREG, \BREG, DISP4(\Index,64) +.endif +.endif +.endm + + +.macro KERNEL2x1 + LOAD2x1 + END2x1 AO, BO, 16,32 +.endm + + +.macro SAVE2x1 + add T1, CO ,LDC + SAVE1 vs0,vs1,CO,0 + SAVE1 vs2,vs3,T1,0 + addi CO, CO, 16 +.endm + +/********************************************************************************************** +* + +.macros for N=1 and M=8 +**********************************************************************************************/ + +.macro KERNEL1x8_ZERO_AND_PRIME_MMA + /* zero out and prime the MMA accumulators */ + xxsetaccz 0 + xxsetaccz 1 + xxsetaccz 2 + xxsetaccz 3 +.endm + + +.macro KERNEL1x8_2 Index,IsLast + lxvp vs32, DISP16(\Index, 0)(AO) // load real,imag from A + lxvp vs34, DISP16(\Index, 32)(AO) // load real,imag from A + lxvp vs36, DISP16(\Index, 64)(AO) // load real,imag from A + lxvp vs38, DISP16(\Index, 96)(AO) // load real,imag from A + lxvp vs40, DISP16(\Index, 128)(AO) // load real,imag from A + lxvp vs42, DISP16(\Index, 160)(AO) // load real,imag from A + lxvp vs44, DISP16(\Index, 192)(AO) // load real,imag from A + lxvp vs46, DISP16(\Index, 224)(AO) // load real,imag from A + lxvp vs48, DISP2(\Index, 0)(BO) // load real imag from B + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 1, vs34, vs49 + xvf64gerpp 2, vs36, vs49 + xvf64gerpp 3, vs38, vs49 + xvf64gerpp 0, vs40, vs48 + xvf64gerpp 1, vs42, vs48 + xvf64gerpp 2, vs44, vs48 + xvf64gerpp 3, vs46, vs48 +.if 
\IsLast==1 + addi AO, AO, DISP16(\Index,256) + addi BO, BO, DISP2(\Index,32) +.endif +.endm + + +.macro LOAD_END_1x8 OffsetA,OffsetB + lxvp vs32, 0(AO) // load real,imag from A + lxvp vs34, 32(AO) // load real,imag from A + lxvp vs36, 64(AO) // load real,imag from A + lxvp vs38, 96(AO) // load real,imag from A + lxv vs48, 0(BO) // load real imag from B + xvf64gerpp 0, vs32, vs48 + xvf64gerpp 1, vs34, vs48 + xvf64gerpp 2, vs36, vs48 + xvf64gerpp 3, vs38, vs48 + addi BO, BO, \OffsetB + addi AO, AO, \OffsetA +.endm + + +.macro KERNEL1x8_UNPRIME_MMA + /* "unprime" MMA accumulators */ + xxmfacc 0 + xxmfacc 1 + xxmfacc 2 + xxmfacc 3 +.endm + + +.macro SAVE1x8 + xxpermdi vs32, vs0, vs1, 0b01 + xxpermdi vs33, vs0, vs1, 0b10 + xxpermdi vs34, vs2, vs3, 0b01 + xxpermdi vs35, vs2, vs3, 0b10 + xxpermdi vs36, vs4, vs5, 0b01 + xxpermdi vs37, vs4, vs5, 0b10 + xxpermdi vs38, vs6, vs7, 0b01 + xxpermdi vs39, vs6, vs7, 0b10 + xxpermdi vs40, vs8, vs9, 0b01 + xxpermdi vs41, vs8, vs9, 0b10 + xxpermdi vs42, vs10, vs11, 0b01 + xxpermdi vs43, vs10, vs11, 0b10 + xxpermdi vs44, vs12, vs13, 0b01 + xxpermdi vs45, vs12, vs13, 0b10 + xxpermdi vs46, vs14, vs15, 0b01 + xxpermdi vs47, vs14, vs15, 0b10 + + xxlor vs2, vs32, vs32 + xxlor vs3, vs33, vs33 + xxlor vs0, vs34, vs34 + xxlor vs1, vs35, vs35 + xxlor vs6, vs36, vs36 + xxlor vs7, vs37, vs37 + xxlor vs4, vs38, vs38 + xxlor vs5, vs39, vs39 + xxlor vs10, vs40, vs40 + xxlor vs11, vs41, vs41 + xxlor vs8, vs42, vs42 + xxlor vs9, vs43, vs43 + xxlor vs14, vs44, vs44 + xxlor vs15, vs45, vs45 + xxlor vs12, vs46, vs46 + xxlor vs13, vs47, vs47 + + SAVE8 vs0,vs1,vs2,vs3,vs4,vs5,vs6,vs7,vs8,vs9,vs10,vs11,vs12,vs13,vs14,vs15,CO,0 + addi CO, CO, 128 +.endm + +/********************************************************************************************** +* + +.macros for N=1 and M=4 +**********************************************************************************************/ + +.macro KERNEL1x4_ZERO_AND_PRIME_MMA + /* zero out and prime the MMA accumulators */ + xxsetaccz 0 + xxsetaccz 1 +.endm + + +.macro KERNEL1x4_2 Index,IsLast + lxvp vs32, DISP8(\Index, 0)(AO) // load real,imag from A + lxvp vs34, DISP8(\Index, 32)(AO) // load real,imag from A + lxvp vs40, DISP8(\Index, 64)(AO) // load real,imag from A + lxvp vs42, DISP8(\Index, 96)(AO) // load real,imag from A + lxvp vs48, DISP2(\Index, 0)(BO) // load real imag from B + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 1, vs34, vs49 + xvf64gerpp 0, vs40, vs48 + xvf64gerpp 1, vs42, vs48 +.if \IsLast==1 + addi AO, AO, DISP8(\Index,128) + addi BO, BO, DISP2(\Index,32) +.endif +.endm + + +.macro LOAD_END_1x4 OffsetA,OffsetB + lxvp vs32, 0(AO) // load real,imag from A + lxvp vs34, 32(AO) // load real,imag from A + lxv vs48, 0(BO) // load real imag from B + xvf64gerpp 0, vs32, vs48 + xvf64gerpp 1, vs34, vs48 + addi BO, BO, \OffsetB + addi AO, AO, \OffsetA +.endm + + +.macro KERNEL1x4_UNPRIME_MMA + /* "unprime" MMA accumulators */ + xxmfacc 0 + xxmfacc 1 +.endm + + +.macro SAVE1x4 + xxpermdi vs32, vs0, vs1, 0b01 + xxpermdi vs33, vs0, vs1, 0b10 + xxpermdi vs34, vs2, vs3, 0b01 + xxpermdi vs35, vs2, vs3, 0b10 + xxpermdi vs36, vs4, vs5, 0b01 + xxpermdi vs37, vs4, vs5, 0b10 + xxpermdi vs38, vs6, vs7, 0b01 + xxpermdi vs39, vs6, vs7, 0b10 + + xxlor vs2, vs32, vs32 + xxlor vs3, vs33, vs33 + xxlor vs0, vs34, vs34 + xxlor vs1, vs35, vs35 + xxlor vs6, vs36, vs36 + xxlor vs7, vs37, vs37 + xxlor vs4, vs38, vs38 + xxlor vs5, vs39, vs39 + + SAVE4 vs0,vs1,vs2,vs3,vs4,vs5,vs6,vs7,CO,0 + addi CO, CO, 64 +.endm + 
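+/*
+   Comment-only usage sketch (C-like pseudocode; the actual driver loop lives
+   in the accompanying kernel source, not in this file).  Every MMA kernel
+   here follows the same protocol, shown for N=1, M=4:
+
+   KERNEL1x4_ZERO_AND_PRIME_MMA;          // xxsetaccz: clear accumulators 0,1
+   for (i = 0; i < k_pairs; i++)          // each call consumes two k steps
+       KERNEL1x4_2(i, i == k_pairs - 1);  // IsLast==1 also advances AO/BO
+   if (k_odd) LOAD_END_1x4(64, 16);       // tail offsets here are illustrative
+   KERNEL1x4_UNPRIME_MMA;                 // xxmfacc: accumulators -> vs0..vs7
+   SAVE1x4;                               // permute, alpha-scale, store to C
+
+   The xxpermdi 0b01/0b10 pairs inside the SAVE macros reshuffle each
+   accumulator's four VSRs from MMA layout back into the {real,imag} vector
+   pairs that SAVE4/SAVE8 expect.
+*/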
+/********************************************************************************************** +* + +.macros for N=1 and M=2 +**********************************************************************************************/ + +.macro KERNEL1x2_ZERO_AND_PRIME_MMA + /* zero out and prime the MMA accumulators */ + xxsetaccz 0 +.endm + + +.macro KERNEL1x2_2 Index,IsLast + lxvp vs32, DISP4(\Index, 0)(AO) // load real,imag from A + lxvp vs40, DISP4(\Index, 32)(AO) // load real,imag from A + lxvp vs48, DISP2(\Index, 0)(BO) // load real imag from B + xvf64gerpp 0, vs32, vs49 + xvf64gerpp 0, vs40, vs48 +.if \IsLast==1 + addi AO, AO, DISP4(\Index,64) + addi BO, BO, DISP2(\Index,32) +.endif +.endm + + +.macro LOAD_END_1x2 OffsetA,OffsetB + lxvp vs32, 0(AO) // load real,imag from A + lxv vs48, 0(BO) // load real imag from B + xvf64gerpp 0, vs32, vs48 + addi BO, BO, \OffsetB + addi AO, AO, \OffsetA +.endm + + +.macro KERNEL1x2_UNPRIME_MMA + /* "unprime" MMA accumulators */ + xxmfacc 0 +.endm + + +.macro SAVE1x2 + xxpermdi vs32, vs0, vs1, 0b01 + xxpermdi vs33, vs0, vs1, 0b10 + xxpermdi vs34, vs2, vs3, 0b01 + xxpermdi vs35, vs2, vs3, 0b10 + + xxlor vs2, vs32, vs32 + xxlor vs3, vs33, vs33 + xxlor vs0, vs34, vs34 + xxlor vs1, vs35, vs35 + + SAVE2 vs0,vs1,vs2,vs3,CO,0 + addi CO, CO, 32 +.endm + +/********************************************************************************************** +* + +.macros for N=1 and M=1 +**********************************************************************************************/ + +.macro ZERO1x1 + xxlxor vs0, vs0, vs0 + xxlxor vs1, vs1, vs1 +.endm + + +.macro LOAD1x1 + LOAD1x1O 0,0 +.endm + + +.macro LOAD1x1O OffsetA,OffsetB + lxv vs48,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs32, (0+\OffsetA)(AO) // load real,imag from A + xxswapd vs49, vs48 + +.endm + + +.macro END1x1_WITHOUT_ADD + END1x1 AO,BO,0,0 +.endm + + +.macro END1x1 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs0, vs32, vs48 + xvmaddadp vs1, vs32, vs49 +.endm + + +.macro LOAD1x1_2 + LOAD1x1_2O 0,0 +.endm + + +.macro LOAD1x1_2O OffsetA,OffsetB + lxv vs48,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs52, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs49, vs48 + + lxv vs32, (0+\OffsetA)(AO) // load real,imag from A + lxv vs40, (16+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END1x1_2 + /*for load2 offset will be 32 and 32*/ + KERNEL1x1_2 AO,BO, 32,32,0 ,1,1 +.endm + + + +.macro KERNEL1x1_E2 OffsetA,OffsetB, Index,IsLast + KERNEL1x1_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL1x1_L2 OffsetA,OffsetB, Index,IsLast + KERNEL1x1_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL1x1_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xxswapd vs53, vs52 + xvmaddadp vs0, vs32, vs48 + xvmaddadp vs1, vs32, vs49 +.if \Complete==0 + lxv vs32, DISP2(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A +.endif +.if \Complete==0 + lxv vs48, DISP2(\Index, 0+\OffsetB)(\BREG) // load real imag from B +.endif +.if \Complete==0 + xxswapd vs49, vs48 +.endif + xvmaddadp vs0, vs40, vs52 + xvmaddadp vs1, vs40, vs53 +.if \Complete==0 + lxv vs40, DISP2(\Index,16+0+ \OffsetA)(\AREG) // load real,imag from A +.endif + +.if \Complete==0 + lxv vs52, DISP2(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP2(\Index,\OffsetA) + addi \BREG, \BREG, DISP2(\Index,\OffsetB) +.else + addi 
\AREG, \AREG, DISP2(\Index,32) + addi \BREG, \BREG, DISP2(\Index,32) +.endif +.endif +.endm + + + +.macro KERNEL1x1 + LOAD1x1 + END1x1 AO, BO, 16,16 +.endm + + + +.macro SAVE1x1 + SAVE1 vs0,vs1,CO,0 + addi CO, CO, 16 +.endm + +/****************************TRMM POINTER REFRESH + +.macroSES*************************/ + + +.macro SHIFT_REG REG1,REG2,SHIFT_VAL + .if \SHIFT_VAL==16 + slwi \REG1, \REG2, 8 + .elseif \SHIFT_VAL==8 + slwi \REG1, \REG2, 7 + .elseif \SHIFT_VAL==4 + slwi \REG1, \REG2, 6 + .elseif \SHIFT_VAL==2 + slwi \REG1, \REG2, 5 + .elseif \SHIFT_VAL==1 + slwi \REG1, \REG2, 4 + .endif +.endm +/* +//#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) +// ptrbb = bb; +// #else +// ptrba += off*16; +// ptrbb = bb + off*2; +// #endif +*/ + + +.macro REFRESH_POINTERS PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B + #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + /* ptrbb = bb;*/ + mr \PTR_B,\B_VAL /* refresh BPOINT */ + #else + /* + // ptrba =ptrba+ off*C_A; + // ptrbb = bb + off*C_B; + */ + SHIFT_REG T4,\OFF_VAL,\C_B /* Number of values in B shifted */ + SHIFT_REG T2,\OFF_VAL,\C_A /* Number of values in A shifted */ + add \PTR_B, \B_VAL , T4 /* Add values to BO */ + add \PTR_A, \PTR_A, T2 /* Add values to AO */ + #endif +.endm + +/* +// #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) +// temp = bk-off; +// #elif defined(LEFT) +// temp = off+16; // number of values in A +// #else +// temp = off+2; // number of values in B +// #endif +*/ + + +.macro REFRESH_TEMP_BK TEMP_BK,BK_VAL,OFF_VAL,INCR_A,INCR_B + #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + /* temp = bk-off;*/ + sub \TEMP_BK,\BK_VAL,\OFF_VAL + #elif defined(LEFT) + /* temp = off+INCR_A; // number of values in A */ + addi \TEMP_BK, \OFF_VAL, \INCR_A + #else + /* temp = off+INCR_B // number of values in B*/ + addi \TEMP_BK,\OFF_VAL, \INCR_B + #endif +.endm +/* +// #if ( defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) +// temp = bk - off; +// #ifdef LEFT +// temp -= 16; // number of values in A +// #else +// temp -= 2; // number of values in B +// #endif +// ptrba += temp*16; +// ptrbb += temp*2; +// #endif +// #ifdef LEFT +// off += 16; // number of values in A +// #endif +*/ + + + +.macro REFRESH_AFTER_SAVE TEMP_BK,BK_VAL,OFF_VAL,PTR_B,PTR_A,C_A,C_B + #if ( defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + /*temp = bk - off;*/ + sub \TEMP_BK,\BK_VAL,\OFF_VAL + #ifdef LEFT + /*temp -= 8; // number of values in A*/ + addi \TEMP_BK,\TEMP_BK,-\C_A + #else + /*temp -= 4; // number of values in B*/ + addi \TEMP_BK,\TEMP_BK,-\C_B + #endif + /*ptrba += temp*C_A; + ptrbb += temp*C_B;*/ + SHIFT_REG T4,\TEMP_BK,\C_A + SHIFT_REG T2,\TEMP_BK,\C_B + add \PTR_A, \PTR_A,T4/*ptrba+temp*C_A*/ + add \PTR_B, \PTR_B,T2 + #endif + #ifdef LEFT + /*off += 8; // number of values in A*/ + addi \OFF_VAL,\OFF_VAL,\C_A + #endif +.endm + diff --git a/kernel/power/zgemm_macros_power9.S b/kernel/power/zgemm_macros_power9.S new file mode 100644 index 000000000..8670e9574 --- /dev/null +++ b/kernel/power/zgemm_macros_power9.S @@ -0,0 +1,1825 @@ +/*************************************************************************** +Copyright (c) 2013-2019, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define unit_size 16 +#define DISP32(ind,disp) (ind*unit_size*32+disp) +#define DISP16(ind,disp) (ind*unit_size*16+disp) +#define DISP8(ind,disp) (ind*unit_size*8+disp) +#define DISP4(ind,disp) (ind*unit_size*4+disp) +#define DISP2(ind,disp) (ind*unit_size*2+disp) +#define DISP1(ind,disp) (ind*unit_size+disp) +#define DISPX(disp) (disp) +/* HELPERS FOR SAVE */ +/* {r0,i0} and {r1,i1} into {r0,r1} {i0,i1} */ + + +.macro LOAD_COUPLE_AS_RR_II VS_OUT1,VS_OUT2,VS_TEMP1,VS_TEMP2,REG,LOFFSET +#ifndef TRMMKERNEL + lxv \VS_TEMP1, DISPX(\LOFFSET)(\REG) + lxv \VS_TEMP2, DISPX(\LOFFSET+16)(\REG) + xxmrgld \VS_OUT1,\VS_TEMP1,\VS_TEMP2 + xxmrghd \VS_OUT2,\VS_TEMP1,\VS_TEMP2 +#endif +.endm +/*from 2 result {a0r*br,a0i*bi} and {a1r*br,a1i*bi} pack into {a0r*br,a1r*br} and {a0i*bi,a1i*bi}*/ + + +.macro RESULT_INTO_REALREAL_IMAGEIMAGE VSIN1,VSIN2,VSOUT1,VSOUT2 + xxmrgld \VSOUT1, \VSIN1,\VSIN2 /* real*real from 2 results*/ + xxmrghd \VSOUT2, \VSIN1,\VSIN2 /* imag*imag from 2 results*/ +.endm +/*from 2 result {a0r*bi,a0i*br} and {a1r*bi,a1i*br} pack into {a0r*bi,a1r*bi} and {a0i*br,a1i*br}*/ + + +.macro RESULT_INTO_REALIMAG_IMAGREAL VSIN1,VSIN2,VSOUT1,VSOUT2 + xxmrgld \VSOUT1, \VSIN1,\VSIN2 /* real*imag */ + xxmrghd \VSOUT2, \VSIN1,\VSIN2 /* imag*real*/ +.endm +/* {a0r*br op a0i*bi ,a1r*br op a1i*bi} ~ {r0,r1}; {a0r*bi op a0i*br ,a1r*bi op a1i*br} ~ {i0,i1}*/ + + +.macro AGGREGATE_REALS_IMAGES VSINR_OUT1,VSINR,VSINI_OUT2,VSINI +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) + xvsubdp \VSINR_OUT1,\VSINR_OUT1,\VSINR + xvadddp \VSINI_OUT2,\VSINI_OUT2,\VSINI +#elif defined(CN) || defined(CT) || defined(RN) || defined(RT) + xvadddp \VSINR_OUT1,\VSINR_OUT1,\VSINR + xvsubdp \VSINI_OUT2,\VSINI_OUT2,\VSINI +#elif defined(NC) || defined(TC) || defined(NR) || defined(TR) + xvadddp \VSINR_OUT1,\VSINR_OUT1,\VSINR + xvsubdp \VSINI_OUT2,\VSINI,\VSINI_OUT2 +#else // CC || CR || RC || RR + /*we will assume {-alpha_r,-alpha_i} for this case */ + /*i1i2-r1r2 so we will negate alpha real instead to fix sign*/ + xvsubdp \VSINR_OUT1,\VSINR,\VSINR_OUT1 + /*we will negate alpha image instead instead to fix sign*/ + xvadddp \VSINI_OUT2,\VSINI_OUT2,\VSINI +#endif 
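+/*
+   The MULT_APLHA_PART1/PART2 pair below applies complex alpha with plain
+   FMAs.  Comment-only C sketch of the net effect per lane, with R/I the
+   aggregated real/imaginary results and c_r/c_i the loaded C values
+   (illustrative names, not registers):
+
+   // non-TRMM path (C is read and updated):
+   out_r = c_r + R*alpha_r - I*alpha_i;
+   out_i = c_i + R*alpha_i + I*alpha_r;
+   // TRMMKERNEL path (C is write-only; xvmuldp replaces the FMA):
+   out_r = R*alpha_r - I*alpha_i;
+   out_i = R*alpha_i + I*alpha_r;
+
+   PART1 forms the alpha_i terms (xvmsubadp/xvmaddadp), PART2 folds in the
+   alpha_r terms, so the cross-term sign falls out of the msub/madd pairing.
+*/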
+.endm +/* {i0,i1} * {alpha_i,alpha_i} - VSOUT1 ;VSOUT2 + {r0,r1}*{alpha_i,alpha_i} */ + + +.macro MULT_APLHA_PART1 VSINRR,VSINII,VSOUT1,VSOUT2 +#ifndef TRMMKERNEL + xvmsubadp \VSOUT1,\VSINII, alpha_i + xvmaddadp \VSOUT2,\VSINRR, alpha_i +#else + xvmuldp \VSOUT1,\VSINII, alpha_i + xvmuldp \VSOUT2,\VSINRR, alpha_i +#endif +.endm +/* {r0,r1} * {alpha_r,alpha_r} - VSOUT1 ;VSOUT2 + {i0,i1} * {alpha_r,alpha_r} */ + + +.macro MULT_APLHA_PART2 VSINRR,VSINII,VSOUT1,VSOUT2 + xvmsubadp \VSOUT1,\VSINRR, alpha_r + xvmaddadp \VSOUT2,\VSINII, alpha_r +.endm +/* unpack to store 2{r,r} {i,i} into {r,i} {r,i} (big endian because of stxv) */ + + +.macro UNPACK_FOR_STORE VSIN1,VSIN2,VSOUT1,VSOUT2 + xxmrghd \VSOUT1,\VSIN2,\VSIN1 + xxmrgld \VSOUT2,\VSIN2,\VSIN1 +.endm + + +.macro STORE_COUPLE REG,LOFFSET,VSIN1,VSIN2 + stxv \VSIN1, DISPX(\LOFFSET)(\REG) + stxv \VSIN2, DISPX(\LOFFSET+16)(\REG) +.endm + + +.macro SAVE8 VSRes1,VSRes2,VSRes3,VSRes4,VSRes5,VSRes6,VSRes7,VSRes8,VSRes9,VSRes10,VSRes11,VSRes12,VSRes13,VSRes14,VSRes15,VSRes16,BASE_REG,LOFFSET + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes1,\VSRes3,vs2,vs3 + LOAD_COUPLE_AS_RR_II vs14,vs15,vs18,vs19,\BASE_REG,\LOFFSET + RESULT_INTO_REALIMAG_IMAGREAL \VSRes2,\VSRes4,vs4,vs5 + LOAD_COUPLE_AS_RR_II vs16,vs17,vs20,vs21,\BASE_REG,(\LOFFSET+32) + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes5,\VSRes7,vs6,vs7 + LOAD_COUPLE_AS_RR_II vs24,vs25,vs18,vs19,\BASE_REG,(\LOFFSET +64) + RESULT_INTO_REALIMAG_IMAGREAL \VSRes6,\VSRes8,vs8,vs9 + LOAD_COUPLE_AS_RR_II vs26,vs27,vs20,vs21,\BASE_REG,(\LOFFSET+96) + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes9,\VSRes11,vs10,vs11 + AGGREGATE_REALS_IMAGES vs2,vs3,vs4,vs5 + RESULT_INTO_REALIMAG_IMAGREAL \VSRes10,\VSRes12,vs12,vs13 + AGGREGATE_REALS_IMAGES vs6,vs7,vs8,vs9 + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes13,\VSRes15,\VSRes1,\VSRes2 + MULT_APLHA_PART1 vs2,vs4, vs14,vs15 + RESULT_INTO_REALIMAG_IMAGREAL \VSRes14,\VSRes16,\VSRes3,\VSRes4 + MULT_APLHA_PART1 vs6,vs8,vs16,vs17 + MULT_APLHA_PART2 vs2,vs4,vs14,vs15 + AGGREGATE_REALS_IMAGES vs10,vs11,vs12,vs13 + MULT_APLHA_PART2 vs6,vs8,vs16,vs17 + AGGREGATE_REALS_IMAGES \VSRes1,\VSRes2,\VSRes3,\VSRes4 + UNPACK_FOR_STORE vs14,vs15,vs7,vs9 + MULT_APLHA_PART1 vs10,vs12, vs24,vs25 + UNPACK_FOR_STORE vs16,vs17,vs3,vs5 + MULT_APLHA_PART1 \VSRes1,\VSRes3, vs26,vs27 + STORE_COUPLE \BASE_REG,\LOFFSET,vs7,vs9 + MULT_APLHA_PART2 vs10,vs12,vs24,vs25 + STORE_COUPLE \BASE_REG,(\LOFFSET+32),vs3,vs5 + MULT_APLHA_PART2 \VSRes1,\VSRes3, vs26,vs27 + UNPACK_FOR_STORE vs24,vs25,vs10,vs12 + UNPACK_FOR_STORE vs26,vs27,\VSRes1,\VSRes3 + STORE_COUPLE \BASE_REG,(\LOFFSET +64),vs10,vs12 + STORE_COUPLE \BASE_REG,(\LOFFSET+96),\VSRes1,\VSRes3 +.endm + + +.macro SAVE4 VSRes1,VSRes2,VSRes3,VSRes4,VSRes5,VSRes6,VSRes7,VSRes8,BASE_REG,LOFFSET + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes1,\VSRes3,vs2,vs3 + LOAD_COUPLE_AS_RR_II vs14,vs15,vs18,vs19,\BASE_REG,\LOFFSET + RESULT_INTO_REALIMAG_IMAGREAL \VSRes2,\VSRes4,vs4,vs5 + LOAD_COUPLE_AS_RR_II vs16,vs17,vs20,vs21,\BASE_REG,(\LOFFSET+32) + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes5,\VSRes7,vs6,vs7 + RESULT_INTO_REALIMAG_IMAGREAL \VSRes6,\VSRes8,vs8,vs9 + AGGREGATE_REALS_IMAGES vs2,vs3,vs4,vs5 + AGGREGATE_REALS_IMAGES vs6,vs7,vs8,vs9 + MULT_APLHA_PART1 vs2,vs4, vs14,vs15 + MULT_APLHA_PART1 vs6,vs8, vs16,vs17 + MULT_APLHA_PART2 vs2,vs4, vs14,vs15 + MULT_APLHA_PART2 vs6,vs8,vs16,vs17 + UNPACK_FOR_STORE vs14,vs15,vs7,vs9 + UNPACK_FOR_STORE vs16,vs17,vs3,vs5 + STORE_COUPLE \BASE_REG,\LOFFSET,vs7,vs9 + STORE_COUPLE \BASE_REG,(\LOFFSET+32),vs3,vs5 +.endm + + + +.macro SAVE2 
VSRes1,VSRes2,VSRes3,VSRes4,BASE_REG,LOFFSET + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes1,\VSRes3,vs2,vs3 + LOAD_COUPLE_AS_RR_II vs14,vs15,vs18,vs19,\BASE_REG,\LOFFSET + RESULT_INTO_REALIMAG_IMAGREAL \VSRes2,\VSRes4,vs4,vs5 + AGGREGATE_REALS_IMAGES vs2,vs3,vs4,vs5 + MULT_APLHA_PART1 vs2,vs4, vs14,vs15 + MULT_APLHA_PART2 vs2,vs4, vs14,vs15 + UNPACK_FOR_STORE vs14,vs15,vs7,vs9 + STORE_COUPLE \BASE_REG,\LOFFSET,vs7,vs9 +.endm + + + +.macro SAVE1 VSRes1,VSRes2,BASE_REG,LOFFSET + RESULT_INTO_REALREAL_IMAGEIMAGE \VSRes1,\VSRes1,vs2,vs3 +#ifndef TRMMKERNEL + lxv vs18, (\LOFFSET)(\BASE_REG) + xxmrgld vs14,vs18,vs18 + xxmrghd vs15,vs18,vs18 +#endif + RESULT_INTO_REALIMAG_IMAGREAL \VSRes2,\VSRes2,vs4,vs5 + AGGREGATE_REALS_IMAGES vs2,vs3,vs4,vs5 + MULT_APLHA_PART1 vs2,vs4, vs14,vs15 + MULT_APLHA_PART2 vs2,vs4, vs14,vs15 + UNPACK_FOR_STORE vs14,vs15,vs7,vs9 + xxmrghd vs7,vs15,vs14 + stxv vs7, (\LOFFSET)(\BASE_REG) +.endm +/********************************************************************************************** +* + +.macros for N=2 and M=8 +**********************************************************************************************/ + +.macro Zero2x8 + xxlxor vs32, vs32, vs32 + xxlxor vs33, vs33, vs33 + xxlxor vs34, vs34, vs34 + xxlxor vs35, vs35, vs35 + xxlxor vs36, vs36, vs36 + xxlxor vs37, vs37, vs37 + xxlxor vs38, vs38, vs38 + xxlxor vs39, vs39, vs39 + xxlxor vs40, vs40, vs40 + xxlxor vs41, vs41, vs41 + xxlxor vs42, vs42, vs42 + xxlxor vs43, vs43, vs43 + xxlxor vs44, vs44, vs44 + xxlxor vs45, vs45, vs45 + xxlxor vs46, vs46, vs46 + xxlxor vs47, vs47, vs47 + xxlxor vs48, vs48, vs48 + xxlxor vs49, vs49, vs49 + xxlxor vs50, vs50, vs50 + xxlxor vs51, vs51, vs51 + xxlxor vs52, vs52, vs52 + xxlxor vs53, vs53, vs53 + xxlxor vs54, vs54, vs54 + xxlxor vs55, vs55, vs55 + xxlxor vs56, vs56, vs56 + xxlxor vs57, vs57, vs57 + xxlxor vs58, vs58, vs58 + xxlxor vs59, vs59, vs59 + xxlxor vs60, vs60, vs60 + xxlxor vs61, vs61, vs61 + xxlxor vs62, vs62, vs62 + xxlxor vs63, vs63, vs63 +.endm + + +.macro LOAD2x8 + LOAD2x8O 0,0 +.endm + + +.macro LOAD2x8O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs18, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs17, vs16 + xxswapd vs19, vs18 + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs2, (32+\OffsetA)(AO) // load real,imag from A + lxv vs3, (48+\OffsetA)(AO) // load real,imag from A + lxv vs4, (64+\OffsetA)(AO) // load real,imag from A + lxv vs5, (80+\OffsetA)(AO) // load real,imag from A + lxv vs6, (96+\OffsetA)(AO) // load real,imag from A + lxv vs7, (112+\OffsetA)(AO) // load real,imag from A + +.endm + + +.macro END2x8_NORMAL + END2x8 AO,BO,128,32 +.endm + + +.macro END2x8_WITHOUT_ADD + END2x8 AO,BO,0,0 +.endm + + +.macro END2x8 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs48, vs0, vs18 + xvmaddadp vs33, vs0, vs17 + xvmaddadp vs49, vs0, vs19 + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs50, vs1, vs18 + xvmaddadp vs35, vs1, vs17 + xvmaddadp vs51, vs1, vs19 + xvmaddadp vs36, vs2, vs16 + xvmaddadp vs52, vs2, vs18 + xvmaddadp vs37, vs2, vs17 + xvmaddadp vs53, vs2, vs19 + xvmaddadp vs38, vs3, vs16 + xvmaddadp vs54, vs3, vs18 + xvmaddadp vs39, vs3, vs17 + xvmaddadp vs55, vs3, vs19 + xvmaddadp vs40, vs4, vs16 + xvmaddadp vs56, vs4, vs18 + xvmaddadp vs41, vs4, vs17 + xvmaddadp vs57, vs4, vs19 + xvmaddadp vs42, vs5, vs16 + xvmaddadp vs58, 
vs5, vs18 + xvmaddadp vs43, vs5, vs17 + xvmaddadp vs59, vs5, vs19 + xvmaddadp vs44, vs6, vs16 + xvmaddadp vs60, vs6, vs18 + xvmaddadp vs45, vs6, vs17 + xvmaddadp vs61, vs6, vs19 + xvmaddadp vs46, vs7, vs16 + xvmaddadp vs62, vs7, vs18 + xvmaddadp vs47, vs7, vs17 + xvmaddadp vs63, vs7, vs19 +.endm + + +.macro LOAD2x8_2 + LOAD2x8_2O 0,0 +.endm + + +.macro LOAD2x8_2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs18, (\OffsetB+16)(BO) // load real,imag from B + lxv vs20, (\OffsetB+32)(BO) // load real,imag from B + lxv vs22, (\OffsetB+48)(BO) // load real,imag from B + xxswapd vs17, vs16 + xxswapd vs19, vs18 + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs2, (32+\OffsetA)(AO) // load real,imag from A + lxv vs3, (48+\OffsetA)(AO) // load real,imag from A + lxv vs4, (64+\OffsetA)(AO) // load real,imag from A + lxv vs5, (80+\OffsetA)(AO) // load real,imag from A + lxv vs6, (96+\OffsetA)(AO) // load real,imag from A + lxv vs7, (112+\OffsetA)(AO) // load real,imag from A + lxv vs8, (128+0+\OffsetA)(AO) // load real,imag from A + lxv vs9, (128+16+\OffsetA)(AO) // load real,imag from A + lxv vs10, (128+32+\OffsetA)(AO) // load real,imag from A + lxv vs11, (128+48+\OffsetA)(AO) // load real,imag from A + lxv vs12, (128+64+\OffsetA)(AO) // load real,imag from A + lxv vs13, (128+80+\OffsetA)(AO) // load real,imag from A + lxv vs14, (128+96+\OffsetA)(AO) // load real,imag from A + lxv vs15, (128+112+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END2x8_2 + /*for load2 offset will be 256 and 64*/ + KERNEL2x8_2 AO,BO, 256,64,0 ,1,1 +.endm + + + +.macro KERNEL2x8_E2 OffsetA,OffsetB, Index,IsLast + KERNEL2x8_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL2x8_L2 OffsetA,OffsetB, Index,IsLast + KERNEL2x8_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL2x8_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs48, vs0, vs18 + xvmaddadp vs33, vs0, vs17 + xvmaddadp vs49, vs0, vs19 + xxswapd vs21, vs20 + xxswapd vs23, vs22 + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs50, vs1, vs18 + xvmaddadp vs35, vs1, vs17 + xvmaddadp vs51, vs1, vs19 +.if \Complete==0 + lxv vs0, DISP16(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A + lxv vs1, DISP16(\Index,16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs36, vs2, vs16 + xvmaddadp vs52, vs2, vs18 + xvmaddadp vs37, vs2, vs17 + xvmaddadp vs53, vs2, vs19 + xvmaddadp vs38, vs3, vs16 + xvmaddadp vs54, vs3, vs18 + xvmaddadp vs39, vs3, vs17 + xvmaddadp vs55, vs3, vs19 +.if \Complete==0 + lxv vs2, DISP16(\Index,32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs3, DISP16(\Index,48 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs40, vs4, vs16 + xvmaddadp vs56, vs4, vs18 + xvmaddadp vs41, vs4, vs17 + xvmaddadp vs57, vs4, vs19 + xvmaddadp vs42, vs5, vs16 + xvmaddadp vs58, vs5, vs18 + xvmaddadp vs43, vs5, vs17 + xvmaddadp vs59, vs5, vs19 +.if \Complete==0 + lxv vs4, DISP16(\Index,64+ \OffsetA)(\AREG) // load real,imag from A + lxv vs5, DISP16(\Index,64+16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs44, vs6, vs16 + xvmaddadp vs60, vs6, vs18 + xvmaddadp vs45, vs6, vs17 + xvmaddadp vs61, vs6, vs19 + xvmaddadp vs46, vs7, vs16 + xvmaddadp vs62, vs7, vs18 + xvmaddadp vs47, vs7, vs17 + xvmaddadp vs63, vs7, vs19 +.if \Complete==0 + lxv vs16, DISP4(\Index, 0+\OffsetB)(\BREG) // load real imag from B + lxv vs18, DISP4(\Index, 
16+\OffsetB)(\BREG) // load real,imag from B +.endif + xvmaddadp vs32, vs8, vs20 + xvmaddadp vs48, vs8, vs22 +.if \Complete==0 + lxv vs6, DISP16(\Index,64+32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs7, DISP16(\Index,64+48 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs33, vs8, vs21 + xvmaddadp vs49, vs8, vs23 +.if \Complete==0 + xxswapd vs17, vs16 + xxswapd vs19, vs18 +.endif + xvmaddadp vs34, vs9, vs20 + xvmaddadp vs50, vs9, vs22 + xvmaddadp vs35, vs9, vs21 + xvmaddadp vs51, vs9, vs23 +.if \Complete==0 + lxv vs8, DISP16(\Index,128+ + \OffsetA)(\AREG) // load real,imag from A + lxv vs9, DISP16(\Index,128+16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs36, vs10, vs20 + xvmaddadp vs52, vs10, vs22 + xvmaddadp vs37, vs10, vs21 + xvmaddadp vs53, vs10, vs23 + xvmaddadp vs38, vs11, vs20 + xvmaddadp vs54, vs11, vs22 + xvmaddadp vs39, vs11, vs21 + xvmaddadp vs55, vs11, vs23 +.if \Complete==0 + lxv vs10, DISP16(\Index,128+32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs11, DISP16(\Index,128+48 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs40, vs12, vs20 + xvmaddadp vs56, vs12, vs22 + xvmaddadp vs41, vs12, vs21 + xvmaddadp vs57, vs12, vs23 + xvmaddadp vs42, vs13, vs20 + xvmaddadp vs58, vs13, vs22 + xvmaddadp vs43, vs13, vs21 + xvmaddadp vs59, vs13, vs23 +.if \Complete==0 + lxv vs12, DISP16(\Index, 192 + \OffsetA)(\AREG) // load real,imag from A + lxv vs13, DISP16(\Index,192 +16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs44, vs14, vs20 + xvmaddadp vs60, vs14, vs22 + xvmaddadp vs45, vs14, vs21 + xvmaddadp vs61, vs14, vs23 + xvmaddadp vs46, vs15, vs20 + xvmaddadp vs62, vs15, vs22 + xvmaddadp vs47, vs15, vs21 + xvmaddadp vs63, vs15, vs23 +.if \Complete==0 + lxv vs14, DISP16(\Index,192 +32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs15, DISP16(\Index,192 +48 + \OffsetA)(\AREG) // load real,imag from A + lxv vs20, DISP4(\Index, 32+\OffsetB)(\BREG) // load real,imag from B + lxv vs22, DISP4(\Index, 48+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP16(\Index,\OffsetA) + addi \BREG, \BREG, DISP4(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP16(\Index,256) + addi \BREG, \BREG, DISP4(\Index,64) +.endif +.endif +.endm + + + + + +.macro KERNEL2x8 + LOAD2x8 + END2x8 AO, BO, 128,32 +.endm + + +.macro SAVE2x8 + add T1, CO ,LDC + SAVE8 vs32,vs33,vs34,vs35,vs36,vs37,vs38,vs39,vs40,vs41,vs42,vs43,vs44,vs45,vs46,vs47,CO,0 + SAVE8 vs48,vs49,vs50,vs51,vs52,vs53,vs54,vs55,vs56,vs57,vs58,vs59,vs60,vs61,vs62,vs63,T1,0 + addi CO, CO, 128 +.endm +/********************************************************************************************** +* + +.macros for N=2 and M=4 +**********************************************************************************************/ + + +.macro Zero2x4 + xxlxor vs32, vs32, vs32 + xxlxor vs33, vs33, vs33 + xxlxor vs34, vs34, vs34 + xxlxor vs35, vs35, vs35 + xxlxor vs36, vs36, vs36 + xxlxor vs37, vs37, vs37 + xxlxor vs38, vs38, vs38 + xxlxor vs39, vs39, vs39 + xxlxor vs40, vs40, vs40 + xxlxor vs41, vs41, vs41 + xxlxor vs42, vs42, vs42 + xxlxor vs43, vs43, vs43 + xxlxor vs44, vs44, vs44 + xxlxor vs45, vs45, vs45 + xxlxor vs46, vs46, vs46 + xxlxor vs47, vs47, vs47 +.endm + + +.macro LOAD2x4 + LOAD2x4O 0,0 +.endm + + +.macro LOAD2x4O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs18, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs17, vs16 + xxswapd vs19, vs18 + lxv vs0, (0+\OffsetA)(AO) // 
load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs2, (32+\OffsetA)(AO) // load real,imag from A + lxv vs3, (48+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END2x4_NORMAL + END2x4 AO,BO,64,32 +.endm + + +.macro END2x4_WITHOUT_ADD + END2x4 AO,BO,0,0 +.endm + + +.macro END2x4 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs40, vs0, vs18 + xvmaddadp vs33, vs0, vs17 + xvmaddadp vs41, vs0, vs19 + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs42, vs1, vs18 + xvmaddadp vs35, vs1, vs17 + xvmaddadp vs43, vs1, vs19 + xvmaddadp vs36, vs2, vs16 + xvmaddadp vs44, vs2, vs18 + xvmaddadp vs37, vs2, vs17 + xvmaddadp vs45, vs2, vs19 + xvmaddadp vs38, vs3, vs16 + xvmaddadp vs46, vs3, vs18 + xvmaddadp vs39, vs3, vs17 + xvmaddadp vs47, vs3, vs19 + +.endm + + +.macro LOAD2x4_2 + LOAD2x4_2O 0,0 +.endm + + +.macro LOAD2x4_2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs18, (\OffsetB+16)(BO) // load real,imag from B + lxv vs20, (\OffsetB+32)(BO) // load real,imag from B + lxv vs22, (\OffsetB+48)(BO) // load real,imag from B + xxswapd vs17, vs16 + xxswapd vs19, vs18 + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs2, (32+\OffsetA)(AO) // load real,imag from A + lxv vs3, (48+\OffsetA)(AO) // load real,imag from A + lxv vs8, (64+\OffsetA)(AO) // load real,imag from A + lxv vs9, (80+\OffsetA)(AO) // load real,imag from A + lxv vs10, (96+\OffsetA)(AO) // load real,imag from A + lxv vs11, (112+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END2x4_2 + /*for load2 offset will be 128 and 64*/ + KERNEL2x4_2 AO,BO, 128,64,0 ,1,1 +.endm + + + +.macro KERNEL2x4_E2 OffsetA,OffsetB, Index,IsLast + KERNEL2x4_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL2x4_L2 OffsetA,OffsetB, Index,IsLast + KERNEL2x4_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL2x4_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs40, vs0, vs18 + xvmaddadp vs33, vs0, vs17 + xvmaddadp vs41, vs0, vs19 + xxswapd vs21, vs20 + xxswapd vs23, vs22 + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs42, vs1, vs18 + xvmaddadp vs35, vs1, vs17 + xvmaddadp vs43, vs1, vs19 +.if \Complete==0 + lxv vs0, DISP8(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A + lxv vs1, DISP8(\Index,16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs36, vs2, vs16 + xvmaddadp vs44, vs2, vs18 + xvmaddadp vs37, vs2, vs17 + xvmaddadp vs45, vs2, vs19 + xvmaddadp vs38, vs3, vs16 + xvmaddadp vs46, vs3, vs18 + xvmaddadp vs39, vs3, vs17 + xvmaddadp vs47, vs3, vs19 +.if \Complete==0 + lxv vs2, DISP8(\Index,32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs3, DISP8(\Index,48 + \OffsetA)(\AREG) // load real,imag from A +.endif + +.if \Complete==0 + lxv vs16, DISP4(\Index, 0+\OffsetB)(\BREG) // load real imag from B + lxv vs18, DISP4(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif + xvmaddadp vs32, vs8, vs20 + xvmaddadp vs40, vs8, vs22 + xvmaddadp vs33, vs8, vs21 + xvmaddadp vs41, vs8, vs23 +.if \Complete==0 + xxswapd vs17, vs16 + xxswapd vs19, vs18 +.endif + xvmaddadp vs34, vs9, vs20 + xvmaddadp vs42, vs9, vs22 + xvmaddadp vs35, vs9, vs21 + xvmaddadp vs43, vs9, vs23 +.if \Complete==0 + lxv vs8, DISP8(\Index,64+0+ \OffsetA)(\AREG) // load real,imag from A + lxv vs9, DISP8(\Index,64+16 + 
\OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs36, vs10, vs20 + xvmaddadp vs44, vs10, vs22 + xvmaddadp vs37, vs10, vs21 + xvmaddadp vs45, vs10, vs23 + xvmaddadp vs38, vs11, vs20 + xvmaddadp vs46, vs11, vs22 + xvmaddadp vs39, vs11, vs21 + xvmaddadp vs47, vs11, vs23 +.if \Complete==0 + lxv vs10, DISP8(\Index,64+32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs11, DISP8(\Index,64+48 + \OffsetA)(\AREG) // load real,imag from A +.endif + +.if \Complete==0 + lxv vs20, DISP4(\Index, 32+\OffsetB)(\BREG) // load real,imag from B + lxv vs22, DISP4(\Index, 48+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP8(\Index,\OffsetA) + addi \BREG, \BREG, DISP4(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP8(\Index,128) + addi \BREG, \BREG, DISP4(\Index,64) +.endif +.endif +.endm + + + +.macro KERNEL2x4 + LOAD2x4 + END2x4 AO, BO, 64,32 +.endm + + + +.macro SAVE2x4 + add T1, CO ,LDC + SAVE4 vs32,vs33,vs34,vs35,vs36,vs37,vs38,vs39,CO,0 + SAVE4 vs40,vs41,vs42,vs43,vs44,vs45,vs46,vs47,T1,0 + addi CO, CO, 64 +.endm +/********************************************************************************************** +* + +.macros for N=2 and M=2 +**********************************************************************************************/ + + +.macro Zero2x2 + xxlxor vs32, vs32, vs32 + xxlxor vs33, vs33, vs33 + xxlxor vs34, vs34, vs34 + xxlxor vs35, vs35, vs35 + xxlxor vs36, vs36, vs36 + xxlxor vs37, vs37, vs37 + xxlxor vs38, vs38, vs38 + xxlxor vs39, vs39, vs39 + +.endm + + +.macro LOAD2x2 + LOAD2x2O 0,0 +.endm + + +.macro LOAD2x2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs18, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs17, vs16 + xxswapd vs19, vs18 + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + +.endm + + +.macro END2x2_NORMAL + END2x2 AO,BO,32,32 +.endm + + +.macro END2x2_WITHOUT_ADD + END2x2 AO,BO,0,0 +.endm + + +.macro END2x2 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs36, vs0, vs18 + xvmaddadp vs33, vs0, vs17 + xvmaddadp vs37, vs0, vs19 + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs38, vs1, vs18 + xvmaddadp vs35, vs1, vs17 + xvmaddadp vs39, vs1, vs19 + +.endm + + +.macro LOAD2x2_2 + LOAD2x2_2O 0,0 +.endm + + +.macro LOAD2x2_2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs18, (\OffsetB+16)(BO) // load real,imag from B + lxv vs20, (\OffsetB+32)(BO) // load real,imag from B + lxv vs22, (\OffsetB+48)(BO) // load real,imag from B + xxswapd vs17, vs16 + xxswapd vs19, vs18 + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs8, (32+\OffsetA)(AO) // load real,imag from A + lxv vs9, (48+\OffsetA)(AO) // load real,imag from A + +.endm + + +.macro END2x2_2 + /*for load2 offset will be 64 and 64*/ + KERNEL2x2_2 AO,BO, 64,64,0 ,1,1 +.endm + + + +.macro KERNEL2x2_E2 OffsetA,OffsetB, Index,IsLast + KERNEL2x2_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL2x2_L2 OffsetA,OffsetB, Index,IsLast + KERNEL2x2_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL2x2_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs36, vs0, vs18 + xvmaddadp vs33, vs0, vs17 + xvmaddadp vs37, vs0, vs19 + xxswapd vs21, vs20 + xxswapd vs23, vs22 + 
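+/*
+   Comment-only note: vs21/vs23 produced just above are the doubleword-swapped
+   copies of the second B pair (vs20/vs22), prepared early for the FMAs at the
+   bottom of this macro.  With b = {br,bi} and bs = {bi,br}, each accumulator
+   pair gathers, per complex element {ar,ai} of A (C sketch, illustrative
+   names):
+
+   acc_even += (ar*br, ai*bi);   // xvmaddadp accN,   vsA, b
+   acc_odd  += (ar*bi, ai*br);   // xvmaddadp accN+1, vsA, bs
+
+   SAVE2x2 later merges the even/odd pairs and fixes signs and alpha via
+   AGGREGATE_REALS_IMAGES and MULT_APLHA_PART1/2.
+*/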
xvmaddadp vs34, vs1, vs16 + xvmaddadp vs38, vs1, vs18 + xvmaddadp vs35, vs1, vs17 + xvmaddadp vs39, vs1, vs19 +.if \Complete==0 + lxv vs0, DISP4(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A + lxv vs1, DISP4(\Index,16 + \OffsetA)(\AREG) // load real,imag from A +.endif +.if \Complete==0 + lxv vs16, DISP4(\Index, 0+\OffsetB)(\BREG) // load real imag from B + lxv vs18, DISP4(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif + xvmaddadp vs32, vs8, vs20 + xvmaddadp vs36, vs8, vs22 + xvmaddadp vs33, vs8, vs21 + xvmaddadp vs37, vs8, vs23 +.if \Complete==0 + xxswapd vs17, vs16 + xxswapd vs19, vs18 +.endif + xvmaddadp vs34, vs9, vs20 + xvmaddadp vs38, vs9, vs22 + xvmaddadp vs35, vs9, vs21 + xvmaddadp vs39, vs9, vs23 +.if \Complete==0 + lxv vs20, DISP4(\Index, 32+\OffsetB)(\BREG) // load real,imag from B + lxv vs22, DISP4(\Index, 48+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \Complete==0 + lxv vs8, DISP4(\Index,32+0+ \OffsetA)(\AREG) // load real,imag from A + lxv vs9, DISP4(\Index,32+16 + \OffsetA)(\AREG) // load real,imag from A +.endif + + + +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP4(\Index,\OffsetA) + addi \BREG, \BREG, DISP4(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP4(\Index,64) + addi \BREG, \BREG, DISP4(\Index,64) +.endif +.endif +.endm + + + +.macro KERNEL2x2 + LOAD2x2 + END2x2 AO, BO, 32,32 +.endm + + + +.macro SAVE2x2 + add T1, CO ,LDC + SAVE2 vs32,vs33,vs34,vs35,CO,0 + SAVE2 vs36,vs37,vs38,vs39,T1,0 + addi CO, CO, 32 +.endm +/********************************************************************************************** +* + +.macros for N=2 and M=1 +**********************************************************************************************/ + + + +.macro Zero2x1 + xxlxor vs32, vs32, vs32 + xxlxor vs33, vs33, vs33 + xxlxor vs34, vs34, vs34 + xxlxor vs35, vs35, vs35 + +.endm + + +.macro LOAD2x1 + LOAD2x1O 0,0 +.endm + + +.macro LOAD2x1O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs18, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs17, vs16 + xxswapd vs19, vs18 + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END2x1_NORMAL + END2x1 AO,BO,16,32 +.endm + + +.macro END2x1_WITHOUT_ADD + END2x1 AO,BO,0,0 +.endm + + +.macro END2x1 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs34, vs0, vs18 + xvmaddadp vs33, vs0, vs17 + xvmaddadp vs35, vs0, vs19 +.endm + + +.macro LOAD2x1_2 + LOAD2x1_2O 0,0 +.endm + + +.macro LOAD2x1_2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs18, (\OffsetB+16)(BO) // load real,imag from B + lxv vs20, (\OffsetB+32)(BO) // load real,imag from B + lxv vs22, (\OffsetB+48)(BO) // load real,imag from B + xxswapd vs17, vs16 + xxswapd vs19, vs18 + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs8, (16+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END2x1_2 + /*for load2 offset will be 32 and 64*/ + KERNEL2x1_2 AO,BO, 32,64,0 ,1,1 +.endm + + + +.macro KERNEL2x1_E2 OffsetA,OffsetB, Index,IsLast + KERNEL2x1_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL2x1_L2 OffsetA,OffsetB, Index,IsLast + KERNEL2x1_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL2x1_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xxswapd vs21, vs20 + xxswapd vs23, vs22 + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs34, vs0, vs18 + xvmaddadp vs33, vs0, vs17 + 
xvmaddadp vs35, vs0, vs19 +.if \Complete==0 + lxv vs0, DISP2(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A +.endif +.if \Complete==0 + lxv vs16, DISP4(\Index, 0+\OffsetB)(\BREG) // load real imag from B + lxv vs18, DISP4(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \Complete==0 + xxswapd vs17, vs16 + xxswapd vs19, vs18 +.endif + xvmaddadp vs32, vs8, vs20 + xvmaddadp vs34, vs8, vs22 + xvmaddadp vs33, vs8, vs21 + xvmaddadp vs35, vs8, vs23 +.if \Complete==0 + lxv vs8, DISP2(\Index,16+0+ \OffsetA)(\AREG) // load real,imag from A +.endif + +.if \Complete==0 + lxv vs20, DISP4(\Index, 32+\OffsetB)(\BREG) // load real,imag from B + lxv vs22, DISP4(\Index, 48+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP2(\Index,\OffsetA) + addi \BREG, \BREG, DISP4(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP2(\Index,32) + addi \BREG, \BREG, DISP4(\Index,64) +.endif +.endif +.endm + + + +.macro KERNEL2x1 + LOAD2x1 + END2x1 AO, BO, 16,32 +.endm + + + +.macro SAVE2x1 + add T1, CO ,LDC + SAVE1 vs32,vs33,CO,0 + SAVE1 vs34,vs35,T1,0 + addi CO, CO, 16 +.endm + +/********************************************************************************************** +* + +.macros for N=1 and M=8 +**********************************************************************************************/ + + +.macro Zero1x8 + xxlxor vs32, vs32, vs32 + xxlxor vs33, vs33, vs33 + xxlxor vs34, vs34, vs34 + xxlxor vs35, vs35, vs35 + xxlxor vs36, vs36, vs36 + xxlxor vs37, vs37, vs37 + xxlxor vs38, vs38, vs38 + xxlxor vs39, vs39, vs39 + xxlxor vs40, vs40, vs40 + xxlxor vs41, vs41, vs41 + xxlxor vs42, vs42, vs42 + xxlxor vs43, vs43, vs43 + xxlxor vs44, vs44, vs44 + xxlxor vs45, vs45, vs45 + xxlxor vs46, vs46, vs46 + xxlxor vs47, vs47, vs47 + xxlxor vs48, vs48, vs48 +.endm + + +.macro LOAD1x8 + LOAD1x8O 0,0 +.endm + + +.macro LOAD1x8O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + xxswapd vs17, vs16 + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs2, (32+\OffsetA)(AO) // load real,imag from A + lxv vs3, (48+\OffsetA)(AO) // load real,imag from A + lxv vs4, (64+\OffsetA)(AO) // load real,imag from A + lxv vs5, (80+\OffsetA)(AO) // load real,imag from A + lxv vs6, (96+\OffsetA)(AO) // load real,imag from A + lxv vs7, (112+\OffsetA)(AO) // load real,imag from A + +.endm + + +.macro END1x8_NORMAL + END1x8 AO,BO,128,16 +.endm + + +.macro END1x8_WITHOUT_ADD + END1x8 AO,BO,0,0 +.endm + + +.macro END1x8 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs33, vs0, vs17 + + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs35, vs1, vs17 + + xvmaddadp vs36, vs2, vs16 + xvmaddadp vs37, vs2, vs17 + + xvmaddadp vs38, vs3, vs16 + xvmaddadp vs39, vs3, vs17 + + xvmaddadp vs40, vs4, vs16 + xvmaddadp vs41, vs4, vs17 + + xvmaddadp vs42, vs5, vs16 + xvmaddadp vs43, vs5, vs17 + + xvmaddadp vs44, vs6, vs16 + xvmaddadp vs45, vs6, vs17 + + xvmaddadp vs46, vs7, vs16 + xvmaddadp vs47, vs7, vs17 + +.endm + + +.macro LOAD1x8_2 + LOAD1x8_2O 0,0 +.endm + + +.macro LOAD1x8_2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs20, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs17, vs16 + + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs2, (32+\OffsetA)(AO) // load real,imag from A 
+ lxv vs3, (48+\OffsetA)(AO) // load real,imag from A + lxv vs4, (64+\OffsetA)(AO) // load real,imag from A + lxv vs5, (80+\OffsetA)(AO) // load real,imag from A + lxv vs6, (96+\OffsetA)(AO) // load real,imag from A + lxv vs7, (112+\OffsetA)(AO) // load real,imag from A + lxv vs8, (128+0+\OffsetA)(AO) // load real,imag from A + lxv vs9, (128+16+\OffsetA)(AO) // load real,imag from A + lxv vs10, (128+32+\OffsetA)(AO) // load real,imag from A + lxv vs11, (128+48+\OffsetA)(AO) // load real,imag from A + lxv vs12, (128+64+\OffsetA)(AO) // load real,imag from A + lxv vs13, (128+80+\OffsetA)(AO) // load real,imag from A + lxv vs14, (128+96+\OffsetA)(AO) // load real,imag from A + lxv vs15, (128+112+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END1x8_2 + /*for load2 offset will be 256 and 32*/ + KERNEL1x8_2 AO,BO, 256,32,0 ,1,1 +.endm + + + +.macro KERNEL1x8_E2 OffsetA,OffsetB, Index,IsLast + KERNEL1x8_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL1x8_L2 OffsetA,OffsetB, Index,IsLast + KERNEL1x8_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL1x8_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs33, vs0, vs17 + xxswapd vs21, vs20 + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs35, vs1, vs17 +.if \Complete==0 + lxv vs0, DISP16(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A + lxv vs1, DISP16(\Index,16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs36, vs2, vs16 + xvmaddadp vs37, vs2, vs17 + + xvmaddadp vs38, vs3, vs16 + xvmaddadp vs39, vs3, vs17 +.if \Complete==0 + lxv vs2, DISP16(\Index,32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs3, DISP16(\Index,48 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs40, vs4, vs16 + xvmaddadp vs41, vs4, vs17 + + xvmaddadp vs42, vs5, vs16 + xvmaddadp vs43, vs5, vs17 +.if \Complete==0 + lxv vs4, DISP16(\Index,64+ \OffsetA)(\AREG) // load real,imag from A + lxv vs5, DISP16(\Index,64+16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs44, vs6, vs16 + xvmaddadp vs45, vs6, vs17 + + xvmaddadp vs46, vs7, vs16 + xvmaddadp vs47, vs7, vs17 +.if \Complete==0 + lxv vs16, DISP2(\Index, 0+\OffsetB)(\BREG) // load real imag from B +.endif +.if \Complete==0 + xxswapd vs17, vs16 +.endif + xvmaddadp vs32, vs8, vs20 + xvmaddadp vs33, vs8, vs21 +.if \Complete==0 + lxv vs6, DISP16(\Index,64+32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs7, DISP16(\Index,64+48 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs34, vs9, vs20 + xvmaddadp vs35, vs9, vs21 +.if \Complete==0 + lxv vs8, DISP16(\Index,128+ + \OffsetA)(\AREG) // load real,imag from A + lxv vs9, DISP16(\Index,128+16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs36, vs10, vs20 + xvmaddadp vs37, vs10, vs21 + xvmaddadp vs38, vs11, vs20 + xvmaddadp vs39, vs11, vs21 +.if \Complete==0 + lxv vs10, DISP16(\Index,128+32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs11, DISP16(\Index,128+48 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs40, vs12, vs20 + xvmaddadp vs41, vs12, vs21 + xvmaddadp vs42, vs13, vs20 + xvmaddadp vs43, vs13, vs21 +.if \Complete==0 + lxv vs12, DISP16(\Index, 192 + \OffsetA)(\AREG) // load real,imag from A + lxv vs13, DISP16(\Index,192 +16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs44, vs14, vs20 + xvmaddadp vs45, vs14, vs21 + xvmaddadp vs46, vs15, vs20 + xvmaddadp vs47, vs15, vs21 +.if \Complete==0 + lxv vs14, DISP16(\Index,192 +32 + \OffsetA)(\AREG) // load 
real,imag from A + lxv vs15, DISP16(\Index,192 +48 + \OffsetA)(\AREG) // load real,imag from A + lxv vs20, DISP2(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP16(\Index,\OffsetA) + addi \BREG, \BREG, DISP2(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP16(\Index,256) + addi \BREG, \BREG, DISP2(\Index,32) +.endif +.endif +.endm + + + + + +.macro KERNEL1x8 + LOAD1x8 + END1x8 AO, BO, 128,16 +.endm + + +.macro SAVE1x8 + SAVE8 vs32,vs33,vs34,vs35,vs36,vs37,vs38,vs39,vs40,vs41,vs42,vs43,vs44,vs45,vs46,vs47,CO,0 + addi CO, CO, 128 +.endm +/********************************************************************************************** +* + +.macros for N=2 and M=4 +**********************************************************************************************/ + + +.macro Zero1x4 + xxlxor vs32, vs32, vs32 + xxlxor vs33, vs33, vs33 + xxlxor vs34, vs34, vs34 + xxlxor vs35, vs35, vs35 + xxlxor vs36, vs36, vs36 + xxlxor vs37, vs37, vs37 + xxlxor vs38, vs38, vs38 + xxlxor vs39, vs39, vs39 +.endm + + +.macro LOAD1x4 + LOAD1x4O 0,0 +.endm + + +.macro LOAD1x4O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + xxswapd vs17, vs16 + + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs2, (32+\OffsetA)(AO) // load real,imag from A + lxv vs3, (48+\OffsetA)(AO) // load real,imag from A + +.endm + + +.macro END1x4_NORMAL + END1x4 AO,BO,64,16 +.endm + + +.macro END1x4_WITHOUT_ADD + END1x4 AO,BO,0,0 +.endm + + +.macro END1x4 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs33, vs0, vs17 + + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs35, vs1, vs17 + + xvmaddadp vs36, vs2, vs16 + xvmaddadp vs37, vs2, vs17 + + xvmaddadp vs38, vs3, vs16 + xvmaddadp vs39, vs3, vs17 + +.endm + + +.macro LOAD1x4_2 + LOAD1x4_2O 0,0 +.endm + + +.macro LOAD1x4_2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs20, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs17, vs16 + + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs2, (32+\OffsetA)(AO) // load real,imag from A + lxv vs3, (48+\OffsetA)(AO) // load real,imag from A + lxv vs8, (64+\OffsetA)(AO) // load real,imag from A + lxv vs9, (80+\OffsetA)(AO) // load real,imag from A + lxv vs10, (96+\OffsetA)(AO) // load real,imag from A + lxv vs11, (112+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END1x4_2 + /*for load2 offset will be 128 and 32*/ + KERNEL1x4_2 AO,BO, 128,32,0 ,1,1 +.endm + + + +.macro KERNEL1x4_E2 OffsetA,OffsetB, Index,IsLast + KERNEL1x4_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL1x4_L2 OffsetA,OffsetB, Index,IsLast + KERNEL1x4_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL1x4_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs33, vs0, vs17 + xxswapd vs21, vs20 + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs35, vs1, vs17 +.if \Complete==0 + lxv vs0, DISP8(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A + lxv vs1, DISP8(\Index,16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs36, vs2, vs16 + xvmaddadp vs37, vs2, vs17 + + xvmaddadp vs38, vs3, vs16 + xvmaddadp vs39, vs3, vs17 +.if \Complete==0 + lxv vs2, DISP8(\Index,32 + \OffsetA)(\AREG) // load real,imag from A + lxv 
vs3, DISP8(\Index,48 + \OffsetA)(\AREG) // load real,imag from A +.endif + +.if \Complete==0 + lxv vs16, DISP2(\Index, 0+\OffsetB)(\BREG) // load real imag from B +.endif + xvmaddadp vs32, vs8, vs20 + xvmaddadp vs33, vs8, vs21 +.if \Complete==0 + xxswapd vs17, vs16 +.endif + xvmaddadp vs34, vs9, vs20 + xvmaddadp vs35, vs9, vs21 +.if \Complete==0 + lxv vs8, DISP8(\Index,64+0+ \OffsetA)(\AREG) // load real,imag from A + lxv vs9, DISP8(\Index,64+16 + \OffsetA)(\AREG) // load real,imag from A +.endif + xvmaddadp vs36, vs10, vs20 + xvmaddadp vs37, vs10, vs21 + xvmaddadp vs38, vs11, vs20 + xvmaddadp vs39, vs11, vs21 +.if \Complete==0 + lxv vs10, DISP8(\Index,64+32 + \OffsetA)(\AREG) // load real,imag from A + lxv vs11, DISP8(\Index,64+48 + \OffsetA)(\AREG) // load real,imag from A +.endif + +.if \Complete==0 + lxv vs20, DISP2(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP8(\Index,\OffsetA) + addi \BREG, \BREG, DISP2(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP8(\Index,128) + addi \BREG, \BREG, DISP2(\Index,32) +.endif +.endif +.endm + + + +.macro KERNEL1x4 + LOAD1x4 + END1x4 AO, BO, 64,16 +.endm + + + +.macro SAVE1x4 + SAVE4 vs32,vs33,vs34,vs35,vs36,vs37,vs38,vs39,CO,0 + addi CO, CO, 64 +.endm +/********************************************************************************************** +* + +.macros for N=2 and M=2 +**********************************************************************************************/ + + +.macro Zero1x2 + xxlxor vs32, vs32, vs32 + xxlxor vs33, vs33, vs33 + xxlxor vs34, vs34, vs34 + xxlxor vs35, vs35, vs35 + +.endm + + +.macro LOAD1x2 + LOAD1x2O 0,0 +.endm + + +.macro LOAD1x2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + xxswapd vs17, vs16 + + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + +.endm + + +.macro END1x2_NORMAL + END1x2 AO,BO,32,16 +.endm + + +.macro END1x2_WITHOUT_ADD + END1x2 AO,BO,0,0 +.endm + + +.macro END1x2 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs33, vs0, vs17 + + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs35, vs1, vs17 + +.endm + + +.macro LOAD1x2_2 + LOAD1x2_2O 0,0 +.endm + + +.macro LOAD1x2_2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs20, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs17, vs16 + + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs1, (16+\OffsetA)(AO) // load real,imag from A + lxv vs8, (32+\OffsetA)(AO) // load real,imag from A + lxv vs9, (48+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END1x2_2 + /*for load2 offset will be 64 and 32*/ + KERNEL1x2_2 AO,BO, 64,32,0 ,1,1 +.endm + + + +.macro KERNEL1x2_E2 OffsetA,OffsetB, Index,IsLast + KERNEL1x2_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL1x2_L2 OffsetA,OffsetB, Index,IsLast + KERNEL1x2_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL1x2_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs33, vs0, vs17 + xxswapd vs21, vs20 + xvmaddadp vs34, vs1, vs16 + xvmaddadp vs35, vs1, vs17 +.if \Complete==0 + lxv vs0, DISP4(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A + lxv vs1, DISP4(\Index,16 + \OffsetA)(\AREG) // load real,imag from A +.endif +.if \Complete==0 + lxv vs16, DISP2(\Index, 0+\OffsetB)(\BREG) // load real imag 
from B +.endif + xvmaddadp vs32, vs8, vs20 + xvmaddadp vs33, vs8, vs21 +.if \Complete==0 + xxswapd vs17, vs16 +.endif + xvmaddadp vs34, vs9, vs20 + xvmaddadp vs35, vs9, vs21 +.if \Complete==0 + lxv vs20, DISP2(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \Complete==0 + lxv vs8, DISP4(\Index,32+0+ \OffsetA)(\AREG) // load real,imag from A + lxv vs9, DISP4(\Index,32+16 + \OffsetA)(\AREG) // load real,imag from A +.endif + + + +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP4(\Index,\OffsetA) + addi \BREG, \BREG, DISP2(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP4(\Index,64) + addi \BREG, \BREG, DISP2(\Index,32) +.endif +.endif +.endm + + + +.macro KERNEL1x2 + LOAD1x2 + END1x2 AO, BO, 32,16 +.endm + + + +.macro SAVE1x2 + SAVE2 vs32,vs33,vs34,vs35,CO,0 + addi CO, CO, 32 +.endm +/********************************************************************************************** +* + +.macros for N=2 and M=1 +**********************************************************************************************/ + + + +.macro Zero1x1 + xxlxor vs32, vs32, vs32 + xxlxor vs33, vs33, vs33 +.endm + + +.macro LOAD1x1 + LOAD1x1O 0,0 +.endm + + +.macro LOAD1x1O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + xxswapd vs17, vs16 + +.endm + + +.macro END1x1_NORMAL + END1x1 AO,BO,16,16 +.endm + + +.macro END1x1_WITHOUT_ADD + END1x1 AO,BO,0,0 +.endm + + +.macro END1x1 AREG, BREG, OffsetA, OffsetB +.if \OffsetB != 0 + addi \BREG, \BREG, \OffsetB +.endif +.if \OffsetA != 0 + addi \AREG, \AREG, \OffsetA +.endif + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs33, vs0, vs17 +.endm + + +.macro LOAD1x1_2 + LOAD1x1_2O 0,0 +.endm + + +.macro LOAD1x1_2O OffsetA,OffsetB + lxv vs16,(\OffsetB+ 0)(BO) // load real imag from B + lxv vs20, (\OffsetB+16)(BO) // load real,imag from B + xxswapd vs17, vs16 + + lxv vs0, (0+\OffsetA)(AO) // load real,imag from A + lxv vs8, (16+\OffsetA)(AO) // load real,imag from A +.endm + + +.macro END1x1_2 + /*for load2 offset will be 32 and 32*/ + KERNEL1x1_2 AO,BO, 32,32,0 ,1,1 +.endm + + + +.macro KERNEL1x1_E2 OffsetA,OffsetB, Index,IsLast + KERNEL1x1_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,1 +.endm + + +.macro KERNEL1x1_L2 OffsetA,OffsetB, Index,IsLast + KERNEL1x1_2 AO,BO, \OffsetA,\OffsetB, \Index,\IsLast ,0 +.endm + + +.macro KERNEL1x1_2 AREG,BREG, OffsetA,OffsetB, Index,IsLast ,Complete + xxswapd vs21, vs20 + xvmaddadp vs32, vs0, vs16 + xvmaddadp vs33, vs0, vs17 +.if \Complete==0 + lxv vs0, DISP2(\Index, 0 + \OffsetA)(\AREG) // load real,imag from A +.endif +.if \Complete==0 + lxv vs16, DISP2(\Index, 0+\OffsetB)(\BREG) // load real imag from B +.endif +.if \Complete==0 + xxswapd vs17, vs16 +.endif + xvmaddadp vs32, vs8, vs20 + xvmaddadp vs33, vs8, vs21 +.if \Complete==0 + lxv vs8, DISP2(\Index,16+0+ \OffsetA)(\AREG) // load real,imag from A +.endif + +.if \Complete==0 + lxv vs20, DISP2(\Index, 16+\OffsetB)(\BREG) // load real,imag from B +.endif +.if \IsLast==1 +.if \Complete==1 + addi \AREG, \AREG, DISP2(\Index,\OffsetA) + addi \BREG, \BREG, DISP2(\Index,\OffsetB) +.else + addi \AREG, \AREG, DISP2(\Index,32) + addi \BREG, \BREG, DISP2(\Index,32) +.endif +.endif +.endm + + + +.macro KERNEL1x1 + LOAD1x1 + END1x1 AO, BO, 16,16 +.endm + + + +.macro SAVE1x1 + SAVE1 vs32,vs33,CO,0 + addi CO, CO, 16 +.endm + +/****************************TRMM POINTER REFRESH + +.macroSES*************************/ + + +.macro SHIFT_REG REG1,REG2,SHIFT_VAL + .if \SHIFT_VAL==16 + slwi \REG1, \REG2, 8 + .elseif 
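Every (N,M) pairing above follows the same five-macro protocol: ZeroNxM clears the accumulators, LOADNxM primes the first operands, the KERNEL/END macros run the k-loop, and SAVENxM applies alpha, stores, and advances CO by 16 bytes per complex element. A sketch of the M-decomposition these tile sizes support (illustrative; the real driver repeats the 8-wide tile M/8 times):

#include <stdio.h>

static void tile(int m_block, int k) {
    printf("Zero1x%d; %d k-steps; SAVE1x%d; CO += %d bytes\n",
           m_block, k, m_block, 16 * m_block);
}

int main(void) {
    int m = 15, k = 64;
    /* for m < 16 the bit test picks each tile at most once; larger m
       runs the 8-wide tile repeatedly first */
    for (int b = 8; b >= 1; b /= 2)
        if (m & b) tile(b, k);
    return 0;
}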
\SHIFT_VAL==8 + slwi \REG1, \REG2, 7 + .elseif \SHIFT_VAL==4 + slwi \REG1, \REG2, 6 + .elseif \SHIFT_VAL==2 + slwi \REG1, \REG2, 5 + .elseif \SHIFT_VAL==1 + slwi \REG1, \REG2, 4 + .endif +.endm +/* +//#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) +// ptrbb = bb; +// #else +// ptrba += off*16; +// ptrbb = bb + off*2; +// #endif +*/ + + +.macro REFRESH_POINTERS PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B + #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + /* ptrbb = bb;*/ + mr \PTR_B,\B_VAL /* refresh BPOINT */ + #else + /* + // ptrba =ptrba+ off*C_A; + // ptrbb = bb + off*C_B; + */ + SHIFT_REG T4,\OFF_VAL,\C_B /* Number of values in B shifted */ + SHIFT_REG T2,\OFF_VAL,\C_A /* Number of values in A shifted */ + add \PTR_B, \B_VAL , T4 /* Add values to BO */ + add \PTR_A, \PTR_A, T2 /* Add values to AO */ + #endif +.endm + +/* +// #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) +// temp = bk-off; +// #elif defined(LEFT) +// temp = off+16; // number of values in A +// #else +// temp = off+2; // number of values in B +// #endif +*/ + + +.macro REFRESH_TEMP_BK TEMP_BK,BK_VAL,OFF_VAL,INCR_A,INCR_B + #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + /* temp = bk-off;*/ + sub \TEMP_BK,\BK_VAL,\OFF_VAL + #elif defined(LEFT) + /* temp = off+INCR_A; // number of values in A */ + addi \TEMP_BK, \OFF_VAL, \INCR_A + #else + /* temp = off+INCR_B // number of values in B*/ + addi \TEMP_BK,\OFF_VAL, \INCR_B + #endif +.endm +/* +// #if ( defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) +// temp = bk - off; +// #ifdef LEFT +// temp -= 16; // number of values in A +// #else +// temp -= 2; // number of values in B +// #endif +// ptrba += temp*16; +// ptrbb += temp*2; +// #endif +// #ifdef LEFT +// off += 16; // number of values in A +// #endif +*/ + + + +.macro REFRESH_AFTER_SAVE TEMP_BK,BK_VAL,OFF_VAL,PTR_B,PTR_A,C_A,C_B + #if ( defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + /*temp = bk - off;*/ + sub \TEMP_BK,\BK_VAL,\OFF_VAL + #ifdef LEFT + /*temp -= 8; // number of values in A*/ + addi \TEMP_BK,\TEMP_BK,-\C_A + #else + /*temp -= 4; // number of values in B*/ + addi \TEMP_BK,\TEMP_BK,-\C_B + #endif + /*ptrba += temp*C_A; + ptrbb += temp*C_B;*/ + SHIFT_REG T4,\TEMP_BK,\C_A + SHIFT_REG T2,\TEMP_BK,\C_B + add \PTR_A, \PTR_A,T4/*ptrba+temp*C_A*/ + add \PTR_B, \PTR_B,T2 + #endif + #ifdef LEFT + /*off += 8; // number of values in A*/ + addi \OFF_VAL,\OFF_VAL,\C_A + #endif +.endm \ No newline at end of file diff --git a/kernel/power/zgemm_tcopy_macros_8_power8.S b/kernel/power/zgemm_tcopy_macros_8_power8.S index 3f5a5ed03..654332375 100644 --- a/kernel/power/zgemm_tcopy_macros_8_power8.S +++ b/kernel/power/zgemm_tcopy_macros_8_power8.S @@ -38,7 +38,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=4 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x8', ` +#else .macro COPY_4x8 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -144,14 +148,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
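The commented-out C fragments above already spell out what the TRMM refresh macros compute; consolidated into real C, with SHIFT_REG's lookup table reduced to its meaning (a count of complex-double elements times 16 bytes):

#include <stddef.h>
#include <stdio.h>

/* One complex double = 16 bytes, hence SHIFT_REG's shift amounts of
   4 + log2(width). Names mirror the macro parameters. */
#define SHIFT_REG(off, width) ((size_t)(off) * (size_t)(width) * 16)

static void refresh_pointers(char **ptrba, char **ptrbb, char *bb,
                             long off, int c_a, int c_b, int same_side) {
    if (same_side) {              /* (LEFT && TRANSA) || (!LEFT && !TRANSA) */
        *ptrbb = bb;
    } else {
        *ptrba += SHIFT_REG(off, c_a);
        *ptrbb  = bb + SHIFT_REG(off, c_b);
    }
}

static long refresh_temp_bk(long bk, long off, int c_a, int c_b, int mode) {
    if (mode == 0) return bk - off;   /* (LEFT && !TRANSA) || (!LEFT && TRANSA) */
    if (mode == 1) return off + c_a;  /* LEFT: number of values in A */
    return off + c_b;                 /* else: number of values in B */
}

int main(void) {
    char bb[1024], *pa = bb, *pb = bb;
    refresh_pointers(&pa, &pb, bb, 2, 8, 2, 0);
    printf("%td %ld\n", pb - bb, refresh_temp_bk(128, 32, 8, 2, 0)); /* 64 96 */
    return 0;
}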
stxvd2x vs12, o32, T1 stxvd2x vs13, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x4', ` +#else .macro COPY_4x4 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -209,14 +221,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs46, o32, T1 stxvd2x vs47, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x2', ` +#else .macro COPY_4x2 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -254,14 +274,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs38, o32, T1 stxvd2x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=4 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_4x1', ` +#else .macro COPY_4x1 +#endif lxvd2x vs32, o0, A0 addi A0, A0, 16 @@ -289,14 +317,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x8', ` +#else .macro COPY_2x8 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -350,14 +386,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs46, o32, T1 stxvd2x vs47, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x4', ` +#else .macro COPY_2x4 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -387,14 +431,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs38, o32, T1 stxvd2x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x2', ` +#else .macro COPY_2x2 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -414,14 +466,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
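The #if defined(_AIX) / define(`NAME', `...') wrapping gives every copy macro two spellings: GNU as consumes the .macro form, while on AIX, whose assembler lacks .macro, the file is evidently preprocessed with m4 so the same bodies expand as m4 macros. The COPY_NxM bodies themselves just repack an N-column block of complex doubles into a contiguous buffer; roughly, in C (the exact packing order is fixed by the GEMM kernels, so treat this layout as illustrative):

#include <complex.h>
#include <stdio.h>

static void copy_4x4(const double complex *a, long lda, double complex *b) {
    for (int j = 0; j < 4; j++)        /* four source columns, lda apart */
        for (int i = 0; i < 4; i++)
            *b++ = a[j * lda + i];     /* the lxvd2x ... stxvd2x pairs   */
}

int main(void) {
    double complex a[16], b[16];
    for (int i = 0; i < 16; i++) a[i] = i + 1.0 * I;
    copy_4x4(a, 4, b);
    printf("%.0f%+.0fi\n", creal(b[5]), cimag(b[5]));  /* 5+1i */
    return 0;
}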
stxvd2x vs34, o32, T1 stxvd2x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_2x1', ` +#else .macro COPY_2x1 +#endif lxvd2x vs32, o0, A0 addi A0, A0, 16 @@ -437,14 +497,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs33, o16, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x8', ` +#else .macro COPY_1x8 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -472,14 +540,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs38, o32, T1 stxvd2x vs39, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x4', ` +#else .macro COPY_1x4 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -495,14 +571,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs34, o32, T1 stxvd2x vs35, o48, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x2', ` +#else .macro COPY_1x2 +#endif lxvd2x vs32, o0, A0 lxvd2x vs33, o16, A0 @@ -514,14 +598,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs32, o0, T1 stxvd2x vs33, o16, T1 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`COPY_1x1', ` +#else .macro COPY_1x1 +#endif lxvd2x vs32, o0, A0 addi A0, A0, 16 @@ -531,5 +623,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stxvd2x vs32, o0, T1 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/zgemv_n.S b/kernel/power/zgemv_n.S index f93439986..708f1318d 100644 --- a/kernel/power/zgemv_n.S +++ b/kernel/power/zgemv_n.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -250,7 +250,7 @@ stw r22, 176(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz INCY, FRAMESLOT(0) + STACKSIZE(SP) #else diff --git a/kernel/power/zgemv_n_4.c b/kernel/power/zgemv_n_4.c index 167b0a158..1f7199c89 100644 --- a/kernel/power/zgemv_n_4.c +++ b/kernel/power/zgemv_n_4.c @@ -29,6 +29,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include #include "common.h" +#if defined(__VEC__) || defined(__ALTIVEC__) + #define HAVE_KERNEL_4x4_VEC 1 #define HAVE_KERNEL_4x2_VEC 1 #define HAVE_KERNEL_4x1_VEC 1 @@ -37,6 +39,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(HAVE_KERNEL_4x4_VEC) || defined(HAVE_KERNEL_4x2_VEC) || defined(HAVE_KERNEL_4x1_VEC) #include #endif +#endif // #define NBMAX 4096 @@ -614,8 +617,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, BLASLONG m2; BLASLONG m3; BLASLONG n2; - - FLOAT xbuffer[8], *ybuffer; + FLOAT xbuffer[8] __attribute__((aligned(16))); + FLOAT *ybuffer; if (m < 1) return (0); if (n < 1) return (0); diff --git a/kernel/power/zgemv_n_ppc440.S b/kernel/power/zgemv_n_ppc440.S index 55dd2d84f..bd1148b65 100644 --- a/kernel/power/zgemv_n_ppc440.S +++ b/kernel/power/zgemv_n_ppc440.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -223,7 +223,7 @@ stw r22, 176(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz INCY, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/zgemv_t.S b/kernel/power/zgemv_t.S index 9c6f510c2..d82fab16a 100644 --- a/kernel/power/zgemv_t.S +++ b/kernel/power/zgemv_t.S @@ -47,7 +47,7 @@ #define STACKSIZE 304 #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -226,7 +226,7 @@ stw r0, 4 + FZERO #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz INCY, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/zgemv_t_4.c b/kernel/power/zgemv_t_4.c index 20a0812dd..956d75ffc 100644 --- a/kernel/power/zgemv_t_4.c +++ b/kernel/power/zgemv_t_4.c @@ -28,10 +28,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
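Two recurring fixes in the C gemv sources: the HAVE_KERNEL_*_VEC defines now sit behind __VEC__/__ALTIVEC__, so the hand-vectorized kernels are only compiled when the compiler actually targets Altivec/VSX, and the small stack buffers gain aligned(16), keeping them safe for 16-byte vector loads. The pattern in miniature (names illustrative):

#include <stdio.h>

#if defined(__VEC__) || defined(__ALTIVEC__)
#define HAVE_KERNEL_VEC 1
#endif

static double xbuffer[8] __attribute__((aligned(16))); /* vector-load safe */

static double dot2(const double *x, const double *y) {
#if defined(HAVE_KERNEL_VEC)
    /* the VSX kernel would go here */
#endif
    return x[0] * y[0] + x[1] * y[1];  /* scalar fallback */
}

int main(void) {
    xbuffer[0] = 3; xbuffer[1] = 4;
    printf("%g\n", dot2(xbuffer, xbuffer));  /* 25 */
    return 0;
}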
#include "common.h" #define NBMAX 4096 +#if defined(__VEC__) || defined(__ALTIVEC__) + #define HAVE_KERNEL_4x4_VEC 1 #define HAVE_KERNEL_4x2_VEC 1 #define HAVE_KERNEL_4x1_VEC 1 +#endif #if defined(HAVE_KERNEL_4x4_VEC) || defined(HAVE_KERNEL_4x2_VEC) || defined(HAVE_KERNEL_4x1_VEC) #include #endif @@ -510,7 +513,7 @@ static void zgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT al #endif -static __attribute__((always_inline)) void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) { +static __attribute__((always_inline)) inline void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) { BLASLONG i; for (i = 0; i < n; i++) { *dest = *src; @@ -532,8 +535,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, BLASLONG m2; BLASLONG m3; BLASLONG n2; - - FLOAT ybuffer[8], *xbuffer; + FLOAT ybuffer[8] __attribute__((aligned(16))); + FLOAT *xbuffer; if (m < 1) return (0); if (n < 1) return (0); diff --git a/kernel/power/zgemv_t_ppc440.S b/kernel/power/zgemv_t_ppc440.S index bfc039a0c..d7f3ee027 100644 --- a/kernel/power/zgemv_t_ppc440.S +++ b/kernel/power/zgemv_t_ppc440.S @@ -47,7 +47,7 @@ #define STACKSIZE 304 #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -179,7 +179,7 @@ stw r0, 4 + FZERO #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz INCY, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/zger.S b/kernel/power/zger.S index a9a607815..73757d448 100644 --- a/kernel/power/zger.S +++ b/kernel/power/zger.S @@ -47,7 +47,7 @@ #endif #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -235,7 +235,7 @@ stw r27, 196(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz LDA, FRAMESLOT(0) + STACKSIZE(SP) lwz BUFFER, FRAMESLOT(1) + STACKSIZE(SP) diff --git a/kernel/power/zrot.c b/kernel/power/zrot.c index d45468fd5..5e7ca3b23 100644 --- a/kernel/power/zrot.c +++ b/kernel/power/zrot.c @@ -24,6 +24,9 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#if !defined(__VEC__) || !defined(__ALTIVEC__) +#include "../arm/zrot.c" +#else #include "common.h" @@ -40,8 +43,8 @@ static void zrot_kernel_4(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT cosA, FLOAT si __asm__ ( - "xxspltd 36, %x[cos], 0 \n\t" // load c to both dwords - "xxspltd 37, %x[sin], 0 \n\t" // load s to both dwords + XXSPLTD_S(36,%x[cos],0) // load c to both dwords + XXSPLTD_S(37,%x[sin],0) // load s to both dwords "lxvd2x 32, 0, %[x_ptr] \n\t" // load x "lxvd2x 33, %[i16], %[x_ptr] \n\t" @@ -57,10 +60,10 @@ static void zrot_kernel_4(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT cosA, FLOAT si "addi %[y_ptr], %[y_ptr], 64 \n\t" "addic. %[temp_n], %[temp_n], -4 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmuldp 40, 32, 36 \n\t" // c * x "xvmuldp 41, 33, 36 \n\t" @@ -124,9 +127,9 @@ static void zrot_kernel_4(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT cosA, FLOAT si "addi %[y_ptr], %[y_ptr], 128 \n\t" "addic. 
%[temp_n], %[temp_n], -4 \n\t" - "bgt+ 1b \n" + "bgt+ one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmuldp 40, 32, 36 \n\t" // c * x "xvmuldp 41, 33, 36 \n\t" @@ -262,4 +265,4 @@ int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT } - \ No newline at end of file +#endif diff --git a/kernel/power/zscal.S b/kernel/power/zscal.S index 2eb7b0df3..ae68ee672 100644 --- a/kernel/power/zscal.S +++ b/kernel/power/zscal.S @@ -43,7 +43,7 @@ #define XX r4 #define PREA r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define X r6 #define INCX r7 diff --git a/kernel/power/zscal.c b/kernel/power/zscal.c index a1b441d2c..5526f4d67 100644 --- a/kernel/power/zscal.c +++ b/kernel/power/zscal.c @@ -38,11 +38,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #pragma GCC optimize "O1" -#if defined(POWER8) || defined(POWER9) +#if defined(POWER8) || defined(POWER9) || defined(POWER10) +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(DOUBLE) #include "zscal_microk_power8.c" #endif #endif +#endif #ifndef HAVE_KERNEL_8 diff --git a/kernel/power/zscal_microk_power8.c b/kernel/power/zscal_microk_power8.c index aba9029a0..567331775 100644 --- a/kernel/power/zscal_microk_power8.c +++ b/kernel/power/zscal_microk_power8.c @@ -58,8 +58,8 @@ static void zscal_kernel_8 (long n, double *x, double alpha_r, double alpha_i) "dcbt 0, %2 \n\t" "xsnegdp 33, %x16 \n\t" // -alpha_i - "xxspltd 32, %x15, 0 \n\t" // alpha_r , alpha_r - "xxmrghd 33, 33, %x16 \n\t" // -alpha_i , alpha_i + XXSPLTD_S(32,%x15,0) // alpha_r , alpha_r + XXMRGHD_S(33,33,%x16) // -alpha_i , alpha_i "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i "lxvd2x 41, %17, %2 \n\t" @@ -73,10 +73,10 @@ static void zscal_kernel_8 (long n, double *x, double alpha_r, double alpha_i) "addi %2, %2, 128 \n\t" "addic. %1, %1, -8 \n\t" - "ble 2f \n\t" + "ble two%= \n\t" - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "xvmuldp 48, 40, 32 \n\t" // x0_r * alpha_r, x0_i * alpha_r "xvmuldp 49, 41, 32 \n\t" @@ -87,14 +87,14 @@ static void zscal_kernel_8 (long n, double *x, double alpha_r, double alpha_i) "xvmuldp %x5, 46, 32 \n\t" "xvmuldp %x6, 47, 32 \n\t" - "xxswapd %x7, 40 \n\t" - "xxswapd %x8, 41 \n\t" - "xxswapd %x9, 42 \n\t" - "xxswapd %x10, 43 \n\t" - "xxswapd %x11, 44 \n\t" - "xxswapd %x12, 45 \n\t" - "xxswapd %x13, 46 \n\t" - "xxswapd %x14, 47 \n\t" + XXSWAPD_S(%x7,40) + XXSWAPD_S(%x8,41) + XXSWAPD_S(%x9,42) + XXSWAPD_S(%x10,43) + XXSWAPD_S(%x11,44) + XXSWAPD_S(%x12,45) + XXSWAPD_S(%x13,46) + XXSWAPD_S(%x14,47) "xvmuldp %x7, %x7, 33 \n\t" // x0_i * -alpha_i, x0_r * alpha_i "xvmuldp %x8, %x8, 33 \n\t" @@ -147,9 +147,9 @@ static void zscal_kernel_8 (long n, double *x, double alpha_r, double alpha_i) "addi %2, %2, 256 \n\t" "addic. 
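The inline-asm rewrites here swap GNU-as numeric local labels (1: / bgt 1b) for one%= / bgt one%=, and .p2align for .align. The %= operand expands to a number unique to each expansion of the asm template, so the code still assembles if the surrounding function is inlined or cloned more than once, and on POWER targets .align n already means a 2^n-byte boundary, matching the old .p2align 5. A self-contained illustration of %= (POWER asm, like the kernels; builds on powerpc targets only):

#include <stdio.h>

static long countdown(long n) {
    __asm__ volatile(
        "loop%=:                \n\t"
        "addic. %0, %0, -1      \n\t"   /* decrement, set CR0         */
        "bgt    loop%=          \n"     /* unique label per expansion */
        : "+r"(n) : : "cc");
    return n;
}

int main(void) {
    /* two call sites: with plain "loop:" labels, inlining both would
       produce duplicate symbols */
    printf("%ld %ld\n", countdown(5), countdown(3));  /* 0 0 */
    return 0;
}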
%1, %1, -8 \n\t" - "bgt 1b \n" + "bgt one%= \n" - "2: \n\t" + "two%=: \n\t" "xvmuldp 48, 40, 32 \n\t" // x0_r * alpha_r, x0_i * alpha_r "xvmuldp 49, 41, 32 \n\t" @@ -160,14 +160,14 @@ static void zscal_kernel_8 (long n, double *x, double alpha_r, double alpha_i) "xvmuldp %x5, 46, 32 \n\t" "xvmuldp %x6, 47, 32 \n\t" - "xxswapd %x7, 40 \n\t" - "xxswapd %x8, 41 \n\t" - "xxswapd %x9, 42 \n\t" - "xxswapd %x10, 43 \n\t" - "xxswapd %x11, 44 \n\t" - "xxswapd %x12, 45 \n\t" - "xxswapd %x13, 46 \n\t" - "xxswapd %x14, 47 \n\t" + XXSWAPD_S(%x7,40) + XXSWAPD_S(%x8,41) + XXSWAPD_S(%x9,42) + XXSWAPD_S(%x10,43) + XXSWAPD_S(%x11,44) + XXSWAPD_S(%x12,45) + XXSWAPD_S(%x13,46) + XXSWAPD_S(%x14,47) "addi %2, %2, -128 \n\t" diff --git a/kernel/power/zscal_ppc440.S b/kernel/power/zscal_ppc440.S index d0e4c9bcf..55dd1b87b 100644 --- a/kernel/power/zscal_ppc440.S +++ b/kernel/power/zscal_ppc440.S @@ -43,7 +43,7 @@ #define XX r4 #define PRE r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define X r6 #define INCX r7 diff --git a/kernel/power/zswap.S b/kernel/power/zswap.S index 8befadca2..415164a2b 100644 --- a/kernel/power/zswap.S +++ b/kernel/power/zswap.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define N r3 #define X r6 @@ -117,7 +117,7 @@ stfd f30, 128(SP) stfd f31, 136(SP) -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld INCY, FRAMESLOT(0) + STACKSIZE(SP) #endif diff --git a/kernel/power/zswap.c b/kernel/power/zswap.c index 1d8826f41..6cd3d9664 100644 --- a/kernel/power/zswap.c +++ b/kernel/power/zswap.c @@ -36,8 +36,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" +#if defined(__VEC__) || defined(__ALTIVEC__) #if defined(POWER8) || defined(POWER9) #include "zswap_microk_power8.c" +#elif defined(POWER10) +#include "cswap_microk_power10.c" +#endif #endif diff --git a/kernel/power/zswap_microk_power8.c b/kernel/power/zswap_microk_power8.c index 54391ba5d..1e9fbe2cf 100644 --- a/kernel/power/zswap_microk_power8.c +++ b/kernel/power/zswap_microk_power8.c @@ -40,8 +40,8 @@ zswap_kernel_16 (long n, double *x, double *y) { __asm__ ( - ".p2align 5 \n" - "1: \n\t" + ".align 5 \n" + "one%=: \n\t" "lxvd2x 32, 0, %4 \n\t" "lxvd2x 33, %5, %4 \n\t" "lxvd2x 34, %6, %4 \n\t" @@ -130,7 +130,7 @@ zswap_kernel_16 (long n, double *x, double *y) "addi %4, %4, 128 \n\t" "addic. 
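The zscal kernel's preamble builds two constants, (alpha_r, alpha_r) via XXSPLTD_S and (-alpha_i, alpha_i) via xsnegdp plus XXMRGHD_S; one multiply and one swapped multiply-add per (re, im) pair then gives x *= alpha. (The *_S wrappers exist for the same portability reason as above: assemblers without the xxspltd/xxmrghd/xxswapd extended mnemonics presumably get the underlying permute encodings instead.) The arithmetic, scalar for scalar:

#include <stdio.h>

static void zscal1(double *x, double ar, double ai) {
    double v32[2] = {  ar, ar };       /* XXSPLTD_S(32, alpha_r)      */
    double v33[2] = { -ai, ai };       /* xsnegdp + XXMRGHD_S         */
    double xr = x[0], xi = x[1];
    double swapped[2] = { xi, xr };    /* XXSWAPD_S                   */
    x[0] = xr * v32[0] + swapped[0] * v33[0];  /* xr*ar - xi*ai       */
    x[1] = xi * v32[1] + swapped[1] * v33[1];  /* xi*ar + xr*ai       */
}

int main(void) {
    double x[2] = {1, 2};              /* 1+2i                        */
    zscal1(x, 3, 4);                   /* times 3+4i                  */
    printf("%g%+gi\n", x[0], x[1]);    /* -5+10i                      */
    return 0;
}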
%2, %2, -16 \n\t" - "bgt 1b \n" + "bgt one%= \n" "#n=%2 x=%0=%3 y=%1=%4 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" : diff --git a/kernel/power/zsymv_L.S b/kernel/power/zsymv_L.S index b348e328f..9f00df072 100644 --- a/kernel/power/zsymv_L.S +++ b/kernel/power/zsymv_L.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define N r4 @@ -259,7 +259,7 @@ stw r27, 196(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz BUFFER, FRAMESLOT(0) + STACKSIZE(SP) #else diff --git a/kernel/power/zsymv_U.S b/kernel/power/zsymv_U.S index b631cbe35..fe97fde8b 100644 --- a/kernel/power/zsymv_U.S +++ b/kernel/power/zsymv_U.S @@ -39,7 +39,7 @@ #define ASSEMBLER #include "common.h" -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define M r3 #define IS r4 @@ -256,7 +256,7 @@ stw r27, 196(SP) #endif -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz BUFFER, FRAMESLOT(0) + STACKSIZE(SP) #else diff --git a/kernel/power/ztrmm_kernel_8x2_power8.S b/kernel/power/ztrmm_kernel_8x2_power8.S index c1415138c..684cbd6eb 100644 --- a/kernel/power/ztrmm_kernel_8x2_power8.S +++ b/kernel/power/ztrmm_kernel_8x2_power8.S @@ -98,7 +98,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -259,7 +259,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. stfd f2, ALPHA_I_SP stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -280,7 +280,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #ifdef TRMMKERNEL -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ztrmm_macros_8x2_power8.S b/kernel/power/ztrmm_macros_8x2_power8.S index 701ec65c8..b3fbcd220 100644 --- a/kernel/power/ztrmm_macros_8x2_power8.S +++ b/kernel/power/ztrmm_macros_8x2_power8.S @@ -68,7 +68,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Macros for N=2 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x8_1', ` +#else .macro LOAD2x8_1 +#endif lxvdsx vs16, o0, BO // load real part from B lxvdsx vs17, o8, BO // load imag part from B @@ -92,9 +96,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_I1', ` +#else .macro KERNEL2x8_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -152,9 +164,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs63, vs7, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_1', ` +#else .macro KERNEL2x8_1 +#endif xvmaddadp vs32, vs0, vs16 // real*real, imag*real @@ -221,9 +241,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
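Unlike the power9-style macros earlier in this patch, the power8 ztrmm macros use splat loads: lxvdsx duplicates B's real part across vs16 and its imaginary part across vs17, so each accumulator pair ends up holding (ar*br, ai*br) and (ar*bi, ai*bi), matching the lane comments in SAVE2x8 below. Sketch of one element's worth:

#include <stdio.h>

static void cfma_splat(const double a[2], const double b[2],
                       double acc_r[2], double acc_i[2]) {
    acc_r[0] += a[0] * b[0];  acc_r[1] += a[1] * b[0];  /* a * (br, br) */
    acc_i[0] += a[0] * b[1];  acc_i[1] += a[1] * b[1];  /* a * (bi, bi) */
}

int main(void) {
    double a[2] = {1, 2}, b[2] = {3, 4};
    double r[2] = {0, 0}, im[2] = {0, 0};
    cfma_splat(a, b, r, im);
    /* non-conjugated case: re = r[0] - im[1], im = im[0] + r[1] */
    printf("%g%+gi\n", r[0] - im[1], im[0] + r[1]);  /* -5+10i */
    return 0;
}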
addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_2', ` +#else .macro KERNEL2x8_2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -289,9 +317,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 addi BO, BO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_E2', ` +#else .macro KERNEL2x8_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -329,9 +365,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs63, vs15, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUBI1', ` +#else .macro KERNEL2x8_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -389,9 +433,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs63, vs7, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x8_SUB1', ` +#else .macro KERNEL2x8_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -449,9 +501,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs63, vs7, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x8', ` +#else .macro SAVE2x8 +#endif mr T1, CO @@ -473,13 +533,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -497,13 +557,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -521,13 +581,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -545,13 +605,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -569,13 +629,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs41, vs41 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs40 // realA*realB XSFADD_R2 vs0, vs0, vs41 // imagA*imagB - xxswapd vs40, vs40 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs41, vs41 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs40 // realA*imagB XSFADD_I2 vs1, vs1, vs41 // imagA*realB @@ -593,13 +653,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs43, vs43 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs42 // realA*realB XSFADD_R2 vs0, vs0, vs43 // imagA*imagB - xxswapd vs42, vs42 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs43, vs43 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs42 // realA*imagB XSFADD_I2 vs1, vs1, vs43 // imagA*realB @@ -617,13 +677,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs45, vs45 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs44 // realA*realB XSFADD_R2 vs0, vs0, vs45 // imagA*imagB - xxswapd vs44, vs44 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs45, vs45 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs44 // realA*imagB XSFADD_I2 vs1, vs1, vs45 // imagA*realB @@ -641,13 +701,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs47, vs47 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs46 // realA*realB XSFADD_R2 vs0, vs0, vs47 // imagA*imagB - xxswapd vs46, vs46 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs47, vs47 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs46 // realA*imagB XSFADD_I2 vs1, vs1, vs47 // imagA*realB @@ -703,13 +763,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs49, vs49 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs49,vs49) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs48 // realA*realB XSFADD_R2 vs0, vs0, vs49 // imagA*imagB - xxswapd vs48, vs48 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs49, vs49 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs48,vs48) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs49,vs49) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs48 // realA*imagB XSFADD_I2 vs1, vs1, vs49 // imagA*realB @@ -727,13 +787,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs51, vs51 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs51,vs51) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs50 // realA*realB XSFADD_R2 vs0, vs0, vs51 // imagA*imagB - xxswapd vs50, vs50 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs51, vs51 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs50,vs50) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs51,vs51) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs50 // realA*imagB XSFADD_I2 vs1, vs1, vs51 // imagA*realB @@ -751,13 +811,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs53, vs53 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs53,vs53) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs52 // realA*realB XSFADD_R2 vs0, vs0, vs53 // imagA*imagB - xxswapd vs52, vs52 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs53, vs53 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs52,vs52) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs53,vs53) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs52 // realA*imagB XSFADD_I2 vs1, vs1, vs53 // imagA*realB @@ -775,13 +835,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs55, vs55 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs55,vs55) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs54 // realA*realB XSFADD_R2 vs0, vs0, vs55 // imagA*imagB - xxswapd vs54, vs54 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs55, vs55 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs54,vs54) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs55,vs55) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs54 // realA*imagB XSFADD_I2 vs1, vs1, vs55 // imagA*realB @@ -799,13 +859,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs57, vs57 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs57,vs57) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs56 // realA*realB XSFADD_R2 vs0, vs0, vs57 // imagA*imagB - xxswapd vs56, vs56 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs57, vs57 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs56,vs56) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs57,vs57) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs56 // realA*imagB XSFADD_I2 vs1, vs1, vs57 // imagA*realB @@ -823,13 +883,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs59, vs59 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs59,vs59) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs58 // realA*realB XSFADD_R2 vs0, vs0, vs59 // imagA*imagB - xxswapd vs58, vs58 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs59, vs59 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs58,vs58) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs59,vs59) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs58 // realA*imagB XSFADD_I2 vs1, vs1, vs59 // imagA*realB @@ -847,13 +907,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs61, vs61 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs61,vs61) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs60 // realA*realB XSFADD_R2 vs0, vs0, vs61 // imagA*imagB - xxswapd vs60, vs60 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs61, vs61 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs60,vs60) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs61,vs61) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs60 // realA*imagB XSFADD_I2 vs1, vs1, vs61 // imagA*realB @@ -871,13 +931,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs63, vs63 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs63,vs63) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs62 // realA*realB XSFADD_R2 vs0, vs0, vs63 // imagA*imagB - xxswapd vs62, vs62 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs63, vs63 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs62,vs62) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs63,vs63) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs62 // realA*imagB XSFADD_I2 vs1, vs1, vs63 // imagA*realB @@ -918,14 +978,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T2, T2, LDC addi CO, CO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x4_1', ` +#else .macro LOAD2x4_1 +#endif lxvdsx vs16, o0, BO // load real part from B lxvdsx vs17, o8, BO // load imag part from B @@ -942,9 +1010,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_I1', ` +#else .macro KERNEL2x4_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -979,9 +1055,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs47, vs3, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_1', ` +#else .macro KERNEL2x4_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -1016,9 +1100,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs3, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_2', ` +#else .macro KERNEL2x4_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1053,9 +1145,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs11, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_E2', ` +#else .macro KERNEL2x4_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -1077,9 +1177,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
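Each SAVE2x8 stanza above reduces one accumulator pair to a scalar (re, im): XXSWAPD positions the wanted lane, then XSFADD_R1/XSFADD_R2 combine realA*realB with imagA*imagB and XSFADD_I1/XSFADD_I2 combine the two cross terms, with the add-versus-subtract choice baked into those macros per conjugation variant. For the plain (non-conjugated) case the reduction is:

#include <stdio.h>

/* acc_r holds (rA*rB, iA*rB), acc_i holds (rA*iB, iA*iB), as the
   comments above annotate; the other variants flip the signs. */
static void reduce_nn(const double acc_r[2], const double acc_i[2],
                      double *re, double *im) {
    *re = acc_r[0] - acc_i[1];  /* XSFADD_R1 then XSFADD_R2 */
    *im = acc_i[0] + acc_r[1];  /* XSFADD_I1 then XSFADD_I2 */
}

int main(void) {
    double acc_r[2] = {3, 6}, acc_i[2] = {4, 8}, re, im;
    reduce_nn(acc_r, acc_i, &re, &im);
    printf("%g%+gi\n", re, im);  /* -5+10i for (1+2i)*(3+4i) */
    return 0;
}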
xvmaddadp vs47, vs11, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUBI1', ` +#else .macro KERNEL2x4_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1114,9 +1222,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs47, vs3, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x4_SUB1', ` +#else .macro KERNEL2x4_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1151,9 +1267,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs3, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x4', ` +#else .macro SAVE2x4 +#endif mr T1, CO @@ -1170,13 +1294,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -1194,13 +1318,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -1218,13 +1342,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -1242,13 +1366,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -1291,13 +1415,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs41, vs41 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs40 // realA*realB XSFADD_R2 vs0, vs0, vs41 // imagA*imagB - xxswapd vs40, vs40 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs41, vs41 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs40 // realA*imagB XSFADD_I2 vs1, vs1, vs41 // imagA*realB @@ -1315,13 +1439,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs43, vs43 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs42 // realA*realB XSFADD_R2 vs0, vs0, vs43 // imagA*imagB - xxswapd vs42, vs42 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs43, vs43 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs42 // realA*imagB XSFADD_I2 vs1, vs1, vs43 // imagA*realB @@ -1339,13 +1463,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs45, vs45 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs44 // realA*realB XSFADD_R2 vs0, vs0, vs45 // imagA*imagB - xxswapd vs44, vs44 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs45, vs45 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs44 // realA*imagB XSFADD_I2 vs1, vs1, vs45 // imagA*realB @@ -1363,13 +1487,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs47, vs47 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs46 // realA*realB XSFADD_R2 vs0, vs0, vs47 // imagA*imagB - xxswapd vs46, vs46 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs47, vs47 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs46 // realA*imagB XSFADD_I2 vs1, vs1, vs47 // imagA*realB @@ -1401,14 +1525,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x2_1', ` +#else .macro LOAD2x2_1 +#endif lxvdsx vs16, o0, BO // load real part from B lxvdsx vs17, o8, BO // load imag part from B @@ -1423,9 +1555,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_I1', ` +#else .macro KERNEL2x2_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -1450,9 +1590,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs39, vs1, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_1', ` +#else .macro KERNEL2x2_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -1477,9 +1625,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs1, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_2', ` +#else .macro KERNEL2x2_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1504,9 +1660,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs9, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_E2', ` +#else .macro KERNEL2x2_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -1520,9 +1684,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs9, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUBI1', ` +#else .macro KERNEL2x2_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1547,9 +1719,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs39, vs1, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x2_SUB1', ` +#else .macro KERNEL2x2_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -1574,9 +1754,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddadp vs39, vs1, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x2', ` +#else .macro SAVE2x2 +#endif mr T1, CO @@ -1591,13 +1779,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -1615,13 +1803,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -1658,13 +1846,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -1682,13 +1870,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -1716,14 +1904,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
add T1, T1, LDC addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=2 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD2x1_1', ` +#else .macro LOAD2x1_1 +#endif lxvdsx vs16, o0, BO // load real part from B lxvdsx vs17, o8, BO // load imag part from B @@ -1737,9 +1933,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_I1', ` +#else .macro KERNEL2x1_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A @@ -1759,9 +1963,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs35, vs0, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_1', ` +#else .macro KERNEL2x1_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A @@ -1781,9 +1993,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs0, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_2', ` +#else .macro KERNEL2x1_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -1803,9 +2023,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs8, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_E2', ` +#else .macro KERNEL2x1_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -1815,9 +2043,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs8, vs23 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUBI1', ` +#else .macro KERNEL2x1_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -1837,9 +2073,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs35, vs0, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL2x1_SUB1', ` +#else .macro KERNEL2x1_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -1859,9 +2103,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs0, vs19 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE2x1', ` +#else .macro SAVE2x1 +#endif mr T1, CO @@ -1875,13 +2127,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -1915,13 +2167,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -1947,14 +2199,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=8 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x8_1', ` +#else .macro LOAD1x8_1 +#endif lxvdsx vs16, o0, BO // load real part from B lxvdsx vs17, o8, BO // load imag part from B @@ -1976,9 +2236,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_I1', ` +#else .macro KERNEL1x8_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2017,9 +2285,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs47, vs7, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_1', ` +#else .macro KERNEL1x8_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2058,9 +2334,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs7, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_2', ` +#else .macro KERNEL1x8_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2099,9 +2383,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs15, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_E2', ` +#else .macro KERNEL1x8_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -2122,9 +2414,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs47, vs15, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUBI1', ` +#else .macro KERNEL1x8_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2163,9 +2463,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs47, vs7, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x8_SUB1', ` +#else .macro KERNEL1x8_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2204,9 +2512,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmaddadp vs47, vs7, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x8', ` +#else .macro SAVE1x8 +#endif mr T1, CO @@ -2228,13 +2544,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -2252,13 +2568,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -2276,13 +2592,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -2300,13 +2616,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -2324,13 +2640,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs41, vs41 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs40 // realA*realB XSFADD_R2 vs0, vs0, vs41 // imagA*imagB - xxswapd vs40, vs40 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs41, vs41 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs40 // realA*imagB XSFADD_I2 vs1, vs1, vs41 // imagA*realB @@ -2348,13 +2664,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs43, vs43 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs42 // realA*realB XSFADD_R2 vs0, vs0, vs43 // imagA*imagB - xxswapd vs42, vs42 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs43, vs43 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs42 // realA*imagB XSFADD_I2 vs1, vs1, vs43 // imagA*realB @@ -2372,13 +2688,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs45, vs45 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs44 // realA*realB XSFADD_R2 vs0, vs0, vs45 // imagA*imagB - xxswapd vs44, vs44 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs45, vs45 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs44 // realA*imagB XSFADD_I2 vs1, vs1, vs45 // imagA*realB @@ -2396,13 +2712,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs47, vs47 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs46 // realA*realB XSFADD_R2 vs0, vs0, vs47 // imagA*imagB - xxswapd vs46, vs46 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs47, vs47 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs46 // realA*imagB XSFADD_I2 vs1, vs1, vs47 // imagA*realB @@ -2443,14 +2759,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
add T2, T2, LDC addi CO, CO, 128 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=4 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x4_1', ` +#else .macro LOAD1x4_1 +#endif lxvdsx vs16, o0, BO // load real part from B lxvdsx vs17, o8, BO // load imag part from B @@ -2465,9 +2789,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 64 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_I1', ` +#else .macro KERNEL1x4_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2491,9 +2823,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs39, vs3, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_1', ` +#else .macro KERNEL1x4_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2517,9 +2857,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_2', ` +#else .macro KERNEL1x4_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2543,9 +2891,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_E2', ` +#else .macro KERNEL1x4_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -2558,9 +2914,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUBI1', ` +#else .macro KERNEL1x4_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2584,9 +2948,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs39, vs3, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x4_SUB1', ` +#else .macro KERNEL1x4_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2610,9 +2982,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x4', ` +#else .macro SAVE1x4 +#endif mr T1, CO @@ -2629,13 +3009,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -2653,13 +3033,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -2677,13 +3057,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs37, vs37 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs36 // realA*realB XSFADD_R2 vs0, vs0, vs37 // imagA*imagB - xxswapd vs36, vs36 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs37, vs37 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs36 // realA*imagB XSFADD_I2 vs1, vs1, vs37 // imagA*realB @@ -2701,13 +3081,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs39, vs39 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs38 // realA*realB XSFADD_R2 vs0, vs0, vs39 // imagA*imagB - xxswapd vs38, vs38 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs39, vs39 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs38 // realA*imagB XSFADD_I2 vs1, vs1, vs39 // imagA*realB @@ -2739,14 +3119,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
add T1, T1, LDC addi CO, CO, 64 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=2 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x2_1', ` +#else .macro LOAD1x2_1 +#endif lxvdsx vs16, o0, BO // load real part from B lxvdsx vs17, o8, BO // load imag part from B @@ -2759,9 +3147,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 32 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_I1', ` +#else .macro KERNEL1x2_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2779,9 +3175,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs35, vs1, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_1', ` +#else .macro KERNEL1x2_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A lxvd2x vs9, o16, AO // load real,imag from A @@ -2799,9 +3203,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_2', ` +#else .macro KERNEL1x2_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2819,9 +3231,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_E2', ` +#else .macro KERNEL1x2_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real @@ -2830,9 +3250,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUBI1', ` +#else .macro KERNEL1x2_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2850,9 +3278,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs35, vs1, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x2_SUB1', ` +#else .macro KERNEL1x2_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A lxvd2x vs1, o16, AO // load real,imag from A @@ -2870,9 +3306,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x2', ` +#else .macro SAVE1x2 +#endif mr T1, CO @@ -2887,13 +3331,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -2911,13 +3355,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs35, vs35 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs34 // realA*realB XSFADD_R2 vs0, vs0, vs35 // imagA*imagB - xxswapd vs34, vs34 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs35, vs35 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs34 // realA*imagB XSFADD_I2 vs1, vs1, vs35 // imagA*realB @@ -2945,14 +3389,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 32 +#if defined(_AIX) +') +#else .endm +#endif /********************************************************************************************** * Macros for N=1 and M=1 **********************************************************************************************/ +#if defined(_AIX) +define(`LOAD1x1_1', ` +#else .macro LOAD1x1_1 +#endif lxvdsx vs16, o0, BO // load real part from B lxvdsx vs17, o8, BO // load imag part from B @@ -2964,9 +3416,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi AO, AO, 16 +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_I1', ` +#else .macro KERNEL1x1_I1 +#endif lxvd2x vs8, o0, AO // load real,imag from A @@ -2981,9 +3441,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmuldp vs33, vs0, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_1', ` +#else .macro KERNEL1x1_1 +#endif lxvd2x vs8, o0, AO // load real,imag from A @@ -2998,9 +3466,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_2', ` +#else .macro KERNEL1x1_2 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -3015,18 +3491,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_E2', ` +#else .macro KERNEL1x1_E2 +#endif xvmaddadp vs32, vs8, vs20 // real*real, imag*real xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUBI1', ` +#else .macro KERNEL1x1_SUBI1 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -3041,9 +3533,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
xvmuldp vs33, vs0, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`KERNEL1x1_SUB1', ` +#else .macro KERNEL1x1_SUB1 +#endif lxvd2x vs0, o0, AO // load real,imag from A @@ -3058,9 +3558,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag +#if defined(_AIX) +') +#else .endm +#endif +#if defined(_AIX) +define(`SAVE1x1', ` +#else .macro SAVE1x1 +#endif mr T1, CO @@ -3074,13 +3582,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xxlxor vs0, vs0, vs0 xxlxor vs1, vs1, vs1 - xxswapd vs33, vs33 // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB + XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB XSFADD_R1 vs0, vs0, vs32 // realA*realB XSFADD_R2 vs0, vs0, vs33 // imagA*imagB - xxswapd vs32, vs32 // realA*realB, imagA*realB -> imagA*realB, realA*realB - xxswapd vs33, vs33 // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB + XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB + XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB XSFADD_I1 vs1, vs1, vs32 // realA*imagB XSFADD_I2 vs1, vs1, vs33 // imagA*realB @@ -3106,5 +3614,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add T1, T1, LDC addi CO, CO, 16 +#if defined(_AIX) +') +#else .endm +#endif diff --git a/kernel/power/ztrsm_kernel_LN.S b/kernel/power/ztrsm_kernel_LN.S index 87473b45d..3acd9562d 100644 --- a/kernel/power/ztrsm_kernel_LN.S +++ b/kernel/power/ztrsm_kernel_LN.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -166,7 +166,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -186,7 +186,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif @@ -244,7 +244,7 @@ #endif #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREA, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/ztrsm_kernel_LT.S b/kernel/power/ztrsm_kernel_LT.S index db0860124..2d4f31189 100644 --- a/kernel/power/ztrsm_kernel_LT.S +++ b/kernel/power/ztrsm_kernel_LT.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -166,7 +166,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -186,7 +186,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif @@ -247,7 +247,7 @@ #endif #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREA, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/ztrsm_kernel_RT.S b/kernel/power/ztrsm_kernel_RT.S index c50ab86df..605363119 100644 --- a/kernel/power/ztrsm_kernel_RT.S +++ b/kernel/power/ztrsm_kernel_RT.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -166,7 +166,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || 
defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -186,7 +186,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif @@ -247,7 +247,7 @@ #endif #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREA, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/ztrsm_kernel_cell_LN.S b/kernel/power/ztrsm_kernel_cell_LN.S index 884a3e864..4798b5958 100644 --- a/kernel/power/ztrsm_kernel_cell_LN.S +++ b/kernel/power/ztrsm_kernel_cell_LN.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -172,7 +172,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -192,7 +192,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ztrsm_kernel_cell_LT.S b/kernel/power/ztrsm_kernel_cell_LT.S index 388dfe3c2..654938a4d 100644 --- a/kernel/power/ztrsm_kernel_cell_LT.S +++ b/kernel/power/ztrsm_kernel_cell_LT.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -172,7 +172,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -192,7 +192,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif @@ -246,7 +246,7 @@ li PREA, 16 * 12 * SIZE #else -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ lwz PREA, FRAMESLOT(2) + STACKSIZE(SP) lwz PREC, FRAMESLOT(3) + STACKSIZE(SP) diff --git a/kernel/power/ztrsm_kernel_cell_RT.S b/kernel/power/ztrsm_kernel_cell_RT.S index 00b50fe04..e3fe84d00 100644 --- a/kernel/power/ztrsm_kernel_cell_RT.S +++ b/kernel/power/ztrsm_kernel_cell_RT.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -172,7 +172,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -192,7 +192,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ztrsm_kernel_hummer_LN.S b/kernel/power/ztrsm_kernel_hummer_LN.S index bf3eafa45..042f4d476 100644 --- a/kernel/power/ztrsm_kernel_hummer_LN.S +++ b/kernel/power/ztrsm_kernel_hummer_LN.S @@ -48,7 +48,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #define A r6 #define B r7 #define C r8 diff --git a/kernel/power/ztrsm_kernel_hummer_LT.S b/kernel/power/ztrsm_kernel_hummer_LT.S index 865c85f78..fc8a0bef8 100644 --- a/kernel/power/ztrsm_kernel_hummer_LT.S +++ b/kernel/power/ztrsm_kernel_hummer_LT.S @@ -48,7 +48,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #define A r6 #define B r7 #define C r8 diff --git a/kernel/power/ztrsm_kernel_hummer_RT.S b/kernel/power/ztrsm_kernel_hummer_RT.S index 
99868f948..17e31ffa8 100644 --- a/kernel/power/ztrsm_kernel_hummer_RT.S +++ b/kernel/power/ztrsm_kernel_hummer_RT.S @@ -48,7 +48,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #define A r6 #define B r7 #define C r8 diff --git a/kernel/power/ztrsm_kernel_power6_LN.S b/kernel/power/ztrsm_kernel_power6_LN.S index 65b8077db..3c40f605a 100644 --- a/kernel/power/ztrsm_kernel_power6_LN.S +++ b/kernel/power/ztrsm_kernel_power6_LN.S @@ -57,7 +57,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -184,7 +184,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -204,7 +204,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ztrsm_kernel_power6_LT.S b/kernel/power/ztrsm_kernel_power6_LT.S index c27170604..b2a92301d 100644 --- a/kernel/power/ztrsm_kernel_power6_LT.S +++ b/kernel/power/ztrsm_kernel_power6_LT.S @@ -57,7 +57,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -184,7 +184,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -204,7 +204,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ztrsm_kernel_power6_RT.S b/kernel/power/ztrsm_kernel_power6_RT.S index ff0338cdc..cf37b5ca0 100644 --- a/kernel/power/ztrsm_kernel_power6_RT.S +++ b/kernel/power/ztrsm_kernel_power6_RT.S @@ -57,7 +57,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -184,7 +184,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -204,7 +204,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ztrsm_kernel_ppc440_LN.S b/kernel/power/ztrsm_kernel_ppc440_LN.S index d33522456..f0be64d81 100644 --- a/kernel/power/ztrsm_kernel_ppc440_LN.S +++ b/kernel/power/ztrsm_kernel_ppc440_LN.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -177,7 +177,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -197,7 +197,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ztrsm_kernel_ppc440_LT.S b/kernel/power/ztrsm_kernel_ppc440_LT.S index a9e7b891f..d5ff1b57f 100644 --- a/kernel/power/ztrsm_kernel_ppc440_LT.S +++ b/kernel/power/ztrsm_kernel_ppc440_LT.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -177,7 +177,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, 
FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -197,7 +197,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/power/ztrsm_kernel_ppc440_RT.S b/kernel/power/ztrsm_kernel_ppc440_RT.S index 43f4b07cb..b77dd76d1 100644 --- a/kernel/power/ztrsm_kernel_ppc440_RT.S +++ b/kernel/power/ztrsm_kernel_ppc440_RT.S @@ -61,7 +61,7 @@ #define N r4 #define K r5 -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifndef __64BIT__ #define A r6 #define B r7 @@ -177,7 +177,7 @@ stw r0, FZERO -#ifdef linux +#if defined(linux) || defined(__FreeBSD__) #ifdef __64BIT__ ld LDC, FRAMESLOT(0) + STACKSIZE(SP) #endif @@ -197,7 +197,7 @@ #endif #endif -#if defined(linux) && defined(__64BIT__) +#if (defined(linux) || defined(__FreeBSD__)) && defined(__64BIT__) ld OFFSET, FRAMESLOT(1) + STACKSIZE(SP) #endif diff --git a/kernel/riscv64/KERNEL b/kernel/riscv64/KERNEL new file mode 100644 index 000000000..68d68b5f8 --- /dev/null +++ b/kernel/riscv64/KERNEL @@ -0,0 +1,30 @@ +ifndef SCABS_KERNEL +SCABS_KERNEL = ../generic/cabs.c +endif + +ifndef DCABS_KERNEL +DCABS_KERNEL = ../generic/cabs.c +endif + +ifndef QCABS_KERNEL +QCABS_KERNEL = ../generic/cabs.c +endif + +ifndef LSAME_KERNEL +LSAME_KERNEL = ../generic/lsame.c +endif + +ifndef SGEMM_BETA +SGEMM_BETA = ../generic/gemm_beta.c +endif +ifndef DGEMM_BETA +DGEMM_BETA = ../generic/gemm_beta.c +endif +ifndef CGEMM_BETA +CGEMM_BETA = ../generic/zgemm_beta.c +endif +ifndef ZGEMM_BETA +ZGEMM_BETA = ../generic/zgemm_beta.c +endif + + diff --git a/kernel/riscv64/KERNEL.C910V b/kernel/riscv64/KERNEL.C910V new file mode 100644 index 000000000..0da66fa35 --- /dev/null +++ b/kernel/riscv64/KERNEL.C910V @@ -0,0 +1,190 @@ +SAMAXKERNEL = amax_vector.c +DAMAXKERNEL = amax_vector.c +CAMAXKERNEL = zamax_vector.c +ZAMAXKERNEL = zamax_vector.c + +SAMINKERNEL = amin_vector.c +DAMINKERNEL = amin_vector.c +CAMINKERNEL = zamin_vector.c +ZAMINKERNEL = zamin_vector.c + +SMAXKERNEL = max_vector.c +DMAXKERNEL = max_vector.c + +SMINKERNEL = min_vector.c +DMINKERNEL = min_vector.c + +ISAMAXKERNEL = iamax_vector.c +IDAMAXKERNEL = iamax_vector.c +ICAMAXKERNEL = izamax_vector.c +IZAMAXKERNEL = izamax_vector.c + +ISAMINKERNEL = iamin_vector.c +IDAMINKERNEL = iamin_vector.c +ICAMINKERNEL = izamin_vector.c +IZAMINKERNEL = izamin_vector.c + +ISMAXKERNEL = imax_vector.c +IDMAXKERNEL = imax_vector.c + +ISMINKERNEL = imin_vector.c +IDMINKERNEL = imin_vector.c + +SASUMKERNEL = asum_vector.c +DASUMKERNEL = asum_vector.c +CASUMKERNEL = zasum_vector.c +ZASUMKERNEL = zasum_vector.c + +SSUMKERNEL = ../arm/sum.c +DSUMKERNEL = ../arm/sum.c +CSUMKERNEL = ../arm/zsum.c +ZSUMKERNEL = ../arm/zsum.c + +SAXPYKERNEL = axpy_vector.c +DAXPYKERNEL = axpy_vector.c +CAXPYKERNEL = zaxpy_vector.c +ZAXPYKERNEL = zaxpy_vector.c + +SAXPBYKERNEL = axpby_vector.c +DAXPBYKERNEL = axpby_vector.c +CAXPBYKERNEL = zaxpby_vector.c +ZAXPBYKERNEL = zaxpby_vector.c + +SCOPYKERNEL = copy_vector.c +DCOPYKERNEL = copy_vector.c +CCOPYKERNEL = zcopy_vector.c +ZCOPYKERNEL = zcopy_vector.c + +SDOTKERNEL = dot_vector.c +DDOTKERNEL = dot_vector.c +CDOTKERNEL = zdot_vector.c +ZDOTKERNEL = zdot_vector.c + +SNRM2KERNEL = nrm2_vector.c +DNRM2KERNEL = nrm2_vector.c +CNRM2KERNEL = znrm2_vector.c +ZNRM2KERNEL = znrm2_vector.c + +SROTKERNEL = rot_vector.c +DROTKERNEL = rot_vector.c +CROTKERNEL = zrot_vector.c +ZROTKERNEL = zrot_vector.c + +SSCALKERNEL = scal_vector.c +DSCALKERNEL = scal_vector.c +CSCALKERNEL = 
zscal_vector.c +ZSCALKERNEL = zscal_vector.c + +SSWAPKERNEL = swap_vector.c +DSWAPKERNEL = swap_vector.c +CSWAPKERNEL = zswap_vector.c +ZSWAPKERNEL = zswap_vector.c + +SGEMVNKERNEL = gemv_n_vector.c +DGEMVNKERNEL = gemv_n_vector.c +CGEMVNKERNEL = zgemv_n_vector.c +ZGEMVNKERNEL = zgemv_n_vector.c + +SGEMVTKERNEL = gemv_t_vector.c +DGEMVTKERNEL = gemv_t_vector.c +CGEMVTKERNEL = zgemv_t_vector.c +ZGEMVTKERNEL = zgemv_t_vector.c + +STRMMKERNEL = ../generic/trmmkernel_16x4.c +DTRMMKERNEL = ../generic/trmmkernel_8x4.c +CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c +ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c + +SGEMMKERNEL = sgemm_kernel_16x4_c910v.c +ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = dgemm_kernel_8x4_c910v.c +ifneq ($(DGEMM_UNROLL_M), $(DGEMM_UNROLL_N)) +DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c +DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c +DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CGEMMKERNEL = ../generic/zgemmkernel_2x2.c +CGEMMONCOPY = ../generic/zgemm_ncopy_2.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +CGEMMONCOPYOBJ = cgemm_oncopy.o +CGEMMOTCOPYOBJ = cgemm_otcopy.o + +ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMONCOPYOBJ = zgemm_oncopy.o +ZGEMMOTCOPYOBJ = zgemm_otcopy.o + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +SSYMV_U_KERNEL = symv_U_vector.c +SSYMV_L_KERNEL = symv_L_vector.c +DSYMV_U_KERNEL = symv_U_vector.c +DSYMV_L_KERNEL = symv_L_vector.c +CSYMV_U_KERNEL = ../generic/zsymv_k.c +CSYMV_L_KERNEL = ../generic/zsymv_k.c +ZSYMV_U_KERNEL = ../generic/zsymv_k.c +ZSYMV_L_KERNEL = ../generic/zsymv_k.c + +CHEMV_L_KERNEL = zhemv_LM_vector.c +CHEMV_M_KERNEL = zhemv_LM_vector.c +CHEMV_U_KERNEL = zhemv_UV_vector.c +CHEMV_V_KERNEL = zhemv_UV_vector.c +ZHEMV_L_KERNEL = zhemv_LM_vector.c +ZHEMV_M_KERNEL = zhemv_LM_vector.c +ZHEMV_U_KERNEL = zhemv_UV_vector.c +ZHEMV_V_KERNEL = zhemv_UV_vector.c + + +LSAME_KERNEL = ../generic/lsame.c + +SCABS_KERNEL = ../generic/cabs.c +DCABS_KERNEL = ../generic/cabs.c +QCABS_KERNEL = ../generic/cabs.c + +ifndef SGEMM_BETA 
+SGEMM_BETA = ../generic/gemm_beta.c +endif +ifndef DGEMM_BETA +DGEMM_BETA = ../generic/gemm_beta.c +endif +ifndef CGEMM_BETA +CGEMM_BETA = ../generic/zgemm_beta.c +endif +ifndef ZGEMM_BETA +ZGEMM_BETA = ../generic/zgemm_beta.c +endif diff --git a/kernel/riscv64/KERNEL.RISCV64_GENERIC b/kernel/riscv64/KERNEL.RISCV64_GENERIC new file mode 100644 index 000000000..ea6a8cf21 --- /dev/null +++ b/kernel/riscv64/KERNEL.RISCV64_GENERIC @@ -0,0 +1,164 @@ +SAMAXKERNEL = ../riscv64/amax.c +DAMAXKERNEL = ../riscv64/amax.c +CAMAXKERNEL = ../riscv64/zamax.c +ZAMAXKERNEL = ../riscv64/zamax.c + +SAMINKERNEL = ../riscv64/amin.c +DAMINKERNEL = ../riscv64/amin.c +CAMINKERNEL = ../riscv64/zamin.c +ZAMINKERNEL = ../riscv64/zamin.c + +SMAXKERNEL = ../riscv64/max.c +DMAXKERNEL = ../riscv64/max.c + +SMINKERNEL = ../riscv64/min.c +DMINKERNEL = ../riscv64/min.c + +ISAMAXKERNEL = ../riscv64/iamax.c +IDAMAXKERNEL = ../riscv64/iamax.c +ICAMAXKERNEL = ../riscv64/izamax.c +IZAMAXKERNEL = ../riscv64/izamax.c + +ISAMINKERNEL = ../riscv64/iamin.c +IDAMINKERNEL = ../riscv64/iamin.c +ICAMINKERNEL = ../riscv64/izamin.c +IZAMINKERNEL = ../riscv64/izamin.c + +ISMAXKERNEL = ../riscv64/imax.c +IDMAXKERNEL = ../riscv64/imax.c + +ISMINKERNEL = ../riscv64/imin.c +IDMINKERNEL = ../riscv64/imin.c + +SASUMKERNEL = ../riscv64/asum.c +DASUMKERNEL = ../riscv64/asum.c +CASUMKERNEL = ../riscv64/zasum.c +ZASUMKERNEL = ../riscv64/zasum.c + +SSUMKERNEL = ../arm/sum.c +DSUMKERNEL = ../arm/sum.c +CSUMKERNEL = ../arm/zsum.c +ZSUMKERNEL = ../arm/zsum.c + +SAXPYKERNEL = ../riscv64/axpy.c +DAXPYKERNEL = ../riscv64/axpy.c +CAXPYKERNEL = ../riscv64/zaxpy.c +ZAXPYKERNEL = ../riscv64/zaxpy.c + +SCOPYKERNEL = ../riscv64/copy.c +DCOPYKERNEL = ../riscv64/copy.c +CCOPYKERNEL = ../riscv64/zcopy.c +ZCOPYKERNEL = ../riscv64/zcopy.c + +SDOTKERNEL = ../riscv64/dot.c +DDOTKERNEL = ../riscv64/dot.c +CDOTKERNEL = ../riscv64/zdot.c +ZDOTKERNEL = ../riscv64/zdot.c + +SNRM2KERNEL = ../riscv64/nrm2.c +DNRM2KERNEL = ../riscv64/nrm2.c +CNRM2KERNEL = ../riscv64/znrm2.c +ZNRM2KERNEL = ../riscv64/znrm2.c + +SROTKERNEL = ../riscv64/rot.c +DROTKERNEL = ../riscv64/rot.c +CROTKERNEL = ../riscv64/zrot.c +ZROTKERNEL = ../riscv64/zrot.c + +SSCALKERNEL = ../riscv64/scal.c +DSCALKERNEL = ../riscv64/scal.c +CSCALKERNEL = ../riscv64/zscal.c +ZSCALKERNEL = ../riscv64/zscal.c + +SSWAPKERNEL = ../riscv64/swap.c +DSWAPKERNEL = ../riscv64/swap.c +CSWAPKERNEL = ../riscv64/zswap.c +ZSWAPKERNEL = ../riscv64/zswap.c + +SGEMVNKERNEL = ../riscv64/gemv_n.c +DGEMVNKERNEL = ../riscv64/gemv_n.c +CGEMVNKERNEL = ../riscv64/zgemv_n.c +ZGEMVNKERNEL = ../riscv64/zgemv_n.c + +SGEMVTKERNEL = ../riscv64/gemv_t.c +DGEMVTKERNEL = ../riscv64/gemv_t.c +CGEMVTKERNEL = ../riscv64/zgemv_t.c +ZGEMVTKERNEL = ../riscv64/zgemv_t.c + +STRMMKERNEL = ../generic/trmmkernel_2x2.c +DTRMMKERNEL = ../generic/trmmkernel_2x2.c +CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c +ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c + +SGEMMKERNEL = ../generic/gemmkernel_2x2.c +SGEMMONCOPY = ../generic/gemm_ncopy_2.c +SGEMMOTCOPY = ../generic/gemm_tcopy_2.c +SGEMMONCOPYOBJ = sgemm_oncopy.o +SGEMMOTCOPYOBJ = sgemm_otcopy.o + +DGEMMKERNEL = ../generic/gemmkernel_2x2.c +DGEMMONCOPY = ../generic/gemm_ncopy_2.c +DGEMMOTCOPY = ../generic/gemm_tcopy_2.c +DGEMMONCOPYOBJ = dgemm_oncopy.o +DGEMMOTCOPYOBJ = dgemm_otcopy.o + +CGEMMKERNEL = ../generic/zgemmkernel_2x2.c +CGEMMONCOPY = ../generic/zgemm_ncopy_2.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +CGEMMONCOPYOBJ = cgemm_oncopy.o +CGEMMOTCOPYOBJ = cgemm_otcopy.o + +ZGEMMKERNEL = 
../generic/zgemmkernel_2x2.c +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMONCOPYOBJ = zgemm_oncopy.o +ZGEMMOTCOPYOBJ = zgemm_otcopy.o + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +SSYMV_U_KERNEL = ../generic/symv_k.c +SSYMV_L_KERNEL = ../generic/symv_k.c +DSYMV_U_KERNEL = ../generic/symv_k.c +DSYMV_L_KERNEL = ../generic/symv_k.c +CSYMV_U_KERNEL = ../generic/zsymv_k.c +CSYMV_L_KERNEL = ../generic/zsymv_k.c +ZSYMV_U_KERNEL = ../generic/zsymv_k.c +ZSYMV_L_KERNEL = ../generic/zsymv_k.c + + +LSAME_KERNEL = ../generic/lsame.c + +SCABS_KERNEL = ../generic/cabs.c +DCABS_KERNEL = ../generic/cabs.c +QCABS_KERNEL = ../generic/cabs.c + +ifndef SGEMM_BETA +SGEMM_BETA = ../generic/gemm_beta.c +endif +ifndef DGEMM_BETA +DGEMM_BETA = ../generic/gemm_beta.c +endif +ifndef CGEMM_BETA +CGEMM_BETA = ../generic/zgemm_beta.c +endif +ifndef ZGEMM_BETA +ZGEMM_BETA = ../generic/zgemm_beta.c +endif diff --git a/kernel/riscv64/amax.c b/kernel/riscv64/amax.c new file mode 100644 index 000000000..792e68bd9 --- /dev/null +++ b/kernel/riscv64/amax.c @@ -0,0 +1,75 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +/************************************************************************************** +* 2013/09/14 Saar +* BLASTEST float : OK +* BLASTEST double : OK +* CTEST : NoTest +* TEST : NoTest +* +**************************************************************************************/ + +#include "common.h" +#include <math.h> + +#if defined(DOUBLE) + +#define ABS fabs + +#else + +#define ABS fabsf + +#endif + + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0; + BLASLONG ix=0; + FLOAT maxf=0.0; + + if (n <= 0 || inc_x <= 0) return(maxf); + + maxf=ABS(x[0]); + ix += inc_x; + i++; + + while(i < n) + { + if( ABS(x[ix]) > maxf ) + { + maxf = ABS(x[ix]); + } + ix += inc_x; + i++; + } + return(maxf); +} + + diff --git a/kernel/riscv64/amax_vector.c b/kernel/riscv64/amax_vector.c new file mode 100644 index 000000000..b6aec131e --- /dev/null +++ b/kernel/riscv64/amax_vector.c @@ -0,0 +1,245 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + +#include "common.h" +#include <math.h> + +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M8 +#define FLOAT_V_T float32xm8_t +#define VLEV_FLOAT vlev_float32xm8 +#define VLSEV_FLOAT vlsev_float32xm8 +#define VFREDMAXVS_FLOAT vfredmaxvs_float32xm8 +#define MASK_T e32xm8_t +#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8 +#define VFMVVF_FLOAT vfmvvf_float32xm8 +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8 +#define VFMAXVV_FLOAT vfmaxvv_float32xm8 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M8 +#define FLOAT_V_T float64xm8_t +#define VLEV_FLOAT vlev_float64xm8 +#define VLSEV_FLOAT vlsev_float64xm8 +#define VFREDMAXVS_FLOAT vfredmaxvs_float64xm8 +#define MASK_T e64xm8_t +#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8 +#define VFMVVF_FLOAT vfmvvf_float64xm8 +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8 +#define VFMAXVV_FLOAT vfmaxvv_float64xm8 +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0, j=0; + BLASLONG ix=0; + FLOAT maxf=0.0; + if (n <= 0 || inc_x <= 0) return(maxf); + unsigned int gvl = 0; + FLOAT_V_T v0, v1, v_max; + + MASK_T mask0, mask1; + FLOAT zero = 0.0; + if(inc_x == 1){ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + if(gvl <= n/2){ + v_max = VFMVVF_FLOAT(0, gvl); + for(i=0,j=0; i<n/(gvl*2); i++){ + v0 = VLEV_FLOAT(&x[j], gvl); + //fabs(vector): flip negative lanes via masked reverse-subtract from zero + mask0 = VMFLTVF_FLOAT(v0, 0, gvl); + v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl); + v_max = VFMAXVV_FLOAT(v_max, v0, gvl); + + v1 = VLEV_FLOAT(&x[j+gvl], gvl); + //fabs(vector) + mask1 = VMFLTVF_FLOAT(v1, 0, gvl); + v1 = VFRSUBVF_MASK_FLOAT(v1, v1, 0, mask1, gvl); + v_max = VFMAXVV_FLOAT(v_max, v1, gvl); + j += gvl*2; + } + v1 = VFMVVF_FLOAT(0, gvl); + v0 = VFREDMAXVS_FLOAT(v_max, v1, gvl); + maxf = v0[0]; + } + for(;j<n;){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + v0 = VLEV_FLOAT(&x[j], gvl); + //fabs(vector) + mask0 = VMFLTVF_FLOAT(v0, 0, gvl); + v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl); + v1 = VFMVVF_FLOAT(0, gvl); + v0 = VFREDMAXVS_FLOAT(v0, v1, gvl); + if(v0[0] > maxf) + maxf = v0[0]; + j += gvl; + } + }else{ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + BLASLONG stride_x = inc_x * sizeof(FLOAT); + if(gvl <= n/2){ + BLASLONG inc_xv = inc_x * gvl; + v_max = VFMVVF_FLOAT(0, gvl); + for(i=0,j=0; i<n/(gvl*2); i++){ + v0 = VLSEV_FLOAT(&x[ix], stride_x, gvl); + //fabs(vector) + mask0 = VMFLTVF_FLOAT(v0, 0, gvl); + v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl); + v_max = VFMAXVV_FLOAT(v_max, v0, gvl); + + v1 = VLSEV_FLOAT(&x[ix+inc_xv], stride_x, gvl); + //fabs(vector) + mask1 = VMFLTVF_FLOAT(v1, 0, gvl); + v1 = VFRSUBVF_MASK_FLOAT(v1, v1, 0, mask1, gvl); + v_max = VFMAXVV_FLOAT(v_max, v1, gvl); + j += gvl*2; + ix += inc_xv*2; + } + v1 = VFMVVF_FLOAT(0, gvl); + v0 = VFREDMAXVS_FLOAT(v_max, v1, gvl); + maxf = v0[0]; + } + for(;j<n;){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + v0 = VLSEV_FLOAT(&x[j*inc_x], stride_x, gvl); + //fabs(vector) + mask0 = VMFLTVF_FLOAT(v0, 0, gvl); + v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl); + v1 = VFMVVF_FLOAT(0, gvl); + v0 = VFREDMAXVS_FLOAT(v0, v1, gvl); + if(v0[0] > maxf) + maxf = v0[0]; + j += gvl; + } + } + return(maxf); +} + + diff --git a/kernel/riscv64/amin.c b/kernel/riscv64/amin.c new file mode 100644 index 000000000..78495a8e3 --- /dev/null +++ b/kernel/riscv64/amin.c @@ -0,0 +1,75 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float : OK
+* BLASTEST double : OK
+* CTEST : NoTest
+* TEST : NoTest
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+
+#else
+
+#define ABS fabsf
+
+#endif
+
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT minf=0.0;
+
+    if (n <= 0 || inc_x <= 0) return(minf);
+
+    minf=ABS(x[0]);
+    ix += inc_x;
+    i++;
+
+    while(i < n)
+    {
+        if( ABS(x[ix]) < minf )
+        {
+            minf = ABS(x[ix]);
+        }
+        ix += inc_x;
+        i++;
+    }
+    return(minf);
+}
+
+
diff --git a/kernel/riscv64/amin_vector.c b/kernel/riscv64/amin_vector.c
new file mode 100644
index 000000000..53243ad56
--- /dev/null
+++ b/kernel/riscv64/amin_vector.c
@@ -0,0 +1,241 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+#include <float.h>
+
+#if !defined(DOUBLE)
+#define RVV_EFLOAT RVV_E32
+#define RVV_M RVV_M8
+#define FLOAT_V_T float32xm8_t
+#define VLEV_FLOAT vlev_float32xm8
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMINVS_FLOAT vfredminvs_float32xm8
+#define MASK_T e32xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8
+#define VFMINVV_FLOAT vfminvv_float32xm8
+#else
+#define RVV_EFLOAT RVV_E64
+#define RVV_M RVV_M8
+#define FLOAT_V_T float64xm8_t
+#define VLEV_FLOAT vlev_float64xm8
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMINVS_FLOAT vfredminvs_float64xm8
+#define MASK_T e64xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8
+#define VFMINVV_FLOAT vfminvv_float64xm8
+#endif
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    if (n <= 0 || inc_x <= 0) return(0.0);
+    FLOAT minf=FLT_MAX;
+    unsigned int gvl = 0;
+    FLOAT_V_T v0, v1, v_min;
+
+    MASK_T mask0, mask1;
+    FLOAT zero = 0.0;
+    if(inc_x == 1){
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        if(gvl <= n/2){
+            v_min = VFMVVF_FLOAT(FLT_MAX, gvl);
+            for(i=0,j=0; i<n/(gvl*2); i++){
+                v0 = VLEV_FLOAT(&x[j], gvl);
+                //fabs(vector)
+                mask0 = VMFLTVF_FLOAT(v0, 0, gvl);
+                v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl);
+                v_min = VFMINVV_FLOAT(v_min, v0, gvl);
+
+                v1 = VLEV_FLOAT(&x[j+gvl], gvl);
+                //fabs(vector)
+                mask1 = VMFLTVF_FLOAT(v1, 0, gvl);
+                v1 = VFRSUBVF_MASK_FLOAT(v1, v1, 0, mask1, gvl);
+                v_min = VFMINVV_FLOAT(v_min, v1, gvl);
+                j += gvl*2;
+            }
+            v0 = VFMVVF_FLOAT(FLT_MAX, gvl);
+            v0 = VFREDMINVS_FLOAT(v_min, v0, gvl);
+            minf = v0[0];
+        }
+        for(;j<n;){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v0 = VLEV_FLOAT(&x[j], gvl);
+            //fabs(vector)
+            mask0 = VMFLTVF_FLOAT(v0, 0, gvl);
+            v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl);
+            v1 = VFMVVF_FLOAT(FLT_MAX, gvl);
+            v0 = VFREDMINVS_FLOAT(v0, v1, gvl);
+            if(v0[0] < minf)
+                minf = v0[0];
+            j += gvl;
+        }
+    }else{
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        BLASLONG stride_x = inc_x * sizeof(FLOAT);
+        if(gvl <= n/2){
+            BLASLONG idx = 0, inc_xv = inc_x * gvl;
+            v_min = VFMVVF_FLOAT(FLT_MAX, gvl);
+            for(i=0,j=0; i<n/(gvl*2); i++){
+                v0 = VLSEV_FLOAT(&x[idx], stride_x, gvl);
+                //fabs(vector)
+                mask0 = VMFLTVF_FLOAT(v0, 0, gvl);
+                v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl);
+                v_min = VFMINVV_FLOAT(v_min, v0, gvl);
+
+                v1 = VLSEV_FLOAT(&x[idx+inc_xv], stride_x, gvl);
+                //fabs(vector)
+                mask1 = VMFLTVF_FLOAT(v1, 0, gvl);
+                v1 = VFRSUBVF_MASK_FLOAT(v1, v1, 0, mask1, gvl);
+                v_min = VFMINVV_FLOAT(v_min, v1, gvl);
+                j += gvl*2;
+                idx += inc_xv*2;
+            }
+            v0 = VFMVVF_FLOAT(FLT_MAX, gvl);
+            v0 = VFREDMINVS_FLOAT(v_min, v0, gvl);
+            minf = v0[0];
+        }
+        for(;j<n;){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v0 = VLSEV_FLOAT(&x[j*inc_x], stride_x, gvl);
+            //fabs(vector)
+            mask0 = VMFLTVF_FLOAT(v0, 0, gvl);
+            v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl);
+            v1 = VFMVVF_FLOAT(FLT_MAX, gvl);
+            v0 = VFREDMINVS_FLOAT(v0, v1, gvl);
+            if(v0[0] < minf)
+                minf = v0[0];
+            j += gvl;
+        }
+    }
+    return(minf);
+}
+
+
diff --git a/kernel/riscv64/asum.c b/kernel/riscv64/asum.c
new file mode 100644
--- /dev/null
+++ b/kernel/riscv64/asum.c
+/***************************************************************************
+Copyright (c) 2013, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+
+#else
+
+#define ABS fabsf
+
+#endif
+
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    FLOAT sumf = 0.0;
+    if (n <= 0 || inc_x <= 0) return(sumf);
+
+    n *= inc_x;
+    while(i < n)
+    {
+        sumf += ABS(x[i]);
+        i += inc_x;
+    }
+    return(sumf);
+}
+
+
diff --git a/kernel/riscv64/asum_vector.c b/kernel/riscv64/asum_vector.c
new file mode 100644
index 000000000..7ab7484e8
--- /dev/null
+++ b/kernel/riscv64/asum_vector.c
@@ -0,0 +1,131 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if !defined(DOUBLE)
+#define RVV_EFLOAT RVV_E32
+#define RVV_M RVV_M8
+#define FLOAT_V_T float32xm8_t
+#define VLEV_FLOAT vlev_float32xm8
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDSUMVS_FLOAT vfredsumvs_float32xm8
+#define MASK_T e32xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8
+#define VFADDVV_FLOAT vfaddvv_float32xm8
+#else
+#define RVV_EFLOAT RVV_E64
+#define RVV_M RVV_M8
+#define FLOAT_V_T float64xm8_t
+#define VLEV_FLOAT vlev_float64xm8
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDSUMVS_FLOAT vfredsumvs_float64xm8
+#define MASK_T e64xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8
+#define VFADDVV_FLOAT vfaddvv_float64xm8
+#endif
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    BLASLONG ix=0;
+    FLOAT asumf=0.0;
+    if (n <= 0 || inc_x <= 0) return(asumf);
+    unsigned int gvl = 0;
+    FLOAT_V_T v0, v1, v_zero,v_sum;
+
+    MASK_T mask0, mask1;
+    if(inc_x == 1){
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        v_zero = VFMVVF_FLOAT(0, gvl);
+        if(gvl <= n/2){
+            v_sum = VFMVVF_FLOAT(0, gvl);
+            for(i=0,j=0; i<n/(gvl*2); i++){
+                v0 = VLEV_FLOAT(&x[j], gvl);
+                //fabs(vector)
+                mask0 = VMFLTVF_FLOAT(v0, 0, gvl);
+                v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl);
+                v_sum = VFADDVV_FLOAT(v_sum, v0, gvl);
+
+                v1 = VLEV_FLOAT(&x[j+gvl], gvl);
+                //fabs(vector)
+                mask1 = VMFLTVF_FLOAT(v1, 0, gvl);
+                v1 = VFRSUBVF_MASK_FLOAT(v1, v1, 0, mask1, gvl);
+                v_sum = VFADDVV_FLOAT(v_sum, v1, gvl);
+                j += gvl*2;
+            }
+            v0 = VFREDSUMVS_FLOAT(v_sum, v_zero, gvl);
+            asumf += v0[0];
+        }
+        for(;j<n;){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v0 = VLEV_FLOAT(&x[j], gvl);
+            //fabs(vector)
+            mask0 = VMFLTVF_FLOAT(v0, 0, gvl);
+            v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl);
+            v_sum = VFREDSUMVS_FLOAT(v0, v_zero, gvl);
+            asumf += v_sum[0];
+            j += gvl;
+        }
+    }else{
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        unsigned int stride_x = inc_x * sizeof(FLOAT);
+        v_zero = VFMVVF_FLOAT(0, gvl);
+        if(gvl <= n/2){
+            BLASLONG inc_xv = inc_x * gvl;
+            v_sum = VFMVVF_FLOAT(0, gvl);
+            for(i=0,j=0; i<n/(gvl*2); i++){
+                v0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
+                //fabs(vector)
+                mask0 = VMFLTVF_FLOAT(v0, 0, gvl);
+                v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl);
+                v_sum = VFADDVV_FLOAT(v_sum, v0, gvl);
+
+                v1 = VLSEV_FLOAT(&x[ix+inc_xv], stride_x, gvl);
+                //fabs(vector)
+                mask1 = VMFLTVF_FLOAT(v1, 0, gvl);
+                v1 = VFRSUBVF_MASK_FLOAT(v1, v1, 0, mask1, gvl);
+                v_sum = VFADDVV_FLOAT(v_sum, v1, gvl);
+                j += gvl*2;
+                ix += inc_xv*2;
+            }
+            v0 = VFREDSUMVS_FLOAT(v_sum, v_zero, gvl);
+            asumf += v0[0];
+        }
+        for(;j<n;){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v0 = VLSEV_FLOAT(&x[j*inc_x], stride_x, gvl);
+            //fabs(vector)
+            mask0 = VMFLTVF_FLOAT(v0, 0, gvl);
+            v0 = VFRSUBVF_MASK_FLOAT(v0, v0, 0, mask0, gvl);
+            v_sum = VFREDSUMVS_FLOAT(v0, v_zero, gvl);
+            asumf += v_sum[0];
+            j += gvl;
+        }
+    }
+    return(asumf);
+}
+
+
diff --git a/kernel/riscv64/dgemm_kernel_8x4_c910v.c b/kernel/riscv64/dgemm_kernel_8x4_c910v.c
new file mode 100644
--- /dev/null
+++ b/kernel/riscv64/dgemm_kernel_8x4_c910v.c
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+
+#define KERNEL8x4_I \
+    "addi t1, %[PB], 1*8 \n\t"\
+    "addi t2, %[PB], 2*8 \n\t"\
+    "addi t3, %[PB], 3*8 \n\t"\
+    "fld ft0, (%[PB]) \n\t"\
+    "fld ft1, (t1) \n\t"\
+    "fld ft2, (t2) \n\t"\
+    "fld ft3, (t3) \n\t"\
+    "vle.v v0, (%[PA]) \n\t"\
+    "addi t4, %[PA], 2*8 \n\t"\
+    "addi t5, %[PA], 4*8 \n\t"\
+    "vfmv.v.f v8, ft0 \n\t"\
+    "addi t6, %[PA], 6*8 \n\t"\
+    "addi %[PA], %[PA], 8*8 \n\t"\
+    "vle.v v1, (t4) \n\t"\
+    "addi t4, t4, 8*8 \n\t"\
+    "vfmv.v.f v9, ft1 \n\t"\
+    "vle.v v2, (t5) \n\t"\
+    "addi t5, t5, 8*8 \n\t"\
+    "vle.v v3, (t6) \n\t"\
+    "addi t6, t6, 8*8 \n\t"\
+    "vfmv.v.f v10, ft2 \n\t"\
+    "addi %[PB], %[PB], 4*8 \n\t"\
+    "vle.v v4, (%[PA]) \n\t"\
+    "addi %[PA], %[PA], 8*8 \n\t"\
+    "vfmv.v.f v11, ft3 \n\t"\
+    "vfmacc.vv v16, v8, v0 \n\t"\
+    "addi t1, t1, 4*8 \n\t"\
+    "vle.v v5, (t4) \n\t"\
+    "addi t4, t4, 8*8 \n\t"\
+    "vfmacc.vv v17, v8, v1 \n\t"\
+    "addi t2, t2, 4*8 \n\t"\
+    "vle.v v6, (t5) \n\t"\
+    "addi t5, t5, 8*8 \n\t"\
+    "vfmacc.vv v18, v8, v2 \n\t"\
+    "addi t3, t3, 4*8 \n\t"\
+    "vle.v v7, (t6) \n\t"\
+    "addi t6, t6, 8*8 \n\t"\
+    "vfmacc.vv v19, v8, v3 \n\t"\
+    "fld ft4, (%[PB]) \n\t"\
+    "vfmacc.vv v20, v9, v0 \n\t"\
+    "fld ft5, (t1) \n\t"\
+    "vfmacc.vv v21, v9, v1 \n\t"\
+    "fld ft6, (t2) \n\t"\
+    "vfmacc.vv v22, v9, v2 \n\t"\
+    "fld ft7, (t3) \n\t"\
+    "vfmacc.vv v23, v9, v3 \n\t"\
+    "vfmv.v.f v12, ft4 \n\t"\
+    "vfmacc.vv v24, v10, v0 \n\t"\
+    "vfmv.v.f v13, ft5 \n\t"\
+    "vfmacc.vv v25, v10, v1 \n\t"\
+    "vfmv.v.f v14, ft6 \n\t"\
+    "vfmacc.vv v26, v10, v2 \n\t"\
+    "vfmv.v.f v15, ft7 \n\t"\
+    "vfmacc.vv v27, v10, v3 \n\t"\
+    "addi %[PB], %[PB], 4*8 \n\t"\
+    "vfmacc.vv v28, v11, v0 \n\t"\
+    "addi t1, t1, 4*8 \n\t"\
+    "vfmacc.vv v29, v11, v1 \n\t"\
+    "addi t2, t2, 4*8 \n\t"\
+    "vfmacc.vv v30, v11, v2 \n\t"\
+    "addi t3, t3, 4*8 \n\t"\
+    "vfmacc.vv v31, v11, v3 \n\t"
+
+#define KERNEL8x4_M1 \
+    "vfmacc.vv v16, v8, v0 \n\t"\
+    "vle.v v4, (%[PA]) \n\t"\
+    "addi %[PA], %[PA], 8*8 \n\t"\
+    "vfmacc.vv v17, v8, v1 \n\t"\
+    "vle.v v5, (t4) \n\t"\
+    "addi t4, t4, 8*8 \n\t"\
+    "vfmacc.vv v18, v8, v2 \n\t"\
+    "vle.v v6, (t5) \n\t"\
+    "addi t5, t5, 8*8 \n\t"\
+    "vfmacc.vv v19, v8, v3 \n\t"\
+    "vle.v v7, (t6) \n\t"\
+    "addi t6, t6, 8*8 \n\t"\
"vfmacc.vv v20, v9, v0 \n\t"\ + "fld ft4, (%[PB]) \n\t"\ + "vfmacc.vv v21, v9, v1 \n\t"\ + "fld ft5, (t1) \n\t"\ + "vfmacc.vv v22, v9, v2 \n\t"\ + "fld ft6, (t2) \n\t"\ + "vfmacc.vv v23, v9, v3 \n\t"\ + "fld ft7, (t3) \n\t"\ + "addi %[PB], %[PB], 4*8 \n\t"\ + "vfmacc.vv v24, v10, v0 \n\t"\ + "addi t1, t1, 4*8 \n\t"\ + "vfmacc.vv v25, v10, v1 \n\t"\ + "vfmv.v.f v12, ft4 \n\t"\ + "vfmacc.vv v26, v10, v2 \n\t"\ + "addi t2, t2, 4*8 \n\t"\ + "vfmacc.vv v27, v10, v3 \n\t"\ + "vfmv.v.f v13, ft5 \n\t"\ + "vfmacc.vv v28, v11, v0 \n\t"\ + "addi t3, t3, 4*8 \n\t"\ + "vfmacc.vv v29, v11, v1 \n\t"\ + "vfmv.v.f v14, ft6 \n\t"\ + "vfmacc.vv v30, v11, v2 \n\t"\ + "vfmacc.vv v31, v11, v3 \n\t"\ + "vfmv.v.f v15, ft7 \n\t" + +#define KERNEL8x4_M2 \ + "vfmacc.vv v16, v12, v4 \n\t"\ + "vle.v v0, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 8*8 \n\t"\ + "vfmacc.vv v17, v12, v5 \n\t"\ + "vle.v v1, (t4) \n\t"\ + "addi t4, t4, 8*8 \n\t"\ + "vfmacc.vv v18, v12, v6 \n\t"\ + "vle.v v2, (t5) \n\t"\ + "addi t5, t5, 8*8 \n\t"\ + "vfmacc.vv v19, v12, v7 \n\t"\ + "vle.v v3, (t6) \n\t"\ + "addi t6, t6, 8*8 \n\t"\ + "vfmacc.vv v20, v13, v4 \n\t"\ + "fld ft0, (%[PB]) \n\t"\ + "vfmacc.vv v21, v13, v5 \n\t"\ + "fld ft1, (t1) \n\t"\ + "vfmacc.vv v22, v13, v6 \n\t"\ + "fld ft2, (t2) \n\t"\ + "vfmacc.vv v23, v13, v7 \n\t"\ + "fld ft3, (t3) \n\t"\ + "addi %[PB], %[PB], 4*8 \n\t"\ + "vfmacc.vv v24, v14, v4 \n\t"\ + "addi t1, t1, 4*8 \n\t"\ + "vfmacc.vv v25, v14, v5 \n\t"\ + "vfmv.v.f v8, ft0 \n\t"\ + "vfmacc.vv v26, v14, v6 \n\t"\ + "addi t2, t2, 4*8 \n\t"\ + "vfmacc.vv v27, v14, v7 \n\t"\ + "vfmv.v.f v9, ft1 \n\t"\ + "vfmacc.vv v28, v15, v4 \n\t"\ + "addi t3, t3, 4*8 \n\t"\ + "vfmacc.vv v29, v15, v5 \n\t"\ + "vfmv.v.f v10, ft2 \n\t"\ + "vfmacc.vv v30, v15, v6 \n\t"\ + "vfmacc.vv v31, v15, v7 \n\t"\ + "vfmv.v.f v11, ft3 \n\t" + +#define KERNEL8x4_E \ + "vfmacc.vv v16, v12, v4 \n\t"\ + "vfmacc.vv v17, v12, v5 \n\t"\ + "vfmacc.vv v18, v12, v6 \n\t"\ + "vfmacc.vv v19, v12, v7 \n\t"\ + "vfmacc.vv v20, v13, v4 \n\t"\ + "vfmacc.vv v21, v13, v5 \n\t"\ + "vfmacc.vv v22, v13, v6 \n\t"\ + "vfmacc.vv v23, v13, v7 \n\t"\ + "vfmacc.vv v24, v14, v4 \n\t"\ + "vfmacc.vv v25, v14, v5 \n\t"\ + "vfmacc.vv v26, v14, v6 \n\t"\ + "vfmacc.vv v27, v14, v7 \n\t"\ + "vfmacc.vv v28, v15, v4 \n\t"\ + "vfmacc.vv v29, v15, v5 \n\t"\ + "vfmacc.vv v30, v15, v6 \n\t"\ + "vfmacc.vv v31, v15, v7 \n\t" + + + + +int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc +#ifdef TRMMKERNEL + ,BLASLONG offset +#endif + ) +{ + BLASLONG i,j,k; + FLOAT *C0,*C1,*C2,*C3; + FLOAT *ptrba,*ptrbb; + + FLOAT loadb0,loadb1,loadb2,loadb3; + FLOAT load0,load1,load2,load3,load4,load5,load6,load7; + + FLOAT res0,res1,res2,res3; + FLOAT res4,res5,res6,res7; + FLOAT res8,res9,res10,res11; + FLOAT res12,res13,res14,res15; + + for (j=0; j 0){ + vx = VFMVVF_FLOAT(0, gvl); + vx = VFREDSUM_FLOAT(vr, vx, gvl); + dot += vx[0]; + } + //tail + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + vx = VLEV_FLOAT(&x[j], gvl); + vy = VLEV_FLOAT(&y[j], gvl); + FLOAT_V_T vz = VFMVVF_FLOAT(0, gvl); + //vr = VFDOTVV_FLOAT(vx, vy, gvl); + vr = VFMACCVV_FLOAT(vz, vx, vy, gvl); + vx = VFREDSUM_FLOAT(vr, vz, gvl); + dot += vx[0]; + } + }else if(inc_y == 1){ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + vr = VFMVVF_FLOAT(0, gvl); + unsigned int stride_x = inc_x * sizeof(FLOAT); + for(i=0,j=0; i 0){ + vx = VFMVVF_FLOAT(0, gvl); + vx = VFREDSUM_FLOAT(vr, vx, gvl); + dot += vx[0]; + } + //tail + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + vx = VLSEV_FLOAT(&x[j*inc_x], 
stride_x, gvl); + vy = VLEV_FLOAT(&y[j], gvl); + FLOAT_V_T vz = VFMVVF_FLOAT(0, gvl); + //vr = VFDOTVV_FLOAT(vx, vy, gvl); + vr = VFMACCVV_FLOAT(vz, vx, vy, gvl); + vx = VFREDSUM_FLOAT(vr, vz, gvl); + dot += vx[0]; + } + }else if(inc_x == 1){ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + vr = VFMVVF_FLOAT(0, gvl); + unsigned int stride_y = inc_y * sizeof(FLOAT); + for(i=0,j=0; i 0){ + vx = VFMVVF_FLOAT(0, gvl); + vx = VFREDSUM_FLOAT(vr, vx, gvl); + dot += vx[0]; + } + //tail + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + vx = VLEV_FLOAT(&x[j], gvl); + vy = VLSEV_FLOAT(&y[j*inc_y], stride_y, gvl); + FLOAT_V_T vz = VFMVVF_FLOAT(0, gvl); + //vr = VFDOTVV_FLOAT(vx, vy, gvl); + vr = VFMACCVV_FLOAT(vz, vx, vy, gvl); + vx = VFREDSUM_FLOAT(vr, vz, gvl); + dot += vx[0]; + } + }else{ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + vr = VFMVVF_FLOAT(0, gvl); + unsigned int stride_x = inc_x * sizeof(FLOAT); + unsigned int stride_y = inc_y * sizeof(FLOAT); + for(i=0,j=0; i 0){ + vx = VFMVVF_FLOAT(0, gvl); + vx = VFREDSUM_FLOAT(vr, vx, gvl); + dot += vx[0]; + } + //tail + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + vx = VLSEV_FLOAT(&x[j*inc_x], stride_x, gvl); + vy = VLSEV_FLOAT(&y[j*inc_y], stride_y, gvl); + FLOAT_V_T vz = VFMVVF_FLOAT(0, gvl); + //vr = VFDOTVV_FLOAT(vx, vy, gvl); + vr = VFMACCVV_FLOAT(vz, vx, vy, gvl); + vx = VFREDSUM_FLOAT(vr, vz, gvl); + dot += vx[0]; + } + } + return(dot); +} + + diff --git a/kernel/riscv64/gemv_n.c b/kernel/riscv64/gemv_n.c new file mode 100644 index 000000000..ef61b245b --- /dev/null +++ b/kernel/riscv64/gemv_n.c @@ -0,0 +1,67 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float : OK
+* BLASTEST double : OK
+* CTEST : OK
+* TEST : OK
+*
+**************************************************************************************/
+
+
+#include "common.h"
+
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
+{
+    BLASLONG i;
+    BLASLONG ix,iy;
+    BLASLONG j;
+    FLOAT *a_ptr;
+    FLOAT temp;
+
+    ix = 0;
+    a_ptr = a;
+
+    for (j=0; j<n; j++)
+    {
+        temp = alpha * x[ix];
+        iy = 0;
+        for (i=0; i<m; i++)
+        {
+            y[iy] += temp * a_ptr[i];
+            iy += inc_y;
+        }
+        a_ptr += lda;
+        ix += inc_x;
+    }
+    return(0);
+}
+
+
diff --git a/kernel/riscv64/iamax.c b/kernel/riscv64/iamax.c
new file mode 100644
--- /dev/null
+++ b/kernel/riscv64/iamax.c
+/***************************************************************************
+Copyright (c) 2013, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+
+#else
+
+#define ABS fabsf
+
+#endif
+
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT maxf=0.0;
+    BLASLONG max=0;
+
+    if (n <= 0 || inc_x <= 0) return(max);
+
+    maxf=ABS(x[0]);
+    ix += inc_x;
+    i++;
+
+    while(i < n)
+    {
+        if( ABS(x[ix]) > maxf )
+        {
+            max = i;
+            maxf = ABS(x[ix]);
+        }
+        ix += inc_x;
+        i++;
+    }
+    return(max+1);
+}
+
+
diff --git a/kernel/riscv64/iamax_vector.c b/kernel/riscv64/iamax_vector.c
new file mode 100644
index 000000000..3aa64afc9
--- /dev/null
+++ b/kernel/riscv64/iamax_vector.c
@@ -0,0 +1,191 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+#define RVV_EFLOAT RVV_E64
+#define RVV_M RVV_M8
+#define FLOAT_V_T float64xm8_t
+#define VLEV_FLOAT vlev_float64xm8
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMAXVS_FLOAT vfredmaxvs_float64xm8
+#define MASK_T e64xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8
+#define VMFLTVV_FLOAT vmfltvv_e64xm8_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8
+#define VFMAXVV_FLOAT vfmaxvv_float64xm8
+#define VMFGEVF_FLOAT vmfgevf_e64xm8_float64xm8
+#define VMFIRSTM vmfirstm_e64xm8
+#define UINT_V_T uint64xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint64xm8
+#define VIDV_UINT vidv_uint64xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint64xm8
+#define VADDVX_UINT vaddvx_uint64xm8
+#define VMVVX_UINT vmvvx_uint64xm8
+#else
+
+#define ABS fabsf
+#define RVV_EFLOAT RVV_E32
+#define RVV_M RVV_M8
+#define FLOAT_V_T float32xm8_t
+#define VLEV_FLOAT vlev_float32xm8
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMAXVS_FLOAT vfredmaxvs_float32xm8
+#define MASK_T e32xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8
+#define VMFLTVV_FLOAT vmfltvv_e32xm8_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8
+#define VFMAXVV_FLOAT vfmaxvv_float32xm8
+#define VMFGEVF_FLOAT vmfgevf_e32xm8_float32xm8
+#define VMFIRSTM vmfirstm_e32xm8
+#define UINT_V_T uint32xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint32xm8
+#define VIDV_UINT vidv_uint32xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint32xm8
+#define VADDVX_UINT vaddvx_uint32xm8
+#define VMVVX_UINT vmvvx_uint32xm8
+#endif
+
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    FLOAT maxf=0.0;
+    unsigned int max_index = 0;
+    if (n <= 0 || inc_x <= 0) return(max_index);
+
+    FLOAT_V_T vx, v_max;
+    UINT_V_T v_max_index;
+    MASK_T mask;
+    unsigned int gvl = 0;
+    if(inc_x == 1){
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        v_max_index = VMVVX_UINT(0, gvl);
+        v_max = VFMVVF_FLOAT(-1, gvl);
+        for(i=0,j=0; i < n/gvl; i++){
+            vx = VLEV_FLOAT(&x[j], gvl);
+            //fabs(vector)
+            mask = VMFLTVF_FLOAT(vx, 0, gvl);
+            vx = VFRSUBVF_MASK_FLOAT(vx, vx, 0, mask, gvl);
+
+            //index where element greater than v_max
+            mask = VMFLTVV_FLOAT(v_max, vx, gvl);
+            v_max_index = VIDV_MASK_UINT(v_max_index, mask, gvl);
+            v_max_index = VADDVX_MASK_UINT(v_max_index, v_max_index, j, mask, gvl);
+
+            //update v_max and start_index j
+            v_max = VFMAXVV_FLOAT(v_max, vx, gvl);
+            j += gvl;
+        }
+        vx = VFMVVF_FLOAT(0, gvl);
+        vx = VFREDMAXVS_FLOAT(v_max, vx, gvl);
+        maxf = vx[0];
+        mask = VMFGEVF_FLOAT(v_max, maxf, gvl);
+        max_index = VMFIRSTM(mask,gvl);
+        max_index = v_max_index[max_index];
+
+        if(j < n){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            vx = VLEV_FLOAT(&x[j], gvl);
+            //fabs(vector)
+            mask = VMFLTVF_FLOAT(vx, 0, gvl);
+            v_max = VFRSUBVF_MASK_FLOAT(vx, vx, 0, mask, gvl);
+
+            vx = VFMVVF_FLOAT(0, gvl);
+            vx = VFREDMAXVS_FLOAT(v_max, vx, gvl);
+            FLOAT cur_maxf = vx[0];
+            if(cur_maxf > maxf){
+                //tail index
+                v_max_index = VIDV_UINT(gvl);
+                v_max_index = VADDVX_UINT(v_max_index, j, gvl);
+
+                mask = VMFGEVF_FLOAT(v_max, cur_maxf, gvl);
+                max_index = VMFIRSTM(mask,gvl);
+                max_index = v_max_index[max_index];
+            }
+        }
+    }else{
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        unsigned int stride_x = inc_x * sizeof(FLOAT);
+        unsigned int idx = 0, inc_v = gvl * inc_x;
+
+        v_max_index = VMVVX_UINT(0, gvl);
+        v_max =
VFMVVF_FLOAT(-1, gvl); + for(i=0,j=0; i < n/gvl; i++){ + vx = VLSEV_FLOAT(&x[idx], stride_x, gvl); + //fabs(vector) + mask = VMFLTVF_FLOAT(vx, 0, gvl); + vx = VFRSUBVF_MASK_FLOAT(vx, vx, 0, mask, gvl); + + //index where element greater than v_max + mask = VMFLTVV_FLOAT(v_max, vx, gvl); + v_max_index = VIDV_MASK_UINT(v_max_index, mask, gvl); + v_max_index = VADDVX_MASK_UINT(v_max_index, v_max_index, j, mask, gvl); + + //update v_max and start_index j + v_max = VFMAXVV_FLOAT(v_max, vx, gvl); + j += gvl; + idx += inc_v; + } + vx = VFMVVF_FLOAT(0, gvl); + vx = VFREDMAXVS_FLOAT(v_max, vx, gvl); + maxf = vx[0]; + mask = VMFGEVF_FLOAT(v_max, maxf, gvl); + max_index = VMFIRSTM(mask,gvl); + max_index = v_max_index[max_index]; + + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + vx = VLSEV_FLOAT(&x[idx], stride_x, gvl); + //fabs(vector) + mask = VMFLTVF_FLOAT(vx, 0, gvl); + v_max = VFRSUBVF_MASK_FLOAT(vx, vx, 0, mask, gvl); + + vx = VFMVVF_FLOAT(0, gvl); + vx = VFREDMAXVS_FLOAT(v_max, vx, gvl); + FLOAT cur_maxf = vx[0]; + if(cur_maxf > maxf){ + //tail index + v_max_index = VIDV_UINT(gvl); + v_max_index = VADDVX_UINT(v_max_index, j, gvl); + + mask = VMFGEVF_FLOAT(v_max, cur_maxf, gvl); + max_index = VMFIRSTM(mask,gvl); + max_index = v_max_index[max_index]; + } + } + } + return(max_index+1); +} + + diff --git a/kernel/riscv64/iamin.c b/kernel/riscv64/iamin.c new file mode 100644 index 000000000..155292bd5 --- /dev/null +++ b/kernel/riscv64/iamin.c @@ -0,0 +1,77 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float : NoTest
+* BLASTEST double : NoTest
+* CTEST : NoTest
+* TEST : NoTest
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+
+#else
+
+#define ABS fabsf
+
+#endif
+
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT minf=0.0;
+    BLASLONG min=0;
+
+    if (n <= 0 || inc_x <= 0) return(min);
+
+    minf=ABS(x[0]);
+    ix += inc_x;
+    i++;
+
+    while(i < n)
+    {
+        if( ABS(x[ix]) < ABS(minf) )
+        {
+            min = i;
+            minf = ABS(x[ix]);
+        }
+        ix += inc_x;
+        i++;
+    }
+    return(min+1);
+}
+
+
diff --git a/kernel/riscv64/iamin_vector.c b/kernel/riscv64/iamin_vector.c
new file mode 100644
index 000000000..608f19a00
--- /dev/null
+++ b/kernel/riscv64/iamin_vector.c
@@ -0,0 +1,192 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+#include <float.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+#define RVV_EFLOAT RVV_E64
+#define RVV_M RVV_M8
+#define FLOAT_V_T float64xm8_t
+#define VLEV_FLOAT vlev_float64xm8
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMINVS_FLOAT vfredminvs_float64xm8
+#define MASK_T e64xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8
+#define VMFLTVV_FLOAT vmfltvv_e64xm8_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8
+#define VFMINVV_FLOAT vfminvv_float64xm8
+#define VMFLEVF_FLOAT vmflevf_e64xm8_float64xm8
+#define VMFIRSTM vmfirstm_e64xm8
+#define UINT_V_T uint64xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint64xm8
+#define VIDV_UINT vidv_uint64xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint64xm8
+#define VADDVX_UINT vaddvx_uint64xm8
+#define VMVVX_UINT vmvvx_uint64xm8
+#else
+
+#define ABS fabsf
+#define RVV_EFLOAT RVV_E32
+#define RVV_M RVV_M8
+#define FLOAT_V_T float32xm8_t
+#define VLEV_FLOAT vlev_float32xm8
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMINVS_FLOAT vfredminvs_float32xm8
+#define MASK_T e32xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8
+#define VMFLTVV_FLOAT vmfltvv_e32xm8_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8
+#define VFMINVV_FLOAT vfminvv_float32xm8
+#define VMFLEVF_FLOAT vmflevf_e32xm8_float32xm8
+#define VMFIRSTM vmfirstm_e32xm8
+#define UINT_V_T uint32xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint32xm8
+#define VIDV_UINT vidv_uint32xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint32xm8
+#define VADDVX_UINT vaddvx_uint32xm8
+#define VMVVX_UINT vmvvx_uint32xm8
+#endif
+
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    FLOAT minf=FLT_MAX;
+    unsigned int min_index = 0;
+    if (n <= 0 || inc_x <= 0) return(min_index);
+
+    FLOAT_V_T vx, v_min;
+    UINT_V_T v_min_index;
+    MASK_T mask;
+    unsigned int gvl = 0;
+    if(inc_x == 1){
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        v_min = VFMVVF_FLOAT(FLT_MAX, gvl);
+        v_min_index = VMVVX_UINT(0, gvl);
+        for(i=0,j=0; i < n/gvl; i++){
+            vx = VLEV_FLOAT(&x[j], gvl);
+            //fabs(vector)
+            mask = VMFLTVF_FLOAT(vx, 0, gvl);
+            vx = VFRSUBVF_MASK_FLOAT(vx, vx, 0, mask, gvl);
+
+            //index where element less than v_min
+            mask = VMFLTVV_FLOAT(vx, v_min, gvl);
+            v_min_index = VIDV_MASK_UINT(v_min_index, mask, gvl);
+            v_min_index = VADDVX_MASK_UINT(v_min_index, v_min_index, j, mask, gvl);
+
+            //update v_min and start_index j
+            v_min = VFMINVV_FLOAT(v_min, vx, gvl);
+            j += gvl;
+        }
+        vx = VFMVVF_FLOAT(FLT_MAX, gvl);
+        vx = VFREDMINVS_FLOAT(v_min, vx, gvl);
+        minf = vx[0];
+        mask = VMFLEVF_FLOAT(v_min, minf, gvl);
+        min_index = VMFIRSTM(mask,gvl);
+        min_index = v_min_index[min_index];
+
+        if(j < n){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            vx = VLEV_FLOAT(&x[j], gvl);
+            //fabs(vector)
+            mask = VMFLTVF_FLOAT(vx, 0, gvl);
+            v_min = VFRSUBVF_MASK_FLOAT(vx, vx, 0, mask, gvl);
+
+            vx = VFMVVF_FLOAT(FLT_MAX, gvl);
+            vx = VFREDMINVS_FLOAT(v_min, vx, gvl);
+            FLOAT cur_minf = vx[0];
+            if(cur_minf < minf){
+                //tail index
+                v_min_index = VIDV_UINT(gvl);
+                v_min_index = VADDVX_UINT(v_min_index, j, gvl);
+
+                mask = VMFLEVF_FLOAT(v_min, cur_minf, gvl);
+                min_index = VMFIRSTM(mask,gvl);
+                min_index = v_min_index[min_index];
+            }
+        }
+    }else{
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        unsigned int stride_x = inc_x * sizeof(FLOAT);
+        unsigned int idx = 0, inc_v = gvl * inc_x;
+
+        v_min = VFMVVF_FLOAT(FLT_MAX,
gvl); + v_min_index = VMVVX_UINT(0, gvl); + for(i=0,j=0; i < n/gvl; i++){ + vx = VLSEV_FLOAT(&x[idx], stride_x, gvl); + //fabs(vector) + mask = VMFLTVF_FLOAT(vx, 0, gvl); + vx = VFRSUBVF_MASK_FLOAT(vx, vx, 0, mask, gvl); + + //index where element less than v_min + mask = VMFLTVV_FLOAT(vx, v_min, gvl); + v_min_index = VIDV_MASK_UINT(v_min_index, mask, gvl); + v_min_index = VADDVX_MASK_UINT(v_min_index, v_min_index, j, mask, gvl); + + //update v_min and start_index j + v_min = VFMINVV_FLOAT(v_min, vx, gvl); + j += gvl; + idx += inc_v; + } + vx = VFMVVF_FLOAT(FLT_MAX, gvl); + vx = VFREDMINVS_FLOAT(v_min, vx, gvl); + minf = vx[0]; + mask = VMFLEVF_FLOAT(v_min, minf, gvl); + min_index = VMFIRSTM(mask,gvl); + min_index = v_min_index[min_index]; + + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + vx = VLSEV_FLOAT(&x[idx], stride_x, gvl); + //fabs(vector) + mask = VMFLTVF_FLOAT(vx, 0, gvl); + v_min = VFRSUBVF_MASK_FLOAT(vx, vx, 0, mask, gvl); + + vx = VFMVVF_FLOAT(FLT_MAX, gvl); + vx = VFREDMINVS_FLOAT(v_min, vx, gvl); + FLOAT cur_minf = vx[0]; + if(cur_minf < minf){ + //tail index + v_min_index = VIDV_UINT(gvl); + v_min_index = VADDVX_UINT(v_min_index, j, gvl); + + mask = VMFLEVF_FLOAT(v_min, cur_minf, gvl); + min_index = VMFIRSTM(mask,gvl); + min_index = v_min_index[min_index]; + } + } + } + return(min_index+1); +} + + diff --git a/kernel/riscv64/imax.c b/kernel/riscv64/imax.c new file mode 100644 index 000000000..5072dd16e --- /dev/null +++ b/kernel/riscv64/imax.c @@ -0,0 +1,69 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float : NoTest
+* BLASTEST double : NoTest
+* CTEST : NoTest
+* TEST : NoTest
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT maxf=0.0;
+    BLASLONG max=0;
+
+    if (n <= 0 || inc_x <= 0) return(max);
+
+    maxf=x[0];
+    ix += inc_x;
+    i++;
+
+    while(i < n)
+    {
+        if( x[ix] > maxf )
+        {
+            max = i;
+            maxf = x[ix];
+        }
+        ix += inc_x;
+        i++;
+    }
+    return(max+1);
+}
+
+
diff --git a/kernel/riscv64/imax_vector.c b/kernel/riscv64/imax_vector.c
new file mode 100644
index 000000000..44af7101b
--- /dev/null
+++ b/kernel/riscv64/imax_vector.c
@@ -0,0 +1,176 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+#include <float.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+#define RVV_EFLOAT RVV_E64
+#define RVV_M RVV_M8
+#define FLOAT_V_T float64xm8_t
+#define VLEV_FLOAT vlev_float64xm8
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMAXVS_FLOAT vfredmaxvs_float64xm8
+#define MASK_T e64xm8_t
+#define VMFLTVV_FLOAT vmfltvv_e64xm8_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFMAXVV_FLOAT vfmaxvv_float64xm8
+#define VMFGEVF_FLOAT vmfgevf_e64xm8_float64xm8
+#define VMFIRSTM vmfirstm_e64xm8
+#define UINT_V_T uint64xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint64xm8
+#define VIDV_UINT vidv_uint64xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint64xm8
+#define VADDVX_UINT vaddvx_uint64xm8
+#define VMVVX_UINT vmvvx_uint64xm8
+#else
+
+#define ABS fabsf
+#define RVV_EFLOAT RVV_E32
+#define RVV_M RVV_M8
+#define FLOAT_V_T float32xm8_t
+#define VLEV_FLOAT vlev_float32xm8
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMAXVS_FLOAT vfredmaxvs_float32xm8
+#define MASK_T e32xm8_t
+#define VMFLTVV_FLOAT vmfltvv_e32xm8_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFMAXVV_FLOAT vfmaxvv_float32xm8
+#define VMFGEVF_FLOAT vmfgevf_e32xm8_float32xm8
+#define VMFIRSTM vmfirstm_e32xm8
+#define UINT_V_T uint32xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint32xm8
+#define VIDV_UINT vidv_uint32xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint32xm8
+#define VADDVX_UINT vaddvx_uint32xm8
+#define VMVVX_UINT vmvvx_uint32xm8
+#endif
+
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    unsigned int max_index = 0;
+    if (n <= 0 || inc_x <= 0) return(max_index);
+    FLOAT maxf=-FLT_MAX;
+
+    FLOAT_V_T vx, v_max;
+    UINT_V_T v_max_index;
+    MASK_T mask;
+    unsigned int gvl = 0;
+    if(inc_x == 1){
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        v_max_index = VMVVX_UINT(0, gvl);
+        v_max = VFMVVF_FLOAT(-FLT_MAX, gvl);
+        for(i=0,j=0; i < n/gvl; i++){
+            vx = VLEV_FLOAT(&x[j], gvl);
+
+            //index where element greater than v_max
+            mask = VMFLTVV_FLOAT(v_max, vx, gvl);
+            v_max_index = VIDV_MASK_UINT(v_max_index, mask, gvl);
+            v_max_index = VADDVX_MASK_UINT(v_max_index, v_max_index, j, mask, gvl);
+
+            //update v_max and start_index j
+            v_max = VFMAXVV_FLOAT(v_max, vx, gvl);
+            j += gvl;
+        }
+        vx = VFMVVF_FLOAT(-FLT_MAX, gvl);
+        vx = VFREDMAXVS_FLOAT(v_max, vx, gvl);
+        maxf = vx[0];
+        mask = VMFGEVF_FLOAT(v_max, maxf, gvl);
+        max_index = VMFIRSTM(mask,gvl);
+        max_index = v_max_index[max_index];
+
+        if(j < n){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v_max = VLEV_FLOAT(&x[j], gvl);
+
+            vx = VFMVVF_FLOAT(-FLT_MAX, gvl);
+            vx = VFREDMAXVS_FLOAT(v_max, vx, gvl);
+            FLOAT cur_maxf = vx[0];
+            if(cur_maxf > maxf){
+                //tail index
+                v_max_index = VIDV_UINT(gvl);
+                v_max_index = VADDVX_UINT(v_max_index, j, gvl);
+
+                mask = VMFGEVF_FLOAT(v_max, cur_maxf, gvl);
+                max_index = VMFIRSTM(mask,gvl);
+                max_index = v_max_index[max_index];
+            }
+        }
+    }else{
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        unsigned int stride_x = inc_x * sizeof(FLOAT);
+        unsigned int idx = 0, inc_v = gvl * inc_x;
+
+        v_max = VFMVVF_FLOAT(-FLT_MAX, gvl);
+        v_max_index = VMVVX_UINT(0, gvl);
+        for(i=0,j=0; i < n/gvl; i++){
+            vx = VLSEV_FLOAT(&x[idx], stride_x, gvl);
+
+            //index where element greater than v_max
+            mask = VMFLTVV_FLOAT(v_max, vx, gvl);
+            v_max_index = VIDV_MASK_UINT(v_max_index, mask, gvl);
+            v_max_index = VADDVX_MASK_UINT(v_max_index, v_max_index, j, mask, gvl);
+
+            //update v_max and start_index j
+            v_max = VFMAXVV_FLOAT(v_max, vx, gvl);
+            j += gvl;
+            idx += inc_v;
+        }
+        vx = VFMVVF_FLOAT(-FLT_MAX, gvl);
+        vx = VFREDMAXVS_FLOAT(v_max, vx, gvl);
+        maxf = vx[0];
+        mask = VMFGEVF_FLOAT(v_max, maxf, gvl);
+        max_index = VMFIRSTM(mask,gvl);
+        max_index = v_max_index[max_index];
+
+        if(j < n){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v_max = VLSEV_FLOAT(&x[idx], stride_x, gvl);
+
+            vx = VFMVVF_FLOAT(-FLT_MAX, gvl);
+            vx = VFREDMAXVS_FLOAT(v_max, vx, gvl);
+            FLOAT cur_maxf = vx[0];
+            if(cur_maxf > maxf){
+                //tail index
+                v_max_index = VIDV_UINT(gvl);
+                v_max_index = VADDVX_UINT(v_max_index, j, gvl);
+
+                mask = VMFGEVF_FLOAT(v_max, cur_maxf, gvl);
+                max_index = VMFIRSTM(mask,gvl);
+                max_index = v_max_index[max_index];
+            }
+        }
+    }
+    return(max_index+1);
+}
+
+
diff --git a/kernel/riscv64/imin.c b/kernel/riscv64/imin.c
new file mode 100644
index 000000000..598cba387
--- /dev/null
+++ b/kernel/riscv64/imin.c
@@ -0,0 +1,67 @@
+/***************************************************************************
+Copyright (c) 2013, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+
+/**************************************************************************************
+* 2013/08/19 Saar
+* BLASTEST float
+* BLASTEST double
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT minf=0.0;
+    BLASLONG min=0;
+
+    if (n <= 0 || inc_x <= 0) return(min);
+
+    minf=x[0];
+    ix += inc_x;
+    i++;
+
+    while(i < n)
+    {
+        if( x[ix] < minf )
+        {
+            min = i;
+            minf = x[ix];
+        }
+        ix += inc_x;
+        i++;
+    }
+    return(min+1);
+}
+
+
diff --git a/kernel/riscv64/imin_vector.c b/kernel/riscv64/imin_vector.c
new file mode 100644
index 000000000..e6e0e9f9f
--- /dev/null
+++ b/kernel/riscv64/imin_vector.c
@@ -0,0 +1,212 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+#include <float.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+#define RVV_EFLOAT RVV_E64
+#define RVV_M RVV_M8
+#define FLOAT_V_T float64xm8_t
+#define VLEV_FLOAT vlev_float64xm8
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMINVS_FLOAT vfredminvs_float64xm8
+#define MASK_T e64xm8_t
+#define VMFLTVV_FLOAT vmfltvv_e64xm8_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFMINVV_FLOAT vfminvv_float64xm8
+#define VMFLEVF_FLOAT vmflevf_e64xm8_float64xm8
+#define VMFIRSTM vmfirstm_e64xm8
+#define UINT_V_T uint64xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint64xm8
+#define VIDV_UINT vidv_uint64xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint64xm8
+#define VADDVX_UINT vaddvx_uint64xm8
+#define VMVVX_UINT vmvvx_uint64xm8
+#else
+
+#define ABS fabsf
+#define RVV_EFLOAT RVV_E32
+#define RVV_M RVV_M8
+#define FLOAT_V_T float32xm8_t
+#define VLEV_FLOAT vlev_float32xm8
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMINVS_FLOAT vfredminvs_float32xm8
+#define MASK_T e32xm8_t
+#define VMFLTVV_FLOAT vmfltvv_e32xm8_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFMINVV_FLOAT vfminvv_float32xm8
+#define VMFLEVF_FLOAT vmflevf_e32xm8_float32xm8
+#define VMFIRSTM vmfirstm_e32xm8
+#define UINT_V_T uint32xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint32xm8
+#define VIDV_UINT vidv_uint32xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint32xm8
+#define VADDVX_UINT vaddvx_uint32xm8
+#define VMVVX_UINT vmvvx_uint32xm8
+#endif
+
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    FLOAT minf=FLT_MAX;
+    unsigned int min_index = 0;
+    if (n <= 0 || inc_x <= 0) return(min_index);
+
+    FLOAT_V_T vx, v_min;
+    UINT_V_T v_min_index;
+    MASK_T mask;
+    unsigned int gvl = 0;
+    if(inc_x == 1){
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        v_min = VFMVVF_FLOAT(FLT_MAX, gvl);
+        v_min_index = VMVVX_UINT(0, gvl);
+        for(i=0,j=0; i < n/gvl; i++){
+            vx = VLEV_FLOAT(&x[j], gvl);
+            //index where element less than v_min
+            mask = VMFLTVV_FLOAT(vx, v_min, gvl);
+            v_min_index =
VIDV_MASK_UINT(v_min_index, mask, gvl); +/* +#if defined(DOUBLE) +asm volatile( + "vor.vv v0, %1, %1 \n\t" + "vsetvli x0, %2, e64,m8 \n\t" + "vid.v %0, v0.t \n\t" + :"+v"(v_min_index) + :"v"(mask), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1 \n\t" + "vsetvli x0, %2, e32,m8 \n\t" + "vid.v %0, v0.t \n\t" + :"+v"(v_min_index) + :"v"(mask), "r"(gvl) + :"v0"); +#endif +*/ + v_min_index = VADDVX_MASK_UINT(v_min_index, v_min_index, j, mask, gvl); + + //update v_min and start_index j + v_min = VFMINVV_FLOAT(v_min, vx, gvl); + j += gvl; + } + vx = VFMVVF_FLOAT(FLT_MAX, gvl); + vx = VFREDMINVS_FLOAT(v_min, vx, gvl); + minf = vx[0]; + mask = VMFLEVF_FLOAT(v_min, minf, gvl); + min_index = VMFIRSTM(mask,gvl); + min_index = v_min_index[min_index]; + + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + v_min = VLEV_FLOAT(&x[j], gvl); + + vx = VFMVVF_FLOAT(FLT_MAX, gvl); + vx = VFREDMINVS_FLOAT(v_min, vx, gvl); + FLOAT cur_minf = vx[0]; + if(cur_minf < minf){ + //tail index + v_min_index = VIDV_UINT(gvl); + v_min_index = VADDVX_UINT(v_min_index, j, gvl); + mask = VMFLEVF_FLOAT(v_min, cur_minf, gvl); + min_index = VMFIRSTM(mask,gvl); + min_index = v_min_index[min_index]; + } + } + }else{ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + unsigned int stride_x = inc_x * sizeof(FLOAT); + unsigned int idx = 0, inc_v = gvl * inc_x; + + v_min = VFMVVF_FLOAT(FLT_MAX, gvl); + v_min_index = VMVVX_UINT(0, gvl); + for(i=0,j=0; i < n/gvl; i++){ + vx = VLSEV_FLOAT(&x[idx], stride_x, gvl); + + //index where element less than v_min + mask = VMFLTVV_FLOAT(vx, v_min, gvl); + v_min_index = VIDV_MASK_UINT(v_min_index, mask, gvl); +/* +#if defined(DOUBLE) +asm volatile( + "vor.vv v0, %1, %1 \n\t" + "vsetvli x0, %2, e64,m8 \n\t" + "vid.v %0, v0.t \n\t" + :"+v"(v_min_index) + :"v"(mask), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1 \n\t" + "vsetvli x0, %2, e32,m8 \n\t" + "vid.v %0, v0.t \n\t" + :"+v"(v_min_index) + :"v"(mask), "r"(gvl) + :"v0"); +#endif +*/ + + v_min_index = VADDVX_MASK_UINT(v_min_index, v_min_index, j, mask, gvl); + + //update v_min and start_index j + v_min = VFMINVV_FLOAT(v_min, vx, gvl); + j += gvl; + idx += inc_v; + } + vx = VFMVVF_FLOAT(FLT_MAX, gvl); + vx = VFREDMINVS_FLOAT(v_min, vx, gvl); + minf = vx[0]; + mask = VMFLEVF_FLOAT(v_min, minf, gvl); + min_index = VMFIRSTM(mask,gvl); + min_index = v_min_index[min_index]; + + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + v_min = VLSEV_FLOAT(&x[idx], stride_x, gvl); + + vx = VFMVVF_FLOAT(FLT_MAX, gvl); + vx = VFREDMINVS_FLOAT(v_min, vx, gvl); + FLOAT cur_minf = vx[0]; + if(cur_minf < minf){ + //tail index + v_min_index = VIDV_UINT(gvl); + v_min_index = VADDVX_UINT(v_min_index, j, gvl); + mask = VMFLEVF_FLOAT(v_min, cur_minf, gvl); + min_index = VMFIRSTM(mask,gvl); + min_index = v_min_index[min_index]; + } + } + } + return(min_index+1); +} + + diff --git a/kernel/riscv64/izamax.c b/kernel/riscv64/izamax.c new file mode 100644 index 000000000..8fe33e95b --- /dev/null +++ b/kernel/riscv64/izamax.c @@ -0,0 +1,81 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float : NoTest
+* BLASTEST double : NoTest
+* CTEST : OK
+* TEST : OK
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+
+#else
+
+#define ABS fabsf
+
+#endif
+
+#define CABS1(x,i) ABS(x[i])+ABS(x[i+1])
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT maxf;
+    BLASLONG max=0;
+    BLASLONG inc_x2;
+
+    if (n <= 0 || inc_x <= 0) return(max);
+
+    inc_x2 = 2 * inc_x;
+
+    maxf = CABS1(x,0);
+    ix += inc_x2;
+    i++;
+
+    while(i < n)
+    {
+        if( CABS1(x,ix) > maxf )
+        {
+            max = i;
+            maxf = CABS1(x,ix);
+        }
+        ix += inc_x2;
+        i++;
+    }
+    return(max+1);
+}
+
+
diff --git a/kernel/riscv64/izamax_vector.c b/kernel/riscv64/izamax_vector.c
new file mode 100644
index 000000000..62c95d973
--- /dev/null
+++ b/kernel/riscv64/izamax_vector.c
@@ -0,0 +1,246 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define RVV_EFLOAT RVV_E64
+#define FLOAT_V_T float64xm8_t
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMAXVS_FLOAT vfredmaxvs_float64xm8
+#define MASK_T e64xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8
+#define VMFLTVV_FLOAT vmfltvv_e64xm8_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8
+#define VFMAXVV_FLOAT vfmaxvv_float64xm8
+#define VMFGEVF_FLOAT vmfgevf_e64xm8_float64xm8
+#define VMFIRSTM vmfirstm_e64xm8
+#define UINT_V_T uint64xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint64xm8
+#define VIDV_UINT vidv_uint64xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint64xm8
+#define VADDVX_UINT vaddvx_uint64xm8
+#define VFADDVV_FLOAT vfaddvv_float64xm8
+#define VMVVX_UINT vmvvx_uint64xm8
+#else
+
+#define ABS fabsf
+#define RVV_EFLOAT RVV_E32
+#define FLOAT_V_T float32xm8_t
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMAXVS_FLOAT vfredmaxvs_float32xm8
+#define MASK_T e32xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8
+#define VMFLTVV_FLOAT vmfltvv_e32xm8_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8
+#define VFMAXVV_FLOAT vfmaxvv_float32xm8
+#define VMFGEVF_FLOAT vmfgevf_e32xm8_float32xm8
+#define VMFIRSTM vmfirstm_e32xm8
+#define UINT_V_T uint32xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint32xm8
+#define VIDV_UINT vidv_uint32xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint32xm8
+#define VADDVX_UINT vaddvx_uint32xm8
+#define VFADDVV_FLOAT vfaddvv_float32xm8
+#define VMVVX_UINT vmvvx_uint32xm8
+#endif
+
+#define RVV_M RVV_M8
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    FLOAT maxf=0.0;
+    unsigned int max_index = 0;
+    if (n <= 0 || inc_x <= 0) return(max_index);
+
+    FLOAT_V_T vx0, vx1, v_max;
+    UINT_V_T v_max_index;
+    MASK_T mask0, mask1;
+    unsigned int gvl = 0;
+    gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+    v_max_index = VMVVX_UINT(0, gvl);
+    v_max = VFMVVF_FLOAT(-1, gvl);
+    BLASLONG stride_x = inc_x * 2 * sizeof(FLOAT);
+    BLASLONG inc_xv = gvl * inc_x * 2;
+    BLASLONG ix = 0;
+    for(i=0,j=0; i < n/gvl; i++){
+        vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
+        //fabs(vector)
+        mask0 = VMFLTVF_FLOAT(vx0, 0, gvl);
+        vx0 = VFRSUBVF_MASK_FLOAT(vx0, vx0, 0, mask0, gvl);
+/*
+#if defined(DOUBLE)
+asm volatile(
+    "vor.vv v0, %1, %1\n\t"
+    "vsetvli x0, %3, e64,m8 \n\t"
+    "vfrsub.vf %0, %0, %2, v0.t \n\t"
+    :"+v"(vx0)
+    :"v"(mask0), "f"(zero), "r"(gvl)
+    :"v0");
+#else
+asm volatile(
+    "vor.vv v0, %1, %1\n\t"
+    "vsetvli x0, %3, e32,m8 \n\t"
+    "vfrsub.vf %0, %0, %2, v0.t \n\t"
+    :"+v"(vx0)
+    :"v"(mask0), "f"(zero), "r"(gvl)
+    :"v0");
+#endif
+*/
+        vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl);
+        //fabs(vector)
+        mask1 = VMFLTVF_FLOAT(vx1, 0, gvl);
+        vx1 = VFRSUBVF_MASK_FLOAT(vx1, vx1, 0, mask1, gvl);
+/*
+#if defined(DOUBLE)
+asm volatile(
+    "vor.vv v0, %1, %1\n\t"
+    "vsetvli x0, %3, e64,m8 \n\t"
+
"vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx1) + :"v"(mask1), "f"(zero), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e32,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx1) + :"v"(mask1), "f"(zero), "r"(gvl) + :"v0"); +#endif +*/ + vx0 = VFADDVV_FLOAT(vx0, vx1, gvl); + + //index where element greater than v_max + mask0 = VMFLTVV_FLOAT(v_max, vx0, gvl); + v_max_index = VIDV_MASK_UINT(v_max_index, mask0, gvl); +/* +#if defined(DOUBLE) +asm volatile( + "vor.vv v0, %1, %1 \n\t" + "vsetvli x0, %2, e64,m8 \n\t" + "vid.v %0, v0.t \n\t" + :"+v"(v_max_index) + :"v"(mask0), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1 \n\t" + "vsetvli x0, %2, e32,m8 \n\t" + "vid.v %0, v0.t \n\t" + :"+v"(v_max_index) + :"v"(mask0), "r"(gvl) + :"v0"); +#endif +*/ + v_max_index = VADDVX_MASK_UINT(v_max_index, v_max_index, j, mask0, gvl); + + //update v_max and start_index j + v_max = VFMAXVV_FLOAT(v_max, vx0, gvl); + j += gvl; + ix += inc_xv; + } + vx0 = VFMVVF_FLOAT(0, gvl); + vx0 = VFREDMAXVS_FLOAT(v_max, vx0, gvl); + maxf = vx0[0]; + mask0 = VMFGEVF_FLOAT(v_max, maxf, gvl); + max_index = VMFIRSTM(mask0,gvl); + max_index = v_max_index[max_index]; + + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + v_max_index = VMVVX_UINT(0, gvl); + vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl); + //fabs(vector) + mask0 = VMFLTVF_FLOAT(vx0, 0, gvl); + vx0 = VFRSUBVF_MASK_FLOAT(vx0, vx0, 0, mask0, gvl); +/* +#if defined(DOUBLE) +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e64,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx0) + :"v"(mask0), "f"(zero), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e32,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx0) + :"v"(mask0), "f"(zero), "r"(gvl) + :"v0"); +#endif +*/ + vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl); + //fabs(vector) + mask1 = VMFLTVF_FLOAT(vx1, 0, gvl); + vx1 = VFRSUBVF_MASK_FLOAT(vx1, vx1, 0, mask1, gvl); +/* +#if defined(DOUBLE) +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e64,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx1) + :"v"(mask1), "f"(zero), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e32,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx1) + :"v"(mask1), "f"(zero), "r"(gvl) + :"v0"); +#endif +*/ + v_max = VFADDVV_FLOAT(vx0, vx1, gvl); + vx0 = VFMVVF_FLOAT(0, gvl); + vx0 = VFREDMAXVS_FLOAT(v_max, vx0, gvl); + FLOAT cur_maxf = vx0[0]; + if(cur_maxf > maxf){ + //tail index + v_max_index = VIDV_UINT(gvl); + v_max_index = VADDVX_UINT(v_max_index, j, gvl); + + mask0 = VMFGEVF_FLOAT(v_max, cur_maxf, gvl); + max_index = VMFIRSTM(mask0,gvl); + max_index = v_max_index[max_index]; + } + } + return(max_index+1); +} + + diff --git a/kernel/riscv64/izamin.c b/kernel/riscv64/izamin.c new file mode 100644 index 000000000..fb5a0d4cb --- /dev/null +++ b/kernel/riscv64/izamin.c @@ -0,0 +1,81 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float   : NoTest
+* BLASTEST double  : NoTest
+* CTEST            : NoTest
+* TEST             : NoTest
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+
+#else
+
+#define ABS fabsf
+
+#endif
+
+#define CABS1(x,i) ABS(x[i])+ABS(x[i+1])
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT minf;
+    BLASLONG min=0;
+    BLASLONG inc_x2;
+
+    if (n <= 0 || inc_x <= 0) return(min);
+
+    inc_x2 = 2 * inc_x;
+
+    minf = CABS1(x,0);
+    ix += inc_x2;
+    i++;
+
+    while(i < n)
+    {
+        if( CABS1(x,ix) < minf )
+        {
+            min = i;
+            minf = CABS1(x,ix);
+        }
+        ix += inc_x2;
+        i++;
+    }
+    return(min+1);
+}
+
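+/*
+ * Illustration (added note, not from the original source): because each
+ * complex element occupies two FLOAT slots, the loop above advances by
+ * inc_x2 = 2*inc_x slots per logical element. With inc_x = 2 it compares
+ * CABS1 = |Re| + |Im| of x[0..1], x[4..5], x[8..9], ... and returns the
+ * 1-based position of the smallest.
+ */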
diff --git a/kernel/riscv64/izamin_vector.c b/kernel/riscv64/izamin_vector.c
new file mode 100644
index 000000000..38eccf1b5
--- /dev/null
+++ b/kernel/riscv64/izamin_vector.c
@@ -0,0 +1,247 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+#include <float.h>
+
+#if defined(DOUBLE)
+
+#define RVV_EFLOAT RVV_E64
+#define FLOAT_V_T float64xm8_t
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMINVS_FLOAT vfredminvs_float64xm8
+#define MASK_T e64xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8
+#define VMFLTVV_FLOAT vmfltvv_e64xm8_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8
+#define VFMINVV_FLOAT vfminvv_float64xm8
+#define VMFLEVF_FLOAT vmflevf_e64xm8_float64xm8
+#define VMFIRSTM vmfirstm_e64xm8
+#define UINT_V_T uint64xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint64xm8
+#define VIDV_UINT vidv_uint64xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint64xm8
+#define VADDVX_UINT vaddvx_uint64xm8
+#define VFADDVV_FLOAT vfaddvv_float64xm8
+#define VMVVX_UINT vmvvx_uint64xm8
+#else
+
+#define ABS fabsf
+#define RVV_EFLOAT RVV_E32
+#define FLOAT_V_T float32xm8_t
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMINVS_FLOAT vfredminvs_float32xm8
+#define MASK_T e32xm8_t
+#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8
+#define VMFLTVV_FLOAT vmfltvv_e32xm8_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8
+#define VFMINVV_FLOAT vfminvv_float32xm8
+#define VMFLEVF_FLOAT vmflevf_e32xm8_float32xm8
+#define VMFIRSTM vmfirstm_e32xm8
+#define UINT_V_T uint32xm8_t
+#define VIDV_MASK_UINT vidv_mask_uint32xm8
+#define VIDV_UINT vidv_uint32xm8
+#define VADDVX_MASK_UINT vaddvx_mask_uint32xm8
+#define VADDVX_UINT vaddvx_uint32xm8
+#define VFADDVV_FLOAT vfaddvv_float32xm8
+#define VMVVX_UINT vmvvx_uint32xm8
+#endif
+
+#define RVV_M RVV_M8
+
+BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    FLOAT minf=FLT_MAX;
+    unsigned int min_index = 0;
+    if (n <= 0 || inc_x <= 0) return(min_index);
+
+    FLOAT_V_T vx0, vx1, v_min;
+    UINT_V_T v_min_index;
+    MASK_T mask0, mask1;
+    unsigned int gvl = 0;
+    gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+    v_min_index = VMVVX_UINT(0, gvl);
+    v_min = VFMVVF_FLOAT(FLT_MAX, gvl);
+    BLASLONG stride_x = inc_x * 2 * sizeof(FLOAT);
+    BLASLONG inc_xv = gvl * inc_x * 2;
+    BLASLONG ix = 0;
+    for(i=0,j=0; i < n/gvl; i++){
+        vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
+        //fabs(vector)
+        mask0 = VMFLTVF_FLOAT(vx0, 0, gvl);
+        vx0 = VFRSUBVF_MASK_FLOAT(vx0, vx0, 0, mask0, gvl);
+/*
+#if defined(DOUBLE)
+asm volatile(
+    "vor.vv v0, %1, %1\n\t"
+    "vsetvli x0, %3, e64,m8 \n\t"
+    "vfrsub.vf %0, %0, %2, v0.t \n\t"
+    :"+v"(vx0)
+    :"v"(mask0), "f"(zero), "r"(gvl)
+    :"v0");
+#else
+asm volatile(
+    "vor.vv v0, %1, %1\n\t"
+    "vsetvli x0, %3, e32,m8 \n\t"
+    "vfrsub.vf %0, %0, %2, v0.t \n\t"
+    :"+v"(vx0)
+    :"v"(mask0), "f"(zero), "r"(gvl)
+    :"v0");
+#endif
+*/
+        vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl);
+        //fabs(vector)
+        mask1 = VMFLTVF_FLOAT(vx1, 0, gvl);
+        vx1 = VFRSUBVF_MASK_FLOAT(vx1, vx1, 0, mask1, gvl);
+/*
+#if defined(DOUBLE)
+asm volatile(
+    "vor.vv v0, %1, %1\n\t"
+    "vsetvli x0, %3, 
e64,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx1) + :"v"(mask1), "f"(zero), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e32,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx1) + :"v"(mask1), "f"(zero), "r"(gvl) + :"v0"); +#endif +*/ + vx0 = VFADDVV_FLOAT(vx0, vx1, gvl); + + //index where element less than v_min + mask0 = VMFLTVV_FLOAT(vx0, v_min, gvl); + v_min_index = VIDV_MASK_UINT(v_min_index, mask0, gvl); +/* +#if defined(DOUBLE) +asm volatile( + "vor.vv v0, %1, %1 \n\t" + "vsetvli x0, %2, e64,m8 \n\t" + "vid.v %0, v0.t \n\t" + :"+v"(v_min_index) + :"v"(mask0), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1 \n\t" + "vsetvli x0, %2, e32,m8 \n\t" + "vid.v %0, v0.t \n\t" + :"+v"(v_min_index) + :"v"(mask0), "r"(gvl) + :"v0"); +#endif +*/ + v_min_index = VADDVX_MASK_UINT(v_min_index, v_min_index, j, mask0, gvl); + + //update v_min and start_index j + v_min = VFMINVV_FLOAT(v_min, vx0, gvl); + j += gvl; + ix += inc_xv; + } + vx0 = VFMVVF_FLOAT(FLT_MAX, gvl); + vx0 = VFREDMINVS_FLOAT(v_min, vx0, gvl); + minf = vx0[0]; + mask0 = VMFLEVF_FLOAT(v_min, minf, gvl); + min_index = VMFIRSTM(mask0,gvl); + min_index = v_min_index[min_index]; + + if(j < n){ + gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M); + v_min_index = VMVVX_UINT(0, gvl); + vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl); + //fabs(vector) + mask0 = VMFLTVF_FLOAT(vx0, 0, gvl); + vx0 = VFRSUBVF_MASK_FLOAT(vx0, vx0, 0, mask0, gvl); +/* +#if defined(DOUBLE) +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e64,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx0) + :"v"(mask0), "f"(zero), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e32,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx0) + :"v"(mask0), "f"(zero), "r"(gvl) + :"v0"); +#endif +*/ + vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl); + //fabs(vector) + mask1 = VMFLTVF_FLOAT(vx1, 0, gvl); + vx1 = VFRSUBVF_MASK_FLOAT(vx1, vx1, 0, mask1, gvl); +/* +#if defined(DOUBLE) +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e64,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx1) + :"v"(mask1), "f"(zero), "r"(gvl) + :"v0"); +#else +asm volatile( + "vor.vv v0, %1, %1\n\t" + "vsetvli x0, %3, e32,m8 \n\t" + "vfrsub.vf %0, %0, %2, v0.t \n\t" + :"+v"(vx1) + :"v"(mask1), "f"(zero), "r"(gvl) + :"v0"); +#endif +*/ + v_min = VFADDVV_FLOAT(vx0, vx1, gvl); + vx0 = VFMVVF_FLOAT(FLT_MAX, gvl); + vx0 = VFREDMINVS_FLOAT(v_min, vx0, gvl); + FLOAT cur_minf = vx0[0]; + if(cur_minf < minf){ + //tail index + v_min_index = VIDV_UINT(gvl); + v_min_index = VADDVX_UINT(v_min_index, j, gvl); + + mask0 = VMFLEVF_FLOAT(v_min, cur_minf, gvl); + min_index = VMFIRSTM(mask0,gvl); + min_index = v_min_index[min_index]; + } + } + return(min_index+1); +} + + diff --git a/kernel/riscv64/max.c b/kernel/riscv64/max.c new file mode 100644 index 000000000..2ad956bc0 --- /dev/null +++ b/kernel/riscv64/max.c @@ -0,0 +1,65 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float   : NoTest
+* BLASTEST double  : NoTest
+* CTEST            : NoTest
+* TEST             : NoTest
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT maxf=0.0;
+
+    if (n <= 0 || inc_x <= 0) return(maxf);
+
+    maxf=x[0];
+    ix += inc_x;
+    i++;
+
+    while(i < n)
+    {
+        if( x[ix] > maxf )
+        {
+            maxf = x[ix];
+        }
+        ix += inc_x;
+        i++;
+    }
+    return(maxf);
+}
+
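+/*
+ * The vector variant that follows strip-mines the loop with vsetvli: each
+ * pass asks the hardware for a legal vector length for the elements that
+ * remain, processes that many at once, and repeats. A rough scalar model
+ * of the pattern (illustrative pseudo-code added here, not part of the
+ * patch):
+ *
+ *   j = 0;
+ *   while (j < n) {
+ *       gvl = vsetvli(n - j, ...);   // hardware grants gvl <= n - j
+ *       process(&x[j], gvl);         // one vector register group's worth
+ *       j += gvl;
+ *   }
+ */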
diff --git a/kernel/riscv64/max_vector.c b/kernel/riscv64/max_vector.c
new file mode 100644
index 000000000..4ef75452d
--- /dev/null
+++ b/kernel/riscv64/max_vector.c
@@ -0,0 +1,116 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+#include <float.h>
+#if !defined(DOUBLE)
+#define RVV_EFLOAT RVV_E32
+#define RVV_M RVV_M8
+#define FLOAT_V_T float32xm8_t
+#define VLEV_FLOAT vlev_float32xm8
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMAXVS_FLOAT vfredmaxvs_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFMAXVV_FLOAT vfmaxvv_float32xm8
+#else
+#define RVV_EFLOAT RVV_E64
+#define RVV_M RVV_M8
+#define FLOAT_V_T float64xm8_t
+#define VLEV_FLOAT vlev_float64xm8
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMAXVS_FLOAT vfredmaxvs_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFMAXVV_FLOAT vfmaxvv_float64xm8
+#endif
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    if (n <= 0 || inc_x <= 0) return(0.0);
+    FLOAT maxf=-FLT_MAX;
+    unsigned int gvl = 0;
+    FLOAT_V_T v0, v1, v_max;
+
+    if(inc_x == 1){
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        if(gvl <= n/2){
+            v_max = VFMVVF_FLOAT(-FLT_MAX, gvl);
+            //unrolled by two vector register groups
+            for(i=0,j=0; i<n/(gvl*2); i++){
+                v0 = VLEV_FLOAT(&x[j], gvl);
+                v_max = VFMAXVV_FLOAT(v_max, v0, gvl);
+                v1 = VLEV_FLOAT(&x[j+gvl], gvl);
+                v_max = VFMAXVV_FLOAT(v_max, v1, gvl);
+                j += gvl * 2;
+            }
+            v1 = VFMVVF_FLOAT(-FLT_MAX, gvl);
+            v0 = VFREDMAXVS_FLOAT(v_max, v1, gvl);
+            maxf = v0[0];
+        }
+        for(;j<n;){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v0 = VLEV_FLOAT(&x[j], gvl);
+            v1 = VFMVVF_FLOAT(-FLT_MAX, gvl);
+            v0 = VFREDMAXVS_FLOAT(v0, v1, gvl);
+            if(v0[0] > maxf)
+                maxf = v0[0];
+            j += gvl;
+        }
+    }else{
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        BLASLONG stride_x = inc_x * sizeof(FLOAT);
+        if(gvl <= n/2){
+            v_max = VFMVVF_FLOAT(-FLT_MAX, gvl);
+            BLASLONG idx = 0, inc_xv = inc_x * gvl;
+            for(i=0,j=0; i<n/(gvl*2); i++){
+                v0 = VLSEV_FLOAT(&x[idx], stride_x, gvl);
+                v_max = VFMAXVV_FLOAT(v_max, v0, gvl);
+                v1 = VLSEV_FLOAT(&x[idx+inc_xv], stride_x, gvl);
+                v_max = VFMAXVV_FLOAT(v_max, v1, gvl);
+                j += gvl * 2;
+                idx += inc_xv * 2;
+            }
+            v1 = VFMVVF_FLOAT(-FLT_MAX, gvl);
+            v0 = VFREDMAXVS_FLOAT(v_max, v1, gvl);
+            maxf = v0[0];
+        }
+        for(;j<n;){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v0 = VLSEV_FLOAT(&x[j*inc_x], stride_x, gvl);
+            v1 = VFMVVF_FLOAT(-FLT_MAX, gvl);
+            v0 = VFREDMAXVS_FLOAT(v0, v1, gvl);
+            if(v0[0] > maxf)
+                maxf = v0[0];
+            j += gvl;
+        }
+    }
+    return(maxf);
+}
+
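+/*
+ * Note on the kernel above (added remark): the running maximum is seeded
+ * with -FLT_MAX in both the single- and double-precision builds, so the
+ * first element examined always replaces the seed.
+ */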
diff --git a/kernel/riscv64/min.c b/kernel/riscv64/min.c
new file mode 100644
index 000000000..2812fe397
--- /dev/null
+++ b/kernel/riscv64/min.c
@@ -0,0 +1,65 @@
+/***************************************************************************
+Copyright (c) 2013, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float   : NoTest
+* BLASTEST double  : NoTest
+* CTEST            : NoTest
+* TEST             : NoTest
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT minf=0.0;
+
+    if (n <= 0 || inc_x <= 0) return(minf);
+
+    minf=x[0];
+    ix += inc_x;
+    i++;
+
+    while(i < n)
+    {
+        if( x[ix] < minf )
+        {
+            minf = x[ix];
+        }
+        ix += inc_x;
+        i++;
+    }
+    return(minf);
+}
+
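+/*
+ * The vector kernel that follows mirrors max_vector.c with the ordering
+ * reversed: the accumulator is seeded with FLT_MAX and folded with
+ * vector min / vfredmin reductions instead of max / vfredmax.
+ */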
diff --git a/kernel/riscv64/min_vector.c b/kernel/riscv64/min_vector.c
new file mode 100644
index 000000000..83c965bfa
--- /dev/null
+++ b/kernel/riscv64/min_vector.c
@@ -0,0 +1,116 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+#include <float.h>
+#if !defined(DOUBLE)
+#define RVV_EFLOAT RVV_E32
+#define RVV_M RVV_M8
+#define FLOAT_V_T float32xm8_t
+#define VLEV_FLOAT vlev_float32xm8
+#define VLSEV_FLOAT vlsev_float32xm8
+#define VFREDMINVS_FLOAT vfredminvs_float32xm8
+#define VFMVVF_FLOAT vfmvvf_float32xm8
+#define VFMINVV_FLOAT vfminvv_float32xm8
+#else
+#define RVV_EFLOAT RVV_E64
+#define RVV_M RVV_M8
+#define FLOAT_V_T float64xm8_t
+#define VLEV_FLOAT vlev_float64xm8
+#define VLSEV_FLOAT vlsev_float64xm8
+#define VFREDMINVS_FLOAT vfredminvs_float64xm8
+#define VFMVVF_FLOAT vfmvvf_float64xm8
+#define VFMINVV_FLOAT vfminvv_float64xm8
+#endif
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0, j=0;
+    if (n <= 0 || inc_x <= 0) return(0.0);
+    FLOAT minf=FLT_MAX;
+    unsigned int gvl = 0;
+    FLOAT_V_T v0, v1, v_min;
+
+    if(inc_x == 1){
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        if(gvl <= n/2){
+            v_min = VFMVVF_FLOAT(FLT_MAX, gvl);
+            //unrolled by two vector register groups
+            for(i=0,j=0; i<n/(gvl*2); i++){
+                v0 = VLEV_FLOAT(&x[j], gvl);
+                v_min = VFMINVV_FLOAT(v_min, v0, gvl);
+                v1 = VLEV_FLOAT(&x[j+gvl], gvl);
+                v_min = VFMINVV_FLOAT(v_min, v1, gvl);
+                j += gvl * 2;
+            }
+            v1 = VFMVVF_FLOAT(FLT_MAX, gvl);
+            v0 = VFREDMINVS_FLOAT(v_min, v1, gvl);
+            minf = v0[0];
+        }
+        for(;j<n;){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v0 = VLEV_FLOAT(&x[j], gvl);
+            v1 = VFMVVF_FLOAT(FLT_MAX, gvl);
+            v0 = VFREDMINVS_FLOAT(v0, v1, gvl);
+            if(v0[0] < minf)
+                minf = v0[0];
+            j += gvl;
+        }
+    }else{
+        gvl = vsetvli(n, RVV_EFLOAT, RVV_M);
+        BLASLONG stride_x = inc_x * sizeof(FLOAT);
+        if(gvl <= n/2){
+            v_min = VFMVVF_FLOAT(FLT_MAX, gvl);
+            BLASLONG idx = 0, inc_xv = inc_x * gvl;
+            for(i=0,j=0; i<n/(gvl*2); i++){
+                v0 = VLSEV_FLOAT(&x[idx], stride_x, gvl);
+                v_min = VFMINVV_FLOAT(v_min, v0, gvl);
+                v1 = VLSEV_FLOAT(&x[idx+inc_xv], stride_x, gvl);
+                v_min = VFMINVV_FLOAT(v_min, v1, gvl);
+                j += gvl * 2;
+                idx += inc_xv * 2;
+            }
+            v1 = VFMVVF_FLOAT(FLT_MAX, gvl);
+            v0 = VFREDMINVS_FLOAT(v_min, v1, gvl);
+            minf = v0[0];
+        }
+        for(;j<n;){
+            gvl = vsetvli(n-j, RVV_EFLOAT, RVV_M);
+            v0 = VLSEV_FLOAT(&x[j*inc_x], stride_x, gvl);
+            v1 = VFMVVF_FLOAT(FLT_MAX, gvl);
+            v0 = VFREDMINVS_FLOAT(v0, v1, gvl);
+            if(v0[0] < minf)
+                minf = v0[0];
+            j += gvl;
+        }
+    }
+    return(minf);
+}
+
diff --git a/kernel/riscv64/nrm2.c b/kernel/riscv64/nrm2.c
new file mode 100644
--- /dev/null
+++ b/kernel/riscv64/nrm2.c
+/***************************************************************************
+Copyright (c) 2013, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+
+#else
+
+#define ABS fabsf
+
+#endif
+
+
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    FLOAT scale = 0.0;
+    FLOAT ssq   = 1.0;
+    FLOAT absxi = 0.0;
+
+
+    if (n <= 0 || inc_x <= 0) return(0.0);
+    if ( n == 1 ) return( ABS(x[0]) );
+
+    n *= inc_x;
+    //maintain the invariant sum(x_i^2) = scale^2 * ssq to avoid
+    //overflow/underflow while squaring
+    while(i < n)
+    {
+
+        if ( x[i] != 0.0 )
+        {
+            absxi = ABS( x[i] );
+            if ( scale < absxi )
+            {
+                ssq = 1 + ssq * ( scale / absxi ) * ( scale / absxi );
+                scale = absxi ;
+            }
+            else
+            {
+                ssq += ( absxi/scale ) * ( absxi/scale );
+            }
+
+        }
+        i += inc_x;
+    }
+    scale = scale * sqrt( ssq );
+    return(scale);
+
+}
+
diff --git a/kernel/riscv64/nrm2_vector.c b/kernel/riscv64/nrm2_vector.c
new file mode 100644
index 000000000..785c0d2f8
--- /dev/null
+++ b/kernel/riscv64/nrm2_vector.c
@@ -0,0 +1,220 @@
+/***************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + +#include "common.h" +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M4 +#define FLOAT_V_T float32xm4_t +#define VLEV_FLOAT vlev_float32xm4 +#define VLSEV_FLOAT vlsev_float32xm4 +#define VFREDSUM_FLOAT vfredsumvs_float32xm4 +#define VFMACCVV_FLOAT vfmaccvv_float32xm4 +#define VFMVVF_FLOAT vfmvvf_float32xm4 +#define VFDOTVV_FLOAT vfdotvv_float32xm4 +#define ABS fabsf +#define MASK_T e32xm4_t +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm4 +#define VMFGTVF_FLOAT vmfgtvf_e32xm4_float32xm4 +#define VMFIRSTM vmfirstm_e32xm4 +#define VFDIVVF_FLOAT vfdivvf_float32xm4 +#define VMFLTVF_FLOAT vmfltvf_e32xm4_float32xm4 +#define VFREDMAXVS_FLOAT vfredmaxvs_float32xm4 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M4 +#define FLOAT_V_T float64xm4_t +#define VLEV_FLOAT vlev_float64xm4 +#define VLSEV_FLOAT vlsev_float64xm4 +#define VFREDSUM_FLOAT vfredsumvs_float64xm4 +#define VFMACCVV_FLOAT vfmaccvv_float64xm4 +#define VFMVVF_FLOAT vfmvvf_float64xm4 +#define VFDOTVV_FLOAT vfdotvv_float64xm4 +#define ABS fabs +#define MASK_T e64xm4_t +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm4 +#define VMFGTVF_FLOAT vmfgtvf_e64xm4_float64xm4 +#define VMFIRSTM vmfirstm_e64xm4 +#define VFDIVVF_FLOAT vfdivvf_float64xm4 +#define VMFLTVF_FLOAT vmfltvf_e64xm4_float64xm4 +#define VFREDMAXVS_FLOAT vfredmaxvs_float64xm4 +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0, j=0; + + if ( n < 0 ) return(0.0); + if(n == 1) return (ABS(x[0])); + + FLOAT_V_T vr, v0, v_zero; + unsigned int gvl = 0; + FLOAT scale = 0.0, ssq = 0.0; + MASK_T mask; + BLASLONG index = 0; + if(inc_x == 1){ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + vr = VFMVVF_FLOAT(0, gvl); + v_zero = VFMVVF_FLOAT(0, gvl); + for(i=0,j=0; i + +#define KERNEL16x4_I \ + "addi t1, %[PB], 1*4 \n\t"\ + "addi t2, %[PB], 2*4 \n\t"\ + "addi t3, %[PB], 3*4 \n\t"\ + "flw ft0, (%[PB]) \n\t"\ + "flw ft1, (t1) \n\t"\ + "flw ft2, (t2) \n\t"\ + "flw ft3, (t3) \n\t"\ + "vle.v v0, (%[PA]) \n\t"\ + "addi t4, %[PA], 4*4 \n\t"\ + "addi t5, %[PA], 8*4 \n\t"\ + "vfmv.v.f v8, ft0 \n\t"\ + "addi t6, %[PA], 12*4 \n\t"\ + "addi %[PA], %[PA], 16*4 \n\t"\ + "vle.v v1, (t4) \n\t"\ + "addi t4, t4, 16*4 \n\t"\ + "vfmv.v.f v9, ft1 \n\t"\ + "vle.v v2, (t5) \n\t"\ + "addi t5, t5, 16*4 \n\t"\ + "vle.v v3, (t6) \n\t"\ + "addi t6, t6, 16*4 \n\t"\ + "vfmv.v.f v10, ft2 \n\t"\ + "addi %[PB], %[PB], 4*4 \n\t"\ + "vle.v v4, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 16*4 \n\t"\ + "vfmv.v.f v11, ft3 \n\t"\ + "vfmacc.vv v16, v8, v0 \n\t"\ + "addi t1, t1, 4*4 \n\t"\ + "vle.v v5, (t4) \n\t"\ + "addi t4, t4, 16*4 \n\t"\ + "vfmacc.vv v17, v8, v1 \n\t"\ + "addi t2, t2, 4*4 \n\t"\ + "vle.v v6, (t5) \n\t"\ + "addi t5, t5, 16*4 \n\t"\ + "vfmacc.vv v18, v8, v2 \n\t"\ + "addi t3, t3, 4*4 \n\t"\ + "vle.v v7, (t6) \n\t"\ + "addi t6, t6, 16*4 \n\t"\ + "vfmacc.vv v19, v8, v3 \n\t"\ + "flw ft4, (%[PB]) \n\t"\ + "vfmacc.vv v20, v9, v0 \n\t"\ + "flw ft5, (t1) \n\t"\ + "vfmacc.vv v21, v9, v1 \n\t"\ + "flw ft6, (t2) \n\t"\ + "vfmacc.vv v22, v9, v2 \n\t"\ + "flw ft7, (t3) \n\t"\ + "vfmacc.vv v23, v9, v3 \n\t"\ + "vfmv.v.f v12, ft4 \n\t"\ + "vfmacc.vv v24, v10, v0 \n\t"\ + "vfmv.v.f v13, ft5 \n\t"\ + "vfmacc.vv v25, v10, v1 \n\t"\ + "vfmv.v.f v14, ft6 \n\t"\ + "vfmacc.vv v26, v10, v2 \n\t"\ + "vfmv.v.f v15, ft7 \n\t"\ + "vfmacc.vv v27, v10, v3 \n\t"\ + "addi %[PB], %[PB], 4*4 \n\t"\ + "vfmacc.vv v28, v11, v0 \n\t"\ + "addi t1, t1, 4*4 \n\t"\ + "vfmacc.vv v29, v11, v1 \n\t"\ + "addi t2, t2, 4*4 
\n\t"\ + "vfmacc.vv v30, v11, v2 \n\t"\ + "addi t3, t3, 4*4 \n\t"\ + "vfmacc.vv v31, v11, v3 \n\t" + +#define KERNEL16x4_M1 \ + "vfmacc.vv v16, v8, v0 \n\t"\ + "vle.v v4, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 16*4 \n\t"\ + "vfmacc.vv v17, v8, v1 \n\t"\ + "vle.v v5, (t4) \n\t"\ + "addi t4, t4, 16*4 \n\t"\ + "vfmacc.vv v18, v8, v2 \n\t"\ + "vle.v v6, (t5) \n\t"\ + "addi t5, t5, 16*4 \n\t"\ + "vfmacc.vv v19, v8, v3 \n\t"\ + "vle.v v7, (t6) \n\t"\ + "addi t6, t6, 16*4 \n\t"\ + "vfmacc.vv v20, v9, v0 \n\t"\ + "flw ft4, (%[PB]) \n\t"\ + "vfmacc.vv v21, v9, v1 \n\t"\ + "flw ft5, (t1) \n\t"\ + "vfmacc.vv v22, v9, v2 \n\t"\ + "flw ft6, (t2) \n\t"\ + "vfmacc.vv v23, v9, v3 \n\t"\ + "flw ft7, (t3) \n\t"\ + "addi %[PB], %[PB], 4*4 \n\t"\ + "vfmacc.vv v24, v10, v0 \n\t"\ + "addi t1, t1, 4*4 \n\t"\ + "vfmacc.vv v25, v10, v1 \n\t"\ + "vfmv.v.f v12, ft4 \n\t"\ + "vfmacc.vv v26, v10, v2 \n\t"\ + "addi t2, t2, 4*4 \n\t"\ + "vfmacc.vv v27, v10, v3 \n\t"\ + "vfmv.v.f v13, ft5 \n\t"\ + "vfmacc.vv v28, v11, v0 \n\t"\ + "addi t3, t3, 4*4 \n\t"\ + "vfmacc.vv v29, v11, v1 \n\t"\ + "vfmv.v.f v14, ft6 \n\t"\ + "vfmacc.vv v30, v11, v2 \n\t"\ + "vfmacc.vv v31, v11, v3 \n\t"\ + "vfmv.v.f v15, ft7 \n\t" + +#define KERNEL16x4_M2 \ + "vfmacc.vv v16, v12, v4 \n\t"\ + "vle.v v0, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 16*4 \n\t"\ + "vfmacc.vv v17, v12, v5 \n\t"\ + "vle.v v1, (t4) \n\t"\ + "addi t4, t4, 16*4 \n\t"\ + "vfmacc.vv v18, v12, v6 \n\t"\ + "vle.v v2, (t5) \n\t"\ + "addi t5, t5, 16*4 \n\t"\ + "vfmacc.vv v19, v12, v7 \n\t"\ + "vle.v v3, (t6) \n\t"\ + "addi t6, t6, 16*4 \n\t"\ + "vfmacc.vv v20, v13, v4 \n\t"\ + "flw ft0, (%[PB]) \n\t"\ + "vfmacc.vv v21, v13, v5 \n\t"\ + "flw ft1, (t1) \n\t"\ + "vfmacc.vv v22, v13, v6 \n\t"\ + "flw ft2, (t2) \n\t"\ + "vfmacc.vv v23, v13, v7 \n\t"\ + "flw ft3, (t3) \n\t"\ + "addi %[PB], %[PB], 4*4 \n\t"\ + "vfmacc.vv v24, v14, v4 \n\t"\ + "addi t1, t1, 4*4 \n\t"\ + "vfmacc.vv v25, v14, v5 \n\t"\ + "vfmv.v.f v8, ft0 \n\t"\ + "vfmacc.vv v26, v14, v6 \n\t"\ + "addi t2, t2, 4*4 \n\t"\ + "vfmacc.vv v27, v14, v7 \n\t"\ + "vfmv.v.f v9, ft1 \n\t"\ + "vfmacc.vv v28, v15, v4 \n\t"\ + "addi t3, t3, 4*4 \n\t"\ + "vfmacc.vv v29, v15, v5 \n\t"\ + "vfmv.v.f v10, ft2 \n\t"\ + "vfmacc.vv v30, v15, v6 \n\t"\ + "vfmacc.vv v31, v15, v7 \n\t"\ + "vfmv.v.f v11, ft3 \n\t" + +#define KERNEL16x4_E \ + "vfmacc.vv v16, v12, v4 \n\t"\ + "vfmacc.vv v17, v12, v5 \n\t"\ + "vfmacc.vv v18, v12, v6 \n\t"\ + "vfmacc.vv v19, v12, v7 \n\t"\ + "vfmacc.vv v20, v13, v4 \n\t"\ + "vfmacc.vv v21, v13, v5 \n\t"\ + "vfmacc.vv v22, v13, v6 \n\t"\ + "vfmacc.vv v23, v13, v7 \n\t"\ + "vfmacc.vv v24, v14, v4 \n\t"\ + "vfmacc.vv v25, v14, v5 \n\t"\ + "vfmacc.vv v26, v14, v6 \n\t"\ + "vfmacc.vv v27, v14, v7 \n\t"\ + "vfmacc.vv v28, v15, v4 \n\t"\ + "vfmacc.vv v29, v15, v5 \n\t"\ + "vfmacc.vv v30, v15, v6 \n\t"\ + "vfmacc.vv v31, v15, v7 \n\t" + + +#define KERNEL8x4_I \ + "addi t1, %[PB], 1*4 \n\t"\ + "addi t2, %[PB], 2*4 \n\t"\ + "addi t3, %[PB], 3*4 \n\t"\ + "flw ft0, (%[PB]) \n\t"\ + "flw ft1, (t1) \n\t"\ + "flw ft2, (t2) \n\t"\ + "flw ft3, (t3) \n\t"\ + "vle.v v0, (%[PA]) \n\t"\ + "addi t4, %[PA], 4*4 \n\t"\ + "vfmv.v.f v8, ft0 \n\t"\ + "addi %[PA], %[PA], 8*4 \n\t"\ + "vle.v v1, (t4) \n\t"\ + "addi t4, t4, 8*4 \n\t"\ + "vfmv.v.f v9, ft1 \n\t"\ + "vfmv.v.f v10, ft2 \n\t"\ + "addi %[PB], %[PB], 4*4 \n\t"\ + "vle.v v4, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 8*4 \n\t"\ + "vfmv.v.f v11, ft3 \n\t"\ + "vfmacc.vv v16, v8, v0 \n\t"\ + "addi t1, t1, 4*4 \n\t"\ + "vle.v v5, (t4) \n\t"\ + "addi t4, t4, 8*4 \n\t"\ + "vfmacc.vv v17, v8, v1 \n\t"\ + 
"addi t2, t2, 4*4 \n\t"\ + "flw ft4, (%[PB]) \n\t"\ + "addi t3, t3, 4*4 \n\t"\ + "vfmacc.vv v20, v9, v0 \n\t"\ + "flw ft5, (t1) \n\t"\ + "vfmacc.vv v21, v9, v1 \n\t"\ + "flw ft6, (t2) \n\t"\ + "vfmv.v.f v12, ft4 \n\t"\ + "flw ft7, (t3) \n\t"\ + "vfmacc.vv v24, v10, v0 \n\t"\ + "vfmv.v.f v13, ft5 \n\t"\ + "vfmacc.vv v25, v10, v1 \n\t"\ + "vfmv.v.f v14, ft6 \n\t"\ + "addi %[PB], %[PB], 4*4 \n\t"\ + "vfmv.v.f v15, ft7 \n\t"\ + "addi t1, t1, 4*4 \n\t"\ + "vfmacc.vv v28, v11, v0 \n\t"\ + "addi t2, t2, 4*4 \n\t"\ + "vfmacc.vv v29, v11, v1 \n\t"\ + "addi t3, t3, 4*4 \n\t" + + +#define KERNEL8x4_M1 \ + "vfmacc.vv v16, v8, v0 \n\t"\ + "vle.v v4, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 8*4 \n\t"\ + "vfmacc.vv v17, v8, v1 \n\t"\ + "vle.v v5, (t4) \n\t"\ + "addi t4, t4, 8*4 \n\t"\ + "vfmacc.vv v20, v9, v0 \n\t"\ + "flw ft4, (%[PB]) \n\t"\ + "vfmacc.vv v21, v9, v1 \n\t"\ + "flw ft5, (t1) \n\t"\ + "addi %[PB], %[PB], 4*4 \n\t"\ + "flw ft6, (t2) \n\t"\ + "vfmacc.vv v24, v10, v0 \n\t"\ + "flw ft7, (t3) \n\t"\ + "addi t1, t1, 4*4 \n\t"\ + "vfmacc.vv v25, v10, v1 \n\t"\ + "vfmv.v.f v12, ft4 \n\t"\ + "addi t2, t2, 4*4 \n\t"\ + "vfmv.v.f v13, ft5 \n\t"\ + "vfmacc.vv v28, v11, v0 \n\t"\ + "addi t3, t3, 4*4 \n\t"\ + "vfmacc.vv v29, v11, v1 \n\t"\ + "vfmv.v.f v14, ft6 \n\t"\ + "vfmv.v.f v15, ft7 \n\t" + +#define KERNEL8x4_M2 \ + "vfmacc.vv v16, v12, v4 \n\t"\ + "vle.v v0, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 8*4 \n\t"\ + "vfmacc.vv v17, v12, v5 \n\t"\ + "vle.v v1, (t4) \n\t"\ + "addi t4, t4, 8*4 \n\t"\ + "vfmacc.vv v20, v13, v4 \n\t"\ + "flw ft0, (%[PB]) \n\t"\ + "vfmacc.vv v21, v13, v5 \n\t"\ + "flw ft1, (t1) \n\t"\ + "addi %[PB], %[PB], 4*4 \n\t"\ + "flw ft2, (t2) \n\t"\ + "vfmacc.vv v24, v14, v4 \n\t"\ + "flw ft3, (t3) \n\t"\ + "addi t1, t1, 4*4 \n\t"\ + "vfmacc.vv v25, v14, v5 \n\t"\ + "vfmv.v.f v8, ft0 \n\t"\ + "addi t2, t2, 4*4 \n\t"\ + "vfmv.v.f v9, ft1 \n\t"\ + "vfmacc.vv v28, v15, v4 \n\t"\ + "addi t3, t3, 4*4 \n\t"\ + "vfmacc.vv v29, v15, v5 \n\t"\ + "vfmv.v.f v10, ft2 \n\t"\ + "vfmv.v.f v11, ft3 \n\t" + +#define KERNEL8x4_E \ + "vfmacc.vv v16, v12, v4 \n\t"\ + "vfmacc.vv v17, v12, v5 \n\t"\ + "vfmacc.vv v20, v13, v4 \n\t"\ + "vfmacc.vv v21, v13, v5 \n\t"\ + "vfmacc.vv v24, v14, v4 \n\t"\ + "vfmacc.vv v25, v14, v5 \n\t"\ + "vfmacc.vv v28, v15, v4 \n\t"\ + "vfmacc.vv v29, v15, v5 \n\t" + + +#define KERNEL16x2_I \ + "addi t1, %[PB], 1*4 \n\t"\ + "flw ft0, (%[PB]) \n\t"\ + "flw ft1, (t1) \n\t"\ + "vle.v v0, (%[PA]) \n\t"\ + "addi t4, %[PA], 4*4 \n\t"\ + "addi t5, %[PA], 8*4 \n\t"\ + "vfmv.v.f v8, ft0 \n\t"\ + "addi t6, %[PA], 12*4 \n\t"\ + "addi %[PA], %[PA], 16*4 \n\t"\ + "vle.v v1, (t4) \n\t"\ + "addi t4, t4, 16*4 \n\t"\ + "vfmv.v.f v9, ft1 \n\t"\ + "vle.v v2, (t5) \n\t"\ + "addi t5, t5, 16*4 \n\t"\ + "vle.v v3, (t6) \n\t"\ + "addi t6, t6, 16*4 \n\t"\ + "addi %[PB], %[PB], 2*4 \n\t"\ + "vle.v v4, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 16*4 \n\t"\ + "vfmacc.vv v16, v8, v0 \n\t"\ + "addi t1, t1, 2*4 \n\t"\ + "vle.v v5, (t4) \n\t"\ + "addi t4, t4, 16*4 \n\t"\ + "vfmacc.vv v17, v8, v1 \n\t"\ + "vle.v v6, (t5) \n\t"\ + "addi t5, t5, 16*4 \n\t"\ + "vfmacc.vv v18, v8, v2 \n\t"\ + "vle.v v7, (t6) \n\t"\ + "addi t6, t6, 16*4 \n\t"\ + "vfmacc.vv v19, v8, v3 \n\t"\ + "flw ft4, (%[PB]) \n\t"\ + "vfmacc.vv v20, v9, v0 \n\t"\ + "flw ft5, (t1) \n\t"\ + "vfmacc.vv v21, v9, v1 \n\t"\ + "addi %[PB], %[PB], 2*4 \n\t"\ + "vfmacc.vv v22, v9, v2 \n\t"\ + "addi t1, t1, 2*4 \n\t"\ + "vfmacc.vv v23, v9, v3 \n\t"\ + "vfmv.v.f v12, ft4 \n\t"\ + "vfmv.v.f v13, ft5 \n\t" + + +#define KERNEL16x2_M1 \ + "vfmacc.vv v16, v8, v0 \n\t"\ + 
"vle.v v4, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 16*4 \n\t"\ + "vfmacc.vv v17, v8, v1 \n\t"\ + "vle.v v5, (t4) \n\t"\ + "addi t4, t4, 16*4 \n\t"\ + "vfmacc.vv v18, v8, v2 \n\t"\ + "vle.v v6, (t5) \n\t"\ + "addi t5, t5, 16*4 \n\t"\ + "vfmacc.vv v19, v8, v3 \n\t"\ + "vle.v v7, (t6) \n\t"\ + "addi t6, t6, 16*4 \n\t"\ + "flw ft4, (%[PB]) \n\t"\ + "vfmacc.vv v20, v9, v0 \n\t"\ + "flw ft5, (t1) \n\t"\ + "vfmacc.vv v21, v9, v1 \n\t"\ + "vfmv.v.f v12, ft4 \n\t"\ + "vfmacc.vv v22, v9, v2 \n\t"\ + "addi t1, t1, 2*4 \n\t"\ + "vfmacc.vv v23, v9, v3 \n\t"\ + "addi %[PB], %[PB], 2*4 \n\t"\ + "vfmv.v.f v13, ft5 \n\t" + + +#define KERNEL16x2_M2 \ + "vfmacc.vv v16, v12, v4 \n\t"\ + "vle.v v0, (%[PA]) \n\t"\ + "addi %[PA], %[PA], 16*4 \n\t"\ + "vfmacc.vv v17, v12, v5 \n\t"\ + "vle.v v1, (t4) \n\t"\ + "addi t4, t4, 16*4 \n\t"\ + "vfmacc.vv v18, v12, v6 \n\t"\ + "vle.v v2, (t5) \n\t"\ + "addi t5, t5, 16*4 \n\t"\ + "vfmacc.vv v19, v12, v7 \n\t"\ + "vle.v v3, (t6) \n\t"\ + "addi t6, t6, 16*4 \n\t"\ + "vfmacc.vv v20, v13, v4 \n\t"\ + "flw ft0, (%[PB]) \n\t"\ + "vfmacc.vv v21, v13, v5 \n\t"\ + "flw ft1, (t1) \n\t"\ + "vfmacc.vv v22, v13, v6 \n\t"\ + "vfmv.v.f v8, ft0 \n\t"\ + "vfmacc.vv v23, v13, v7 \n\t"\ + "addi %[PB], %[PB], 2*4 \n\t"\ + "addi t1, t1, 2*4 \n\t"\ + "vfmv.v.f v9, ft1 \n\t" + + +#define KERNEL16x2_E \ + "vfmacc.vv v16, v12, v4 \n\t"\ + "vfmacc.vv v17, v12, v5 \n\t"\ + "vfmacc.vv v18, v12, v6 \n\t"\ + "vfmacc.vv v19, v12, v7 \n\t"\ + "vfmacc.vv v20, v13, v4 \n\t"\ + "vfmacc.vv v21, v13, v5 \n\t"\ + "vfmacc.vv v22, v13, v6 \n\t"\ + "vfmacc.vv v23, v13, v7 \n\t" + + +int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc +#ifdef TRMMKERNEL + ,BLASLONG offset +#endif + ) +{ + BLASLONG i,j,k; + FLOAT *C0,*C1,*C2,*C3; + FLOAT *ptrba,*ptrbb; + + FLOAT loadb0,loadb1,loadb2,loadb3; + FLOAT load0,load1,load2,load3,load4,load5,load6,load7; + + FLOAT res0,res1,res2,res3; + FLOAT res4,res5,res6,res7; + FLOAT res8,res9,res10,res11; + FLOAT res12,res13,res14,res15; + + for (j=0; j + +int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) +{ + BLASLONG i=0; + BLASLONG ix=0,iy=0; + FLOAT temp; + + if ( n < 0 ) return(0); + + while(i < n) + { + + temp = x[ix] ; + x[ix] = y[iy] ; + y[iy] = temp ; + + ix += inc_x ; + iy += inc_y ; + i++ ; + + } + return(0); + +} + + diff --git a/kernel/riscv64/swap_vector.c b/kernel/riscv64/swap_vector.c new file mode 100644 index 000000000..9377bf4b9 --- /dev/null +++ b/kernel/riscv64/swap_vector.c @@ -0,0 +1,173 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" +#include +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M8 +#define FLOAT_V_T float32xm8_t +#define VLEV_FLOAT vlev_float32xm8 +#define VLSEV_FLOAT vlsev_float32xm8 +#define VSEV_FLOAT vsev_float32xm8 +#define VSSEV_FLOAT vssev_float32xm8 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M8 +#define FLOAT_V_T float64xm8_t +#define VLEV_FLOAT vlev_float64xm8 +#define VLSEV_FLOAT vlsev_float64xm8 +#define VSEV_FLOAT vsev_float64xm8 +#define VSSEV_FLOAT vssev_float64xm8 +#endif + +int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) +{ + BLASLONG i = 0, j = 0; + BLASLONG ix = 0,iy = 0; + BLASLONG stride_x, stride_y; + FLOAT_V_T vx0, vx1, vy0, vy1; + unsigned int gvl = 0; + + if (n < 0) return(0); + if(inc_x == 1 && inc_y == 1){ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + if(gvl <= n/2){ + for(i=0,j=0; i 0){ + gvl = vsetvli(len, RVV_EFLOAT, RVV_M); + vr = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < len / gvl; k++){ + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VLEV_FLOAT(&y[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSEV_FLOAT(&y[i], vy, gvl); + + vx = VLEV_FLOAT(&x[i], gvl); + vr = VFMACCVV_FLOAT(vr, vx, va, gvl); + + i += gvl; + } + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 = va[0]; + if(i < m){ + gvl = vsetvli(m-i, RVV_EFLOAT, RVV_M); + vy = VLEV_FLOAT(&y[i], gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSEV_FLOAT(&y[i], vy, gvl); + + vx = VLEV_FLOAT(&x[i], gvl); + vr = VFMULVV_FLOAT(vx, va, gvl); + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 += va[0]; + } + } + y[j] += alpha * temp2; + a_ptr += lda; + } + }else if(inc_x == 1){ + jy = 0; + stride_y = inc_y * sizeof(FLOAT); + for (j=0; j 0){ + gvl = vsetvli(len, RVV_EFLOAT, RVV_M); + inc_yv = inc_y * gvl; + vr = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < len / gvl; k++){ + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VLSEV_FLOAT(&y[iy], stride_y, gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSSEV_FLOAT(&y[iy], stride_y, vy, gvl); + + vx = VLEV_FLOAT(&x[i], gvl); + vr = VFMACCVV_FLOAT(vr, vx, va, gvl); + + i += gvl; + iy += inc_yv; + } + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 = va[0]; + if(i < m){ + gvl = vsetvli(m-i, RVV_EFLOAT, RVV_M); + vy = VLSEV_FLOAT(&y[iy], stride_y, gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSSEV_FLOAT(&y[iy], stride_y, vy, gvl); + + vx = VLEV_FLOAT(&x[i], gvl); + vr = VFMULVV_FLOAT(vx, va, gvl); + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 += 
va[0]; + } + } + y[jy] += alpha * temp2; + jy += inc_y; + a_ptr += lda; + } + }else if(inc_y == 1){ + jx = 0; + stride_x = inc_x * sizeof(FLOAT); + for (j=0; j 0){ + gvl = vsetvli(len, RVV_EFLOAT, RVV_M); + vr = VFMVVF_FLOAT(0, gvl); + inc_xv = inc_x * gvl; + for(k = 0; k < len / gvl; k++){ + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VLEV_FLOAT(&y[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSEV_FLOAT(&y[i], vy, gvl); + + vx = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vr = VFMACCVV_FLOAT(vr, vx, va, gvl); + + i += gvl; + ix += inc_xv; + } + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 = va[0]; + if(i < m){ + gvl = vsetvli(m-i, RVV_EFLOAT, RVV_M); + vy = VLEV_FLOAT(&y[i], gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSEV_FLOAT(&y[i], vy, gvl); + + vx = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vr = VFMULVV_FLOAT(vx, va, gvl); + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 += va[0]; + } + } + y[j] += alpha * temp2; + jx += inc_x; + a_ptr += lda; + } + }else{ + stride_x = inc_x * sizeof(FLOAT); + stride_y = inc_y * sizeof(FLOAT); + jx = 0; + jy = 0; + for (j=0; j 0){ + gvl = vsetvli(len, RVV_EFLOAT, RVV_M); + inc_xv = inc_x * gvl; + inc_yv = inc_y * gvl; + vr = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < len / gvl; k++){ + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VLSEV_FLOAT(&y[iy], stride_y, gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSSEV_FLOAT(&y[iy], stride_y, vy, gvl); + + vx = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vr = VFMACCVV_FLOAT(vr, vx, va, gvl); + + i += gvl; + ix += inc_xv; + iy += inc_yv; + } + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 = va[0]; + if(i < m){ + gvl = vsetvli(m-i, RVV_EFLOAT, RVV_M); + vy = VLSEV_FLOAT(&y[iy], stride_y, gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSSEV_FLOAT(&y[iy], stride_y, vy, gvl); + + vx = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vr = VFMULVV_FLOAT(vx, va, gvl); + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 += va[0]; + } + } + y[jy] += alpha * temp2; + jx += inc_x; + jy += inc_y; + a_ptr += lda; + } + } + return(0); +} + diff --git a/kernel/riscv64/symv_U.c b/kernel/riscv64/symv_U.c new file mode 100644 index 000000000..b5a0c96e9 --- /dev/null +++ b/kernel/riscv64/symv_U.c @@ -0,0 +1,71 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + + +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) +{ + BLASLONG i; + BLASLONG ix,iy; + BLASLONG jx,jy; + BLASLONG j; + FLOAT temp1; + FLOAT temp2; + +#if 0 + if( m != offset ) + printf("Symv_U: m=%d offset=%d\n",m,offset); +#endif + + BLASLONG m1 = m - offset; + + jx = m1 * inc_x; + jy = m1 * inc_y; + + for (j=m1; j 0){ + i = 0; + gvl = vsetvli(j, RVV_EFLOAT, RVV_M); + vr = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < j / gvl; k++){ + vy = VLEV_FLOAT(&y[i], gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSEV_FLOAT(&y[i], vy, gvl); + + vx = VLEV_FLOAT(&x[i], gvl); + vr = VFMACCVV_FLOAT(vr, vx, va, gvl); + + i += gvl; + } + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 = va[0]; + if(i < j){ + gvl = vsetvli(j-i, RVV_EFLOAT, RVV_M); + vy = VLEV_FLOAT(&y[i], gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSEV_FLOAT(&y[i], vy, gvl); + + vx = VLEV_FLOAT(&x[i], gvl); + vr = VFMULVV_FLOAT(vx, va, gvl); + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 += va[0]; + } + } + y[j] += temp1 * a_ptr[j] + alpha * temp2; + a_ptr += lda; + } + }else if(inc_x == 1){ + jy = m1 * inc_y; + a_ptr += m1 * lda; + stride_y = inc_y * sizeof(FLOAT); + for (j=m1; j 0){ + iy = 0; + i = 0; + gvl = vsetvli(j, RVV_EFLOAT, RVV_M); + inc_yv = inc_y * gvl; + vr = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < j / gvl; k++){ + vy = VLSEV_FLOAT(&y[iy], stride_y, gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSSEV_FLOAT(&y[iy], stride_y, vy, gvl); + + vx = VLEV_FLOAT(&x[i], gvl); + vr = VFMACCVV_FLOAT(vr, vx, va, gvl); + + i += gvl; + iy += inc_yv; + } + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 = va[0]; + if(i < j){ + gvl = vsetvli(j-i, RVV_EFLOAT, RVV_M); + vy = VLSEV_FLOAT(&y[iy], stride_y, gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSSEV_FLOAT(&y[iy], stride_y, vy, gvl); + + vx = VLEV_FLOAT(&x[i], gvl); + vr = VFMULVV_FLOAT(vx, va, gvl); + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 += va[0]; + } + } + y[jy] += temp1 * a_ptr[j] + alpha * temp2; + a_ptr += lda; + jy += inc_y; + } + }else if(inc_y == 1){ + jx = m1 * inc_x; + a_ptr += m1 * lda; + stride_x = inc_x * sizeof(FLOAT); + for (j=m1; j 0){ + ix = 0; + i = 0; + gvl = vsetvli(j, RVV_EFLOAT, RVV_M); + inc_xv = inc_x * gvl; + vr = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < j / gvl; k++){ + vy = VLEV_FLOAT(&y[i], gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSEV_FLOAT(&y[i], vy, gvl); + + vx = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vr = VFMACCVV_FLOAT(vr, vx, va, gvl); + + i += gvl; + ix += inc_xv; + } + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 = va[0]; + 
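//handle the tail that did not fill a full vector group
+            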
if(i < j){ + gvl = vsetvli(j-i, RVV_EFLOAT, RVV_M); + vy = VLEV_FLOAT(&y[i], gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSEV_FLOAT(&y[i], vy, gvl); + + vx = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vr = VFMULVV_FLOAT(vx, va, gvl); + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 += va[0]; + } + } + y[j] += temp1 * a_ptr[j] + alpha * temp2; + a_ptr += lda; + jx += inc_x; + } + }else{ + jx = m1 * inc_x; + jy = m1 * inc_y; + a_ptr += m1 * lda; + stride_x = inc_x * sizeof(FLOAT); + stride_y = inc_y * sizeof(FLOAT); + for (j=m1; j 0){ + ix = 0; + iy = 0; + i = 0; + gvl = vsetvli(j, RVV_EFLOAT, RVV_M); + inc_xv = inc_x * gvl; + inc_yv = inc_y * gvl; + vr = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < j / gvl; k++){ + vy = VLSEV_FLOAT(&y[iy], stride_y, gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSSEV_FLOAT(&y[iy], stride_y, vy, gvl); + + vx = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vr = VFMACCVV_FLOAT(vr, vx, va, gvl); + + i += gvl; + ix += inc_xv; + iy += inc_yv; + } + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 = va[0]; + if(i < j){ + gvl = vsetvli(j-i, RVV_EFLOAT, RVV_M); + vy = VLSEV_FLOAT(&y[iy], stride_y, gvl); + va = VLEV_FLOAT(&a_ptr[i], gvl); + vy = VFMACCVF_FLOAT(vy, temp1, va, gvl); + VSSEV_FLOAT(&y[iy], stride_y, vy, gvl); + + vx = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vr = VFMULVV_FLOAT(vx, va, gvl); + va = VFMVVF_FLOAT(0, gvl); + va = VFREDSUM_FLOAT(vr, va, gvl); + temp2 += va[0]; + } + } + y[jy] += temp1 * a_ptr[j] + alpha * temp2; + a_ptr += lda; + jx += inc_x; + jy += inc_y; + } + } + return(0); +} + diff --git a/kernel/riscv64/zamax.c b/kernel/riscv64/zamax.c new file mode 100644 index 000000000..a39bd7821 --- /dev/null +++ b/kernel/riscv64/zamax.c @@ -0,0 +1,79 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+
+/**************************************************************************************
+* 2013/09/14 Saar
+* BLASTEST float   : OK
+* BLASTEST double  : OK
+* CTEST            : NoTest
+* TEST             : NoTest
+*
+**************************************************************************************/
+
+#include "common.h"
+#include <math.h>
+
+#if defined(DOUBLE)
+
+#define ABS fabs
+
+#else
+
+#define ABS fabsf
+
+#endif
+
+#define CABS1(x,i) ABS(x[i])+ABS(x[i+1])
+
+FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
+{
+    BLASLONG i=0;
+    BLASLONG ix=0;
+    FLOAT maxf;
+    BLASLONG inc_x2;
+
+    if (n <= 0 || inc_x <= 0) return(0.0);
+
+    inc_x2 = 2 * inc_x;
+
+    maxf = CABS1(x,0);
+    ix += inc_x2;
+    i++;
+
+    while(i < n)
+    {
+        if( CABS1(x,ix) > maxf )
+        {
+            maxf = CABS1(x,ix);
+        }
+        ix += inc_x2;
+        i++;
+    }
+    return(maxf);
+}
+
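+/*
+ * As in the scalar version above, the vector kernel that follows ranks
+ * complex entries by |Re| + |Im| (the BLAS "cabs1" measure) rather than
+ * sqrt(Re*Re + Im*Im), so no square roots are needed to pick the amax
+ * value.
+ */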
+*****************************************************************************/ + +#include "common.h" +#include <math.h> + +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M8 +#define FLOAT_V_T float32xm8_t +#define VLSEV_FLOAT vlsev_float32xm8 +#define VFREDMAXVS_FLOAT vfredmaxvs_float32xm8 +#define MASK_T e32xm8_t +#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8 +#define VFMVVF_FLOAT vfmvvf_float32xm8 +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8 +#define VFMAXVV_FLOAT vfmaxvv_float32xm8 +#define VFADDVV_FLOAT vfaddvv_float32xm8 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M8 +#define FLOAT_V_T float64xm8_t +#define VLSEV_FLOAT vlsev_float64xm8 +#define VFREDMAXVS_FLOAT vfredmaxvs_float64xm8 +#define MASK_T e64xm8_t +#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8 +#define VFMVVF_FLOAT vfmvvf_float64xm8 +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8 +#define VFMAXVV_FLOAT vfmaxvv_float64xm8 +#define VFADDVV_FLOAT vfaddvv_float64xm8 +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0, j=0; + BLASLONG ix=0; + FLOAT maxf=0.0; + if (n <= 0 || inc_x <= 0) return(maxf); + unsigned int gvl = 0; + FLOAT_V_T v0, v1, v_max; + + MASK_T mask0, mask1; + BLASLONG stride_x = inc_x * sizeof(FLOAT) * 2; + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + v_max = VFMVVF_FLOAT(0, gvl); + BLASLONG inc_xv = inc_x * gvl * 2; + for(; i maxf) + maxf = v_max[0]; + } + return(maxf); +} diff --git a/kernel/riscv64/zamin.c b/kernel/riscv64/zamin.c new file mode 100644 index 000000000..02eab3e75 --- /dev/null +++ b/kernel/riscv64/zamin.c @@ -0,0 +1,79 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + +/************************************************************************************** +* 2013/09/14 Saar +* BLASTEST float : OK +* BLASTEST double : OK +* CTEST : NoTest +* TEST : NoTest +* +**************************************************************************************/ + +#include "common.h" +#include <math.h> + +#if defined(DOUBLE) + +#define ABS fabs + +#else + +#define ABS fabsf + +#endif + +#define CABS1(x,i) ABS(x[i])+ABS(x[i+1]) + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0; + BLASLONG ix=0; + FLOAT minf; + BLASLONG inc_x2; + + if (n <= 0 || inc_x <= 0) return(0.0); + + inc_x2 = 2 * inc_x; + + minf = CABS1(x,0); + ix += inc_x2; + i++; + + while(i < n) + { + if( CABS1(x,ix) < minf ) + { + minf = CABS1(x,ix); + } + ix += inc_x2; + i++; + } + return(minf); +} + + diff --git a/kernel/riscv64/zamin_vector.c b/kernel/riscv64/zamin_vector.c new file mode 100644 index 000000000..44a7cf1dc --- /dev/null +++ b/kernel/riscv64/zamin_vector.c @@ -0,0 +1,104 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + +#include "common.h" +#include <math.h> +#include <float.h> + +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M8 +#define FLOAT_V_T float32xm8_t +#define VLSEV_FLOAT vlsev_float32xm8 +#define VFREDMINVS_FLOAT vfredminvs_float32xm8 +#define MASK_T e32xm8_t +#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8 +#define VFMVVF_FLOAT vfmvvf_float32xm8 +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8 +#define VFMINVV_FLOAT vfminvv_float32xm8 +#define VFADDVV_FLOAT vfaddvv_float32xm8 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M8 +#define FLOAT_V_T float64xm8_t +#define VLSEV_FLOAT vlsev_float64xm8 +#define VFREDMINVS_FLOAT vfredminvs_float64xm8 +#define MASK_T e64xm8_t +#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8 +#define VFMVVF_FLOAT vfmvvf_float64xm8 +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8 +#define VFMINVV_FLOAT vfminvv_float64xm8 +#define VFADDVV_FLOAT vfaddvv_float64xm8 +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0, j=0; + BLASLONG ix=0; + if (n <= 0 || inc_x <= 0) return(0.0); + FLOAT minf=FLT_MAX; + unsigned int gvl = 0; + FLOAT_V_T v0, v1, v_min; + MASK_T mask0, mask1; + BLASLONG stride_x = inc_x * sizeof(FLOAT) * 2; + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + v_min = VFMVVF_FLOAT(FLT_MAX, gvl); + BLASLONG inc_xv = inc_x * gvl * 2; + for(; i + +#if defined(DOUBLE) + +#define ABS fabs + +#else + +#define ABS fabsf + +#endif + +#define CABS1(x,i) ABS(x[i])+ABS(x[i+1]) + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0; + FLOAT sumf = 0.0; + BLASLONG inc_x2; + + if (n <= 0 || inc_x <= 0) return(sumf); + + inc_x2 = 2 * inc_x; + + n *= inc_x2; + while(i < n) + { + sumf += CABS1(x,i); + i += inc_x2; + } + return(sumf); +} + + diff --git a/kernel/riscv64/zasum_vector.c b/kernel/riscv64/zasum_vector.c new file mode 100644 index 000000000..d9fa88971 --- /dev/null +++ b/kernel/riscv64/zasum_vector.c @@ -0,0 +1,136 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED.
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" +#include <math.h> + +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M8 +#define FLOAT_V_T float32xm8_t +#define VLEV_FLOAT vlev_float32xm8 +#define VLSEV_FLOAT vlsev_float32xm8 +#define VFREDSUMVS_FLOAT vfredsumvs_float32xm8 +#define MASK_T e32xm8_t +#define VMFLTVF_FLOAT vmfltvf_e32xm8_float32xm8 +#define VFMVVF_FLOAT vfmvvf_float32xm8 +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm8 +#define VFADDVV_FLOAT vfaddvv_float32xm8 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M8 +#define FLOAT_V_T float64xm8_t +#define VLEV_FLOAT vlev_float64xm8 +#define VLSEV_FLOAT vlsev_float64xm8 +#define VFREDSUMVS_FLOAT vfredsumvs_float64xm8 +#define MASK_T e64xm8_t +#define VMFLTVF_FLOAT vmfltvf_e64xm8_float64xm8 +#define VFMVVF_FLOAT vfmvvf_float64xm8 +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm8 +#define VFADDVV_FLOAT vfaddvv_float64xm8 +#endif +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0, j=0; + BLASLONG ix=0; + FLOAT asumf=0.0; + if (n <= 0 || inc_x <= 0) return(asumf); + unsigned int gvl = 0; + FLOAT_V_T v0, v1, v_zero,v_sum; + + MASK_T mask0, mask1; + if(inc_x == 1){ + BLASLONG n2 = n * 2; + gvl = vsetvli(n2, RVV_EFLOAT, RVV_M); + v_zero = VFMVVF_FLOAT(0, gvl); + if(gvl <= n2/2){ + v_sum = VFMVVF_FLOAT(0, gvl); + for(i=0,j=0; i 0){ + ix = 0; + iy = 0; + i = 0; + gvl = vsetvli(len, RVV_EFLOAT, RVV_M); + inc_xv = incx * gvl * 2; + inc_yv = incy * gvl * 2; + inc_av = gvl * 2; + vr0 = VFMVVF_FLOAT(0, gvl); + vr1 = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < len / gvl; k++){ + va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl); + va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl); + vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl); + vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl); +#ifndef HEMVREV + vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl); + vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl); +#else + vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl); + vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl); + vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl); +#endif + VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl); + VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl); + + vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl); +#ifndef HEMVREV + vr0 = VFMACCVV_FLOAT(vr0, vx0, va0, gvl); + vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl); + vr1 = VFMACCVV_FLOAT(vr1, vx1, va0, gvl); + vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl); +#else + vr0 = VFMACCVV_FLOAT(vr0, vx0, va0, gvl); + vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl); + vr1 = VFMACCVV_FLOAT(vr1, vx1, va0, gvl); + vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl); + +#endif + i += gvl; + ix += inc_xv; + iy += inc_yv; + ia += inc_av; + } + va0 = VFMVVF_FLOAT(0, gvl); + vx0 = VFREDSUM_FLOAT(vr0, va0, gvl); + temp_r2 = vx0[0]; + vx1 = VFREDSUM_FLOAT(vr1, va0, gvl); + temp_i2 = vx1[0]; + if(i < m){ + gvl = vsetvli(m-i,
RVV_EFLOAT, RVV_M); + va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl); + va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl); + vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl); + vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl); +#ifndef HEMVREV + vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl); + vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl); +#else + vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl); + vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl); + vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl); +#endif + VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl); + VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl); + + vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl); +#ifndef HEMVREV + vr0 = VFMULVV_FLOAT(vx0, va0, gvl); + vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl); + vr1 = VFMULVV_FLOAT(vx1, va0, gvl); + vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl); +#else + vr0 = VFMULVV_FLOAT(vx0, va0, gvl); + vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl); + vr1 = VFMULVV_FLOAT(vx1, va0, gvl); + vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl); +#endif + + va0 = VFMVVF_FLOAT(0, gvl); + vx0 = VFREDSUM_FLOAT(vr0, va0, gvl); + temp_r2 += vx0[0]; + vx1 = VFREDSUM_FLOAT(vr1, va0, gvl); + temp_i2 += vx1[0]; + } + } + y[jy] += alpha_r * temp_r2 - alpha_i * temp_i2; + y[jy+1] += alpha_r * temp_i2 + alpha_i * temp_r2; + jx += inc_x2; + jy += inc_y2; + ja += 2; + a_ptr += lda2; + } + return(0); +} diff --git a/kernel/riscv64/zhemv_UV_vector.c b/kernel/riscv64/zhemv_UV_vector.c new file mode 100644 index 000000000..6fe12c76c --- /dev/null +++ b/kernel/riscv64/zhemv_UV_vector.c @@ -0,0 +1,192 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "common.h" +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M4 +#define FLOAT_V_T float32xm4_t +#define VLSEV_FLOAT vlsev_float32xm4 +#define VSSEV_FLOAT vssev_float32xm4 +#define VFREDSUM_FLOAT vfredsumvs_float32xm4 +#define VFMACCVV_FLOAT vfmaccvv_float32xm4 +#define VFMACCVF_FLOAT vfmaccvf_float32xm4 +#define VFMVVF_FLOAT vfmvvf_float32xm4 +#define VFMULVV_FLOAT vfmulvv_float32xm4 +#define VFNMSACVF_FLOAT vfnmsacvf_float32xm4 +#define VFNMSACVV_FLOAT vfnmsacvv_float32xm4 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M4 +#define FLOAT_V_T float64xm4_t +#define VLSEV_FLOAT vlsev_float64xm4 +#define VSSEV_FLOAT vssev_float64xm4 +#define VFREDSUM_FLOAT vfredsumvs_float64xm4 +#define VFMACCVV_FLOAT vfmaccvv_float64xm4 +#define VFMACCVF_FLOAT vfmaccvf_float64xm4 +#define VFMVVF_FLOAT vfmvvf_float64xm4 +#define VFMULVV_FLOAT vfmulvv_float64xm4 +#define VFNMSACVF_FLOAT vfnmsacvf_float64xm4 +#define VFNMSACVV_FLOAT vfnmsacvv_float64xm4 +#endif + +int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG incx, FLOAT *y, BLASLONG incy, FLOAT *buffer){ + BLASLONG i, j, k; + BLASLONG ix, iy, ia; + BLASLONG jx, jy, ja; + FLOAT temp_r1, temp_i1; + FLOAT temp_r2, temp_i2; + FLOAT *a_ptr = a; + unsigned int gvl = 0; + + + FLOAT_V_T va0, va1, vx0, vx1, vy0, vy1, vr0, vr1; + BLASLONG stride_x, stride_y, stride_a, inc_xv, inc_yv, inc_av, lda2; + + BLASLONG inc_x2 = incx * 2; + BLASLONG inc_y2 = incy * 2; + stride_x = inc_x2 * sizeof(FLOAT); + stride_y = inc_y2 * sizeof(FLOAT); + stride_a = 2 * sizeof(FLOAT); + lda2 = lda * 2; + + BLASLONG m1 = m - offset; + a_ptr = a + m1 * lda2; + jx = m1 * inc_x2; + jy = m1 * inc_y2; + ja = m1 * 2; + for(j = m1; j < m; j++){ + temp_r1 = alpha_r * x[jx] - alpha_i * x[jx+1];; + temp_i1 = alpha_r * x[jx+1] + alpha_i * x[jx]; + temp_r2 = 0; + temp_i2 = 0; + ix = 0; + iy = 0; + ia = 0; + i = 0; + if(j > 0){ + gvl = vsetvli(j, RVV_EFLOAT, RVV_M); + inc_xv = incx * gvl * 2; + inc_yv = incy * gvl * 2; + inc_av = gvl * 2; + vr0 = VFMVVF_FLOAT(0, gvl); + vr1 = VFMVVF_FLOAT(0, gvl); + for(k = 0; k < j / gvl; k++){ + va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl); + va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl); + vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl); + vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl); +#ifndef HEMVREV + vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl); + vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl); +#else + vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl); + vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl); + vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl); +#endif + VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl); + VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl); + + vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl); +#ifndef HEMVREV + vr0 = VFMACCVV_FLOAT(vr0, vx0, va0, gvl); + vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl); + vr1 = VFMACCVV_FLOAT(vr1, vx1, va0, gvl); + vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl); +#else + vr0 = VFMACCVV_FLOAT(vr0, vx0, va0, gvl); + vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl); + vr1 = VFMACCVV_FLOAT(vr1, vx1, va0, gvl); + vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl); + +#endif + i += gvl; + ix += inc_xv; + iy += inc_yv; + ia += inc_av; + } + va0 = VFMVVF_FLOAT(0, gvl); + vx0 = VFREDSUM_FLOAT(vr0, va0, gvl); + temp_r2 = 
vx0[0]; + vx1 = VFREDSUM_FLOAT(vr1, va0, gvl); + temp_i2 = vx1[0]; + if(i < j){ + gvl = vsetvli(j-i, RVV_EFLOAT, RVV_M); + va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl); + va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl); + vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl); + vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl); +#ifndef HEMVREV + vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl); + vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl); +#else + vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl); + vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl); + vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl); + vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl); +#endif + VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl); + VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl); + + vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl); + vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl); +#ifndef HEMVREV + vr0 = VFMULVV_FLOAT(vx0, va0, gvl); + vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl); + vr1 = VFMULVV_FLOAT(vx1, va0, gvl); + vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl); +#else + vr0 = VFMULVV_FLOAT(vx0, va0, gvl); + vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl); + vr1 = VFMULVV_FLOAT(vx1, va0, gvl); + vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl); +#endif + + va0 = VFMVVF_FLOAT(0, gvl); + vx0 = VFREDSUM_FLOAT(vr0, va0, gvl); + temp_r2 += vx0[0]; + vx1 = VFREDSUM_FLOAT(vr1, va0, gvl); + temp_i2 += vx1[0]; + } + } + y[jy] += temp_r1 * a_ptr[ja]; + y[jy+1] += temp_i1 * a_ptr[ja]; + y[jy] += alpha_r * temp_r2 - alpha_i * temp_i2; + y[jy+1] += alpha_r * temp_i2 + alpha_i * temp_r2; + jx += inc_x2; + jy += inc_y2; + ja += 2; + a_ptr += lda2; + } + return(0); +} diff --git a/kernel/riscv64/znrm2.c b/kernel/riscv64/znrm2.c new file mode 100644 index 000000000..fc1c8b54a --- /dev/null +++ b/kernel/riscv64/znrm2.c @@ -0,0 +1,106 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +/************************************************************************************** +* 2013/09/13 Saar +* BLASTEST float : OK +* BLASTEST double : OK +* CTEST : OK +* TEST : OK +* +**************************************************************************************/ + +#include "common.h" +#include <math.h> + +#if defined(DOUBLE) + +#define ABS fabs + +#else + +#define ABS fabsf + +#endif + + + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0; + FLOAT scale = 0.0; + FLOAT ssq = 1.0; + BLASLONG inc_x2; + FLOAT temp; + + if (n <= 0 || inc_x <= 0) return(0.0); + + inc_x2 = 2 * inc_x; + + n *= inc_x2; + while(i < n) + { + + if ( x[i] != 0.0 ) + { + temp = ABS( x[i] ); + if ( scale < temp ) + { + ssq = 1 + ssq * ( scale / temp ) * ( scale / temp ); + scale = temp ; + } + else + { + ssq += ( temp / scale ) * ( temp / scale ); + } + + } + + if ( x[i+1] != 0.0 ) + { + temp = ABS( x[i+1] ); + if ( scale < temp ) + { + ssq = 1 + ssq * ( scale / temp ) * ( scale / temp ); + scale = temp ; + } + else + { + ssq += ( temp / scale ) * ( temp / scale ); + } + + } + + + i += inc_x2; + } + scale = scale * sqrt( ssq ); + return(scale); + +} + + diff --git a/kernel/riscv64/znrm2_vector.c b/kernel/riscv64/znrm2_vector.c new file mode 100644 index 000000000..b0ebfa5f4 --- /dev/null +++ b/kernel/riscv64/znrm2_vector.c @@ -0,0 +1,278 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + +#include "common.h" +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M4 +#define FLOAT_V_T float32xm4_t +#define VLEV_FLOAT vlev_float32xm4 +#define VLSEV_FLOAT vlsev_float32xm4 +#define VFREDSUM_FLOAT vfredsumvs_float32xm4 +#define VFMACCVV_FLOAT vfmaccvv_float32xm4 +#define VFMVVF_FLOAT vfmvvf_float32xm4 +#define VFDOTVV_FLOAT vfdotvv_float32xm4 +#define ABS fabsf +#define MASK_T e32xm4_t +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float32xm4 +#define VMFGTVF_FLOAT vmfgtvf_e32xm4_float32xm4 +#define VMFIRSTM vmfirstm_e32xm4 +#define VFDIVVF_FLOAT vfdivvf_float32xm4 +#define VMFLTVF_FLOAT vmfltvf_e32xm4_float32xm4 +#define VFREDMAXVS_FLOAT vfredmaxvs_float32xm4 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M4 +#define FLOAT_V_T float64xm4_t +#define VLEV_FLOAT vlev_float64xm4 +#define VLSEV_FLOAT vlsev_float64xm4 +#define VFREDSUM_FLOAT vfredsumvs_float64xm4 +#define VFMACCVV_FLOAT vfmaccvv_float64xm4 +#define VFMVVF_FLOAT vfmvvf_float64xm4 +#define VFDOTVV_FLOAT vfdotvv_float64xm4 +#define ABS fabs +#define MASK_T e64xm4_t +#define VFRSUBVF_MASK_FLOAT vfrsubvf_mask_float64xm4 +#define VMFGTVF_FLOAT vmfgtvf_e64xm4_float64xm4 +#define VMFIRSTM vmfirstm_e64xm4 +#define VFDIVVF_FLOAT vfdivvf_float64xm4 +#define VMFLTVF_FLOAT vmfltvf_e64xm4_float64xm4 +#define VFREDMAXVS_FLOAT vfredmaxvs_float64xm4 +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i=0, j=0; + + if ( n < 0 ) return(0.0); +// if(n == 1) return (ABS(x[0])); + + FLOAT_V_T vr, v0, v_zero; + unsigned int gvl = 0; + FLOAT scale = 0.0, ssq = 0.0; + MASK_T mask; + BLASLONG index = 0; + if(inc_x == 1){ + BLASLONG n2 = n * 2; + gvl = vsetvli(n2, RVV_EFLOAT, RVV_M); + vr = VFMVVF_FLOAT(0, gvl); + v_zero = VFMVVF_FLOAT(0, gvl); + for(i=0,j=0; i + +int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT dummy4, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) +{ + BLASLONG i=0; + BLASLONG ix=0,iy=0; + FLOAT temp[2]; + BLASLONG inc_x2; + BLASLONG inc_y2; + + if ( n < 0 ) return(0); + + inc_x2 = 2 * inc_x; + inc_y2 = 2 * inc_y; + + while(i < n) + { + + temp[0] = x[ix] ; + temp[1] = x[ix+1] ; + x[ix] = y[iy] ; + x[ix+1] = y[iy+1] ; + y[iy] = temp[0] ; + y[iy+1] = temp[1] ; + + ix += inc_x2 ; + iy += inc_y2 ; + i++ ; + + } + return(0); + +} + + diff --git a/kernel/riscv64/zswap_vector.c b/kernel/riscv64/zswap_vector.c new file mode 100644 index 000000000..b655a968c --- /dev/null +++ b/kernel/riscv64/zswap_vector.c @@ -0,0 +1,117 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" +#include <stdio.h> +#if !defined(DOUBLE) +#define RVV_EFLOAT RVV_E32 +#define RVV_M RVV_M8 +#define FLOAT_V_T float32xm8_t +#define VLEV_FLOAT vlev_float32xm8 +#define VLSEV_FLOAT vlsev_float32xm8 +#define VSEV_FLOAT vsev_float32xm8 +#define VSSEV_FLOAT vssev_float32xm8 +#else +#define RVV_EFLOAT RVV_E64 +#define RVV_M RVV_M8 +#define FLOAT_V_T float64xm8_t +#define VLEV_FLOAT vlev_float64xm8 +#define VLSEV_FLOAT vlsev_float64xm8 +#define VSEV_FLOAT vsev_float64xm8 +#define VSSEV_FLOAT vssev_float64xm8 +#endif + +int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT dummy4, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) +{ + BLASLONG i = 0, j = 0; + BLASLONG ix = 0,iy = 0; + BLASLONG stride_x, stride_y; + FLOAT_V_T vx0, vx1, vy0, vy1; + unsigned int gvl = 0; + + if (n < 0) return(0); + if(inc_x == 1 && inc_y == 1){ + gvl = vsetvli(n, RVV_EFLOAT, RVV_M); + BLASLONG n2 = n * 2; + if(gvl <= n2/2){ + for(i=0,j=0; i> 7); +#endif +#if BUILD_DOUBLE == 1 TABLE_NAME.dgemm_p = 32 * (l2 >> 7); +#endif +#if BUILD_COMPLEX==1 TABLE_NAME.cgemm_p = 32 * (l2 >> 7); +#endif +#if BUILD_COMPLEX16==1 TABLE_NAME.zgemm_p = 16 * (l2 >> 7); +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = 16 * (l2 >> 7); TABLE_NAME.xgemm_p = 8 * (l2 >> 7); @@ -916,10 +1265,18 @@ static void init_parameter(void) { fprintf(stderr, "Northwood\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = 96 * (l2 >> 7); +#endif +#if BUILD_DOUBLE == 1 TABLE_NAME.dgemm_p = 48 * (l2 >> 7); +#endif +#if BUILD_COMPLEX==1 TABLE_NAME.cgemm_p = 48 * (l2 >> 7); +#endif +#if BUILD_COMPLEX16==1 TABLE_NAME.zgemm_p = 24 * (l2 >> 7); +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = 24 * (l2 >> 7); TABLE_NAME.xgemm_p = 12 * (l2 >> 7); @@ -932,10 +1289,18 @@ static void init_parameter(void) { fprintf(stderr, "Atom\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = 256; +#endif +#if BUILD_DOUBLE ==1 TABLE_NAME.dgemm_p = 128; +#endif +#if BUILD_COMPLEX==1 TABLE_NAME.cgemm_p = 128; +#endif +#if BUILD_COMPLEX16==1 TABLE_NAME.zgemm_p = 64; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = 64; TABLE_NAME.xgemm_p = 32; @@ -948,10 +1313,18 @@ static void init_parameter(void) { fprintf(stderr, "Prescott\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = 56 * (l2 >> 7); +#endif +#if BUILD_DOUBLE ==1 TABLE_NAME.dgemm_p = 28 * (l2 >> 7); +#endif +#if BUILD_COMPLEX==1 TABLE_NAME.cgemm_p = 28 * (l2 >> 7); +#endif +#if BUILD_COMPLEX16 == 1 TABLE_NAME.zgemm_p = 14 * (l2 >> 7); +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = 14 * (l2 >> 7); TABLE_NAME.xgemm_p = 7 * (l2 >> 7); @@ -964,10 +1337,18 @@
static void init_parameter(void) { fprintf(stderr, "Core2\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = 92 * (l2 >> 9) + 8; +#endif +#if BUILD_DOUBLE==1 TABLE_NAME.dgemm_p = 46 * (l2 >> 9) + 8; +#endif +#if BUILD_COMPLEX==1 TABLE_NAME.cgemm_p = 46 * (l2 >> 9) + 4; +#endif +#if BUILD_COMPLEX16==1 TABLE_NAME.zgemm_p = 23 * (l2 >> 9) + 4; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = 92 * (l2 >> 9) + 8; TABLE_NAME.xgemm_p = 46 * (l2 >> 9) + 4; @@ -980,10 +1361,18 @@ static void init_parameter(void) { fprintf(stderr, "Penryn\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = 42 * (l2 >> 9) + 8; +#endif +#if BUILD_DOUBLE == 1 TABLE_NAME.dgemm_p = 42 * (l2 >> 9) + 8; +#endif +#if BUILD_COMPLEX==1 TABLE_NAME.cgemm_p = 21 * (l2 >> 9) + 4; +#endif +#if BUILD_COMPLEX16==1 TABLE_NAME.zgemm_p = 21 * (l2 >> 9) + 4; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = 42 * (l2 >> 9) + 8; TABLE_NAME.xgemm_p = 21 * (l2 >> 9) + 4; @@ -996,10 +1385,18 @@ static void init_parameter(void) { fprintf(stderr, "Dunnington\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = 42 * (l2 >> 9) + 8; +#endif +#if BUILD_DOUBLE ==1 TABLE_NAME.dgemm_p = 42 * (l2 >> 9) + 8; +#endif +#if BUILD_COMPLEX==1 TABLE_NAME.cgemm_p = 21 * (l2 >> 9) + 4; +#endif +#if BUILD_COMPLEX16==1 TABLE_NAME.zgemm_p = 21 * (l2 >> 9) + 4; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = 42 * (l2 >> 9) + 8; TABLE_NAME.xgemm_p = 21 * (l2 >> 9) + 4; @@ -1013,10 +1410,18 @@ static void init_parameter(void) { fprintf(stderr, "Nehalem\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1029,10 +1434,18 @@ static void init_parameter(void) { fprintf(stderr, "Sandybridge\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1045,26 +1458,42 @@ static void init_parameter(void) { fprintf(stderr, "Haswell\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if (BUILD_DOUBLE==1) || (BUILD_COMPLEX16) TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; #endif #endif -#ifdef SKYLAKEX +#if defined(SKYLAKEX) || defined(COOPERLAKE) #ifdef DEBUG fprintf(stderr, "SkylakeX\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1078,10 +1507,18 @@ static void init_parameter(void) { fprintf(stderr, "Opteron\n"); 
#endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = 224 + 56 * (l2 >> 7); +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = 112 + 28 * (l2 >> 7); +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = 112 + 28 * (l2 >> 7); +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = 56 + 14 * (l2 >> 7); +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = 56 + 14 * (l2 >> 7); TABLE_NAME.xgemm_p = 28 + 7 * (l2 >> 7); @@ -1094,10 +1531,18 @@ static void init_parameter(void) { fprintf(stderr, "Barcelona\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1110,10 +1555,18 @@ static void init_parameter(void) { fprintf(stderr, "Bobcate\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1126,10 +1579,18 @@ static void init_parameter(void) { fprintf(stderr, "Bulldozer\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1142,10 +1603,18 @@ static void init_parameter(void) { fprintf(stderr, "Excavator\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1159,10 +1628,18 @@ static void init_parameter(void) { fprintf(stderr, "Piledriver\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1175,10 +1652,18 @@ static void init_parameter(void) { fprintf(stderr, "Steamroller\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1191,10 +1676,18 @@ static void init_parameter(void) { fprintf(stderr, "Zen\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if BUILD_DOUBLE TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if 
BUILD_COMPLEX TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if BUILD_COMPLEX16 TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; @@ -1208,11 +1701,18 @@ static void init_parameter(void) { fprintf(stderr, "NANO\n"); #endif +#if (BUILD_SINGLE==1) || (BUILD_COMPLEX==1) TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; +#endif +#if (BUILD_DOUBLE==1) TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; +#endif +#if (BUILD_COMPLEX==1) TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; +#endif +#if (BUILD_COMPLEX16==1) TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; - +#endif #ifdef EXPRECISION @@ -1222,41 +1722,55 @@ static void init_parameter(void) { #endif - +#if BUILD_COMPLEX==1 #ifdef CGEMM3M_DEFAULT_P TABLE_NAME.cgemm3m_p = CGEMM3M_DEFAULT_P; #else TABLE_NAME.cgemm3m_p = TABLE_NAME.sgemm_p; #endif +#endif +#if BUILD_COMPLEX16==1 #ifdef ZGEMM3M_DEFAULT_P TABLE_NAME.zgemm3m_p = ZGEMM3M_DEFAULT_P; #else TABLE_NAME.zgemm3m_p = TABLE_NAME.dgemm_p; #endif +#endif #ifdef EXPRECISION TABLE_NAME.xgemm3m_p = TABLE_NAME.qgemm_p; #endif - +#if BUILD_SINGLE == 1 TABLE_NAME.sgemm_p = ((TABLE_NAME.sgemm_p + SGEMM_DEFAULT_UNROLL_M - 1)/SGEMM_DEFAULT_UNROLL_M) * SGEMM_DEFAULT_UNROLL_M; +#endif +#if BUILD_DOUBLE== 1 TABLE_NAME.dgemm_p = ((TABLE_NAME.dgemm_p + DGEMM_DEFAULT_UNROLL_M - 1)/DGEMM_DEFAULT_UNROLL_M) * DGEMM_DEFAULT_UNROLL_M; +#endif +#if BUILD_COMPLEX==1 TABLE_NAME.cgemm_p = ((TABLE_NAME.cgemm_p + CGEMM_DEFAULT_UNROLL_M - 1)/CGEMM_DEFAULT_UNROLL_M) * CGEMM_DEFAULT_UNROLL_M; +#endif +#if BUILD_COMPLEX16==1 TABLE_NAME.zgemm_p = ((TABLE_NAME.zgemm_p + ZGEMM_DEFAULT_UNROLL_M - 1)/ZGEMM_DEFAULT_UNROLL_M) * ZGEMM_DEFAULT_UNROLL_M; +#endif +#if BUILD_COMPLEX==1 #ifdef CGEMM3M_DEFAULT_UNROLL_M TABLE_NAME.cgemm3m_p = ((TABLE_NAME.cgemm3m_p + CGEMM3M_DEFAULT_UNROLL_M - 1)/CGEMM3M_DEFAULT_UNROLL_M) * CGEMM3M_DEFAULT_UNROLL_M; #else TABLE_NAME.cgemm3m_p = ((TABLE_NAME.cgemm3m_p + SGEMM_DEFAULT_UNROLL_M - 1)/SGEMM_DEFAULT_UNROLL_M) * SGEMM_DEFAULT_UNROLL_M; #endif +#endif +#if BUILD_COMPLEX16==1 #ifdef ZGEMM3M_DEFAULT_UNROLL_M TABLE_NAME.zgemm3m_p = ((TABLE_NAME.zgemm3m_p + ZGEMM3M_DEFAULT_UNROLL_M - 1)/ZGEMM3M_DEFAULT_UNROLL_M) * ZGEMM3M_DEFAULT_UNROLL_M; #else TABLE_NAME.zgemm3m_p = ((TABLE_NAME.zgemm3m_p + DGEMM_DEFAULT_UNROLL_M - 1)/DGEMM_DEFAULT_UNROLL_M) * DGEMM_DEFAULT_UNROLL_M; #endif +#endif #ifdef QUAD_PRECISION TABLE_NAME.qgemm_p = ((TABLE_NAME.qgemm_p + QGEMM_DEFAULT_UNROLL_M - 1)/QGEMM_DEFAULT_UNROLL_M) * QGEMM_DEFAULT_UNROLL_M; @@ -1268,15 +1782,19 @@ static void init_parameter(void) { fprintf(stderr, "L2 = %8d DGEMM_P .. 
%d\n", l2, TABLE_NAME.dgemm_p); #endif +#if BUILD_SINGLE==1 TABLE_NAME.sgemm_r = (((BUFFER_SIZE - ((TABLE_NAME.sgemm_p * TABLE_NAME.sgemm_q * 4 + TABLE_NAME.offsetA + TABLE_NAME.align) & ~TABLE_NAME.align) ) / (TABLE_NAME.sgemm_q * 4) - 15) & ~15); +#endif +#if BUILD_DOUBLE==1 TABLE_NAME.dgemm_r = (((BUFFER_SIZE - ((TABLE_NAME.dgemm_p * TABLE_NAME.dgemm_q * 8 + TABLE_NAME.offsetA + TABLE_NAME.align) & ~TABLE_NAME.align) ) / (TABLE_NAME.dgemm_q * 8) - 15) & ~15); +#endif #ifdef EXPRECISION TABLE_NAME.qgemm_r = (((BUFFER_SIZE - @@ -1285,26 +1803,33 @@ static void init_parameter(void) { ) / (TABLE_NAME.qgemm_q * 16) - 15) & ~15); #endif +#if BUILD_COMPLEX ==1 TABLE_NAME.cgemm_r = (((BUFFER_SIZE - ((TABLE_NAME.cgemm_p * TABLE_NAME.cgemm_q * 8 + TABLE_NAME.offsetA + TABLE_NAME.align) & ~TABLE_NAME.align) ) / (TABLE_NAME.cgemm_q * 8) - 15) & ~15); +#endif +#if BUILD_COMPLEX16 ==1 TABLE_NAME.zgemm_r = (((BUFFER_SIZE - ((TABLE_NAME.zgemm_p * TABLE_NAME.zgemm_q * 16 + TABLE_NAME.offsetA + TABLE_NAME.align) & ~TABLE_NAME.align) ) / (TABLE_NAME.zgemm_q * 16) - 15) & ~15); +#endif +#if BUILD_COMPLEX == 1 TABLE_NAME.cgemm3m_r = (((BUFFER_SIZE - ((TABLE_NAME.cgemm3m_p * TABLE_NAME.cgemm3m_q * 8 + TABLE_NAME.offsetA + TABLE_NAME.align) & ~TABLE_NAME.align) ) / (TABLE_NAME.cgemm3m_q * 8) - 15) & ~15); +#endif +#if BUILD_COMPLEX16 == 1 TABLE_NAME.zgemm3m_r = (((BUFFER_SIZE - ((TABLE_NAME.zgemm3m_p * TABLE_NAME.zgemm3m_q * 16 + TABLE_NAME.offsetA + TABLE_NAME.align) & ~TABLE_NAME.align) ) / (TABLE_NAME.zgemm3m_q * 16) - 15) & ~15); - +#endif @@ -1325,4 +1850,6 @@ static void init_parameter(void) { } #endif //POWER -#endif //defined(ARCH_ARM64) +#endif //ZARCH +#endif //(ARCH_MIPS64) +#endif //(ARCH_ARM64) diff --git a/kernel/simd/intrin.h b/kernel/simd/intrin.h new file mode 100644 index 000000000..3802a91e1 --- /dev/null +++ b/kernel/simd/intrin.h @@ -0,0 +1,80 @@ +#ifndef _INTRIN_H_ +#define _INTRIN_H_ + +#if defined(_MSC_VER) +#define BLAS_INLINE __inline +#elif defined(__GNUC__) +#if defined(__STRICT_ANSI__) +#define BLAS_INLINE __inline__ +#else +#define BLAS_INLINE inline +#endif +#else +#define BLAS_INLINE +#endif + +#ifdef _MSC_VER +#define BLAS_FINLINE static __forceinline +#elif defined(__GNUC__) +#define BLAS_FINLINE static BLAS_INLINE __attribute__((always_inline)) +#else +#define BLAS_FINLINE static +#endif + +#ifdef __cplusplus extern "C" { +#endif +// include head +/** SSE **/ +#ifdef HAVE_SSE +#include <xmmintrin.h> +#endif +/** SSE2 **/ +#ifdef HAVE_SSE2 +#include <emmintrin.h> +#endif +/** SSE3 **/ +#ifdef HAVE_SSE3 +#include <pmmintrin.h> +#endif +/** SSSE3 **/ +#ifdef HAVE_SSSE3 +#include <tmmintrin.h> +#endif +/** SSE41 **/ +#ifdef HAVE_SSE4_1 +#include <smmintrin.h> +#endif + +/** AVX **/ +#if defined(HAVE_AVX) || defined(HAVE_FMA3) +#include <immintrin.h> +#endif + +/** NEON **/ +#ifdef HAVE_NEON +#include <arm_neon.h> +#endif + +// distribute +#if defined(HAVE_AVX512VL) || defined(HAVE_AVX512BF16) +#include "intrin_avx512.h" +#elif defined(HAVE_AVX2) +#include "intrin_avx.h" +#elif defined(HAVE_SSE2) +#include "intrin_sse.h" +#endif + +#ifdef HAVE_NEON +#include "intrin_neon.h" +#endif + +#ifndef V_SIMD + #define V_SIMD 0 + #define V_SIMD_F64 0 +#endif + +#ifdef __cplusplus } +#endif +#endif // _INTRIN_H_ diff --git a/kernel/simd/intrin_avx.h b/kernel/simd/intrin_avx.h new file mode 100644 index 000000000..fbe531417 --- /dev/null +++ b/kernel/simd/intrin_avx.h @@ -0,0 +1,70 @@ +#define V_SIMD 256 +#define V_SIMD_F64 1 +/*************************** + * Data Type + ***************************/ +typedef __m256 v_f32; +typedef __m256d v_f64; +#define v_nlanes_f32 8 +#define
v_nlanes_f64 4 +/*************************** + * Arithmetic + ***************************/ +#define v_add_f32 _mm256_add_ps +#define v_add_f64 _mm256_add_pd +#define v_sub_f32 _mm256_sub_ps +#define v_sub_f64 _mm256_sub_pd +#define v_mul_f32 _mm256_mul_ps +#define v_mul_f64 _mm256_mul_pd + +#ifdef HAVE_FMA3 + // multiply and add, a*b + c + #define v_muladd_f32 _mm256_fmadd_ps + #define v_muladd_f64 _mm256_fmadd_pd + // multiply and subtract, a*b - c + #define v_mulsub_f32 _mm256_fmsub_ps + #define v_mulsub_f64 _mm256_fmsub_pd +#else + // multiply and add, a*b + c + BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c) + { return v_add_f32(v_mul_f32(a, b), c); } + BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c) + { return v_add_f64(v_mul_f64(a, b), c); } + // multiply and subtract, a*b - c + BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c) + { return v_sub_f32(v_mul_f32(a, b), c); } + BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c) + { return v_sub_f64(v_mul_f64(a, b), c); } +#endif // !HAVE_FMA3 + +// Horizontal add: Calculates the sum of all vector elements. +BLAS_FINLINE float v_sum_f32(__m256 a) +{ + __m256 sum_halves = _mm256_hadd_ps(a, a); + sum_halves = _mm256_hadd_ps(sum_halves, sum_halves); + __m128 lo = _mm256_castps256_ps128(sum_halves); + __m128 hi = _mm256_extractf128_ps(sum_halves, 1); + __m128 sum = _mm_add_ps(lo, hi); + return _mm_cvtss_f32(sum); +} + +BLAS_FINLINE double v_sum_f64(__m256d a) +{ + __m256d sum_halves = _mm256_hadd_pd(a, a); + __m128d lo = _mm256_castpd256_pd128(sum_halves); + __m128d hi = _mm256_extractf128_pd(sum_halves, 1); + __m128d sum = _mm_add_pd(lo, hi); + return _mm_cvtsd_f64(sum); +} +/*************************** + * memory + ***************************/ +// unaligned load +#define v_loadu_f32 _mm256_loadu_ps +#define v_loadu_f64 _mm256_loadu_pd +#define v_storeu_f32 _mm256_storeu_ps +#define v_storeu_f64 _mm256_storeu_pd +#define v_setall_f32(VAL) _mm256_set1_ps(VAL) +#define v_setall_f64(VAL) _mm256_set1_pd(VAL) +#define v_zero_f32 _mm256_setzero_ps +#define v_zero_f64 _mm256_setzero_pd \ No newline at end of file diff --git a/kernel/simd/intrin_avx512.h b/kernel/simd/intrin_avx512.h new file mode 100644 index 000000000..8f38eedd9 --- /dev/null +++ b/kernel/simd/intrin_avx512.h @@ -0,0 +1,59 @@ +#define V_SIMD 512 +#define V_SIMD_F64 1 +/*************************** + * Data Type + ***************************/ +typedef __m512 v_f32; +typedef __m512d v_f64; +#define v_nlanes_f32 16 +#define v_nlanes_f64 8 +/*************************** + * Arithmetic + ***************************/ +#define v_add_f32 _mm512_add_ps +#define v_add_f64 _mm512_add_pd +#define v_sub_f32 _mm512_sub_ps +#define v_sub_f64 _mm512_sub_pd +#define v_mul_f32 _mm512_mul_ps +#define v_mul_f64 _mm512_mul_pd +// multiply and add, a*b + c +#define v_muladd_f32 _mm512_fmadd_ps +#define v_muladd_f64 _mm512_fmadd_pd +// multiply and subtract, a*b - c +#define v_mulsub_f32 _mm512_fmsub_ps +#define v_mulsub_f64 _mm512_fmsub_pd +BLAS_FINLINE float v_sum_f32(v_f32 a) +{ + __m512 h64 = _mm512_shuffle_f32x4(a, a, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 sum32 = _mm512_add_ps(a, h64); + __m512 h32 = _mm512_shuffle_f32x4(sum32, sum32, _MM_SHUFFLE(1, 0, 3, 2)); + __m512 sum16 = _mm512_add_ps(sum32, h32); + __m512 h16 = _mm512_permute_ps(sum16, _MM_SHUFFLE(1, 0, 3, 2)); + __m512 sum8 = _mm512_add_ps(sum16, h16); + __m512 h4 = _mm512_permute_ps(sum8, _MM_SHUFFLE(2, 3, 0, 1)); + __m512 sum4 = _mm512_add_ps(sum8, h4); + return _mm_cvtss_f32(_mm512_castps512_ps128(sum4)); 
+} + +BLAS_FINLINE double v_sum_f64(v_f64 a) +{ + __m512d h64 = _mm512_shuffle_f64x2(a, a, _MM_SHUFFLE(3, 2, 3, 2)); + __m512d sum32 = _mm512_add_pd(a, h64); + __m512d h32 = _mm512_permutex_pd(sum32, _MM_SHUFFLE(1, 0, 3, 2)); + __m512d sum16 = _mm512_add_pd(sum32, h32); + __m512d h16 = _mm512_permute_pd(sum16, _MM_SHUFFLE(2, 3, 0, 1)); + __m512d sum8 = _mm512_add_pd(sum16, h16); + return _mm_cvtsd_f64(_mm512_castpd512_pd128(sum8)); +} +/*************************** + * memory + ***************************/ +// unaligned load +#define v_loadu_f32(PTR) _mm512_loadu_ps((const __m512*)(PTR)) +#define v_loadu_f64(PTR) _mm512_loadu_pd((const __m512*)(PTR)) +#define v_storeu_f32 _mm512_storeu_ps +#define v_storeu_f64 _mm512_storeu_pd +#define v_setall_f32(VAL) _mm512_set1_ps(VAL) +#define v_setall_f64(VAL) _mm512_set1_pd(VAL) +#define v_zero_f32 _mm512_setzero_ps +#define v_zero_f64 _mm512_setzero_pd diff --git a/kernel/simd/intrin_neon.h b/kernel/simd/intrin_neon.h new file mode 100644 index 000000000..cd44599fe --- /dev/null +++ b/kernel/simd/intrin_neon.h @@ -0,0 +1,78 @@ +#define V_SIMD 128 +#ifdef __aarch64__ + #define V_SIMD_F64 1 +#else + #define V_SIMD_F64 0 +#endif +/*************************** + * Data Type + ***************************/ +typedef float32x4_t v_f32; +#if V_SIMD_F64 + typedef float64x2_t v_f64; +#endif +#define v_nlanes_f32 4 +#define v_nlanes_f64 2 +/*************************** + * Arithmetic + ***************************/ +#define v_add_f32 vaddq_f32 +#define v_add_f64 vaddq_f64 +#define v_sub_f32 vsubq_f32 +#define v_sub_f64 vsubq_f64 +#define v_mul_f32 vmulq_f32 +#define v_mul_f64 vmulq_f64 + +// FUSED F32 +#ifdef HAVE_VFPV4 // FMA + // multiply and add, a*b + c + BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c) + { return vfmaq_f32(c, a, b); } + // multiply and subtract, a*b - c + BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c) + { return vfmaq_f32(vnegq_f32(c), a, b); } +#else + // multiply and add, a*b + c + BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c) + { return vmlaq_f32(c, a, b); } + // multiply and subtract, a*b - c + BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c) + { return vmlaq_f32(vnegq_f32(c), a, b); } +#endif + +// FUSED F64 +#if V_SIMD_F64 + BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c) + { return vfmaq_f64(c, a, b); } + BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c) + { return vfmaq_f64(vnegq_f64(c), a, b); } +#endif + +// Horizontal add: Calculates the sum of all vector elements. 
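+// The ARMv7-safe reduction below first adds the high and low halves of the vector, then pairwise-adds the remaining two lanes; on AArch64 the single intrinsic vaddvq_f32 would yield the same horizontal sum.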
+BLAS_FINLINE float v_sum_f32(float32x4_t a) +{ + float32x2_t r = vadd_f32(vget_high_f32(a), vget_low_f32(a)); + return vget_lane_f32(vpadd_f32(r, r), 0); +} + +#if V_SIMD_F64 + BLAS_FINLINE double v_sum_f64(float64x2_t a) + { + return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); + } +#endif + +/*************************** + * memory + ***************************/ +// unaligned load +#define v_loadu_f32(a) vld1q_f32((const float*)a) +#define v_storeu_f32 vst1q_f32 +#define v_setall_f32(VAL) vdupq_n_f32(VAL) +#define v_zero_f32() vdupq_n_f32(0.0f) +#if V_SIMD_F64 + #define v_loadu_f64(a) vld1q_f64((const double*)a) + #define v_storeu_f64 vst1q_f64 + #define v_setall_f64 vdupq_n_f64 + #define v_zero_f64() vdupq_n_f64(0.0) +#endif \ No newline at end of file diff --git a/kernel/simd/intrin_sse.h b/kernel/simd/intrin_sse.h new file mode 100644 index 000000000..6a542072e --- /dev/null +++ b/kernel/simd/intrin_sse.h @@ -0,0 +1,80 @@ +#define V_SIMD 128 +#define V_SIMD_F64 1 +/*************************** + * Data Type + ***************************/ +typedef __m128 v_f32; +typedef __m128d v_f64; +#define v_nlanes_f32 4 +#define v_nlanes_f64 2 +/*************************** + * Arithmetic + ***************************/ +#define v_add_f32 _mm_add_ps +#define v_add_f64 _mm_add_pd +#define v_sub_f32 _mm_sub_ps +#define v_sub_f64 _mm_sub_pd +#define v_mul_f32 _mm_mul_ps +#define v_mul_f64 _mm_mul_pd +#ifdef HAVE_FMA3 + // multiply and add, a*b + c + #define v_muladd_f32 _mm_fmadd_ps + #define v_muladd_f64 _mm_fmadd_pd + // multiply and subtract, a*b - c + #define v_mulsub_f32 _mm_fmsub_ps + #define v_mulsub_f64 _mm_fmsub_pd +#elif defined(HAVE_FMA4) + // multiply and add, a*b + c + #define v_muladd_f32 _mm_macc_ps + #define v_muladd_f64 _mm_macc_pd + // multiply and subtract, a*b - c + #define v_mulsub_f32 _mm_msub_ps + #define v_mulsub_f64 _mm_msub_pd +#else + // multiply and add, a*b + c + BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c) + { return v_add_f32(v_mul_f32(a, b), c); } + BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c) + { return v_add_f64(v_mul_f64(a, b), c); } + // multiply and subtract, a*b - c + BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c) + { return v_sub_f32(v_mul_f32(a, b), c); } + BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c) + { return v_sub_f64(v_mul_f64(a, b), c); } +#endif // HAVE_FMA3 + +// Horizontal add: Calculates the sum of all vector elements. 
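+// With SSE3, two haddps steps collapse the four lanes to one; the pre-SSE3 path reaches the same sum via movehl/shuffle/add using only baseline SSE instructions.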
+BLAS_FINLINE float v_sum_f32(__m128 a) +{ +#ifdef HAVE_SSE3 + __m128 sum_halves = _mm_hadd_ps(a, a); + return _mm_cvtss_f32(_mm_hadd_ps(sum_halves, sum_halves)); +#else + __m128 t1 = _mm_movehl_ps(a, a); + __m128 t2 = _mm_add_ps(a, t1); + __m128 t3 = _mm_shuffle_ps(t2, t2, 1); + __m128 t4 = _mm_add_ss(t2, t3); + return _mm_cvtss_f32(t4); +#endif +} + +BLAS_FINLINE double v_sum_f64(__m128d a) +{ +#ifdef HAVE_SSE3 + return _mm_cvtsd_f64(_mm_hadd_pd(a, a)); +#else + return _mm_cvtsd_f64(_mm_add_pd(a, _mm_unpackhi_pd(a, a))); +#endif +} +/*************************** + * memory + ***************************/ +// unaligned load +#define v_loadu_f32 _mm_loadu_ps +#define v_loadu_f64 _mm_loadu_pd +#define v_storeu_f32 _mm_storeu_ps +#define v_storeu_f64 _mm_storeu_pd +#define v_setall_f32(VAL) _mm_set1_ps(VAL) +#define v_setall_f64(VAL) _mm_set1_pd(VAL) +#define v_zero_f32 _mm_setzero_ps +#define v_zero_f64 _mm_setzero_pd \ No newline at end of file diff --git a/kernel/sparc/KERNEL.sparc b/kernel/sparc/KERNEL.sparc index 2e8319ce5..1a2e9671a 100644 --- a/kernel/sparc/KERNEL.sparc +++ b/kernel/sparc/KERNEL.sparc @@ -54,3 +54,13 @@ ZTRSMKERNEL_LN = ztrsm_kernel_LN.S ZTRSMKERNEL_LT = ztrsm_kernel_LT.S ZTRSMKERNEL_RN = ztrsm_kernel_LT.S ZTRSMKERNEL_RT = ztrsm_kernel_RT.S + + +SDOTKERNEL = ../generic/dot.c +SDSDOTKERNEL = ../generic/dot.c +DSDOTKERNEL = ../generic/dot.c +DDOTKERNEL = ../generic/dot.c +CDOTKERNEL = ../arm/zdot.c +ZDOTKERNEL = ../arm/zdot.c +CSWAPKERNEL = ../arm/zswap.c +ZSWAPKERNEL = ../arm/zswap.c diff --git a/kernel/x86/lsame.S b/kernel/x86/lsame.S index 3ac7a7314..2a2ab2bb5 100644 --- a/kernel/x86/lsame.S +++ b/kernel/x86/lsame.S @@ -56,13 +56,13 @@ #ifndef HAVE_CMOV movl %eax, %ecx subl $32, %ecx - jle .L1 + jl .L1 movl %ecx, %eax .L1: movl %edx, %ecx subl $32, %ecx - jle .L2 + jl .L2 movl %ecx, %edx .L2: subl %eax, %edx diff --git a/kernel/x86/trsm_kernel_LN_2x4_penryn.S b/kernel/x86/trsm_kernel_LN_2x4_penryn.S index 34653d400..fde9eba8e 100644 --- a/kernel/x86/trsm_kernel_LN_2x4_penryn.S +++ b/kernel/x86/trsm_kernel_LN_2x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_LN_4x4_penryn.S b/kernel/x86/trsm_kernel_LN_4x4_penryn.S index 492f34344..fddf7560f 100644 --- a/kernel/x86/trsm_kernel_LN_4x4_penryn.S +++ b/kernel/x86/trsm_kernel_LN_4x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_LT_2x4_penryn.S b/kernel/x86/trsm_kernel_LT_2x4_penryn.S index 6840c54ad..33afd2a61 100644 --- a/kernel/x86/trsm_kernel_LT_2x4_penryn.S +++ b/kernel/x86/trsm_kernel_LT_2x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_LT_4x4_penryn.S 
b/kernel/x86/trsm_kernel_LT_4x4_penryn.S index e2f731fca..b05bd6ee5 100644 --- a/kernel/x86/trsm_kernel_LT_4x4_penryn.S +++ b/kernel/x86/trsm_kernel_LT_4x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_RT_2x4_penryn.S b/kernel/x86/trsm_kernel_RT_2x4_penryn.S index 11825429e..f960559a6 100644 --- a/kernel/x86/trsm_kernel_RT_2x4_penryn.S +++ b/kernel/x86/trsm_kernel_RT_2x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_RT_4x4_penryn.S b/kernel/x86/trsm_kernel_RT_4x4_penryn.S index 4c054f399..cf842c9b5 100644 --- a/kernel/x86/trsm_kernel_RT_4x4_penryn.S +++ b/kernel/x86/trsm_kernel_RT_4x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/ztrsm_kernel_LN_2x2_penryn.S b/kernel/x86/ztrsm_kernel_LN_2x2_penryn.S index e67496736..63c44c27a 100644 --- a/kernel/x86/ztrsm_kernel_LN_2x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_LN_2x2_penryn.S @@ -61,7 +61,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86/ztrsm_kernel_LT_1x2_penryn.S b/kernel/x86/ztrsm_kernel_LT_1x2_penryn.S index 498057697..4cb01e50a 100644 --- a/kernel/x86/ztrsm_kernel_LT_1x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_LT_1x2_penryn.S @@ -63,7 +63,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86/ztrsm_kernel_LT_2x2_penryn.S b/kernel/x86/ztrsm_kernel_LT_2x2_penryn.S index f3072983d..09d5d8e43 100644 --- a/kernel/x86/ztrsm_kernel_LT_2x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_LT_2x2_penryn.S @@ -61,7 +61,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86/ztrsm_kernel_RT_1x2_penryn.S b/kernel/x86/ztrsm_kernel_RT_1x2_penryn.S index 879ae9c38..7d129e54c 100644 --- a/kernel/x86/ztrsm_kernel_RT_1x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_RT_1x2_penryn.S @@ -63,7 +63,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || 
defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86/ztrsm_kernel_RT_2x2_penryn.S b/kernel/x86/ztrsm_kernel_RT_2x2_penryn.S index 6c308197b..d33599317 100644 --- a/kernel/x86/ztrsm_kernel_RT_2x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_RT_2x2_penryn.S @@ -61,7 +61,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86_64/KERNEL b/kernel/x86_64/KERNEL index 4874711bb..b92f480e9 100644 --- a/kernel/x86_64/KERNEL +++ b/kernel/x86_64/KERNEL @@ -146,6 +146,18 @@ ifndef XDOTKERNEL XDOTKERNEL = zdot.S endif +ifndef SBDOTKERNEL +SBDOTKERNEL = sbdot.c +endif + +ifndef TOBF16KERNEL +TOBF16KERNEL = tobf16.c +endif + +ifndef BF16TOKERNEL +BF16TOKERNEL = bf16to.c +endif + ifndef ISAMAXKERNEL ISAMAXKERNEL = iamax_sse.S endif @@ -372,6 +384,14 @@ endif GEMVDEP = ../l2param.h +ifndef SBGEMVNKERNEL +SBGEMVNKERNEL = sbgemv_n.c +endif + +ifndef SBGEMVTKERNEL +SBGEMVTKERNEL = sbgemv_t.c +endif + ifndef SGEMVNKERNEL SGEMVNKERNEL = sgemv_n.c endif @@ -466,3 +486,6 @@ XTRSMKERNEL_RN = xtrsm_kernel_LT_1x1.S XTRSMKERNEL_RT = xtrsm_kernel_LT_1x1.S XGEMM3MKERNEL = xgemm3m_kernel_2x2.S + +SSUMKERNEL = ../arm/sum.c +DSUMKERNEL = ../arm/sum.c diff --git a/kernel/x86_64/KERNEL.COOPERLAKE b/kernel/x86_64/KERNEL.COOPERLAKE new file mode 100644 index 000000000..0b2f3c0ed --- /dev/null +++ b/kernel/x86_64/KERNEL.COOPERLAKE @@ -0,0 +1 @@ +include $(KERNELDIR)/KERNEL.SKYLAKEX diff --git a/kernel/x86_64/KERNEL.HASWELL b/kernel/x86_64/KERNEL.HASWELL index f98728a41..81eaf96ac 100644 --- a/kernel/x86_64/KERNEL.HASWELL +++ b/kernel/x86_64/KERNEL.HASWELL @@ -31,11 +31,11 @@ DAXPYKERNEL = daxpy.c CAXPYKERNEL = caxpy.c ZAXPYKERNEL = zaxpy.c -STRMMKERNEL = sgemm_kernel_16x4_haswell.S -SGEMMKERNEL = sgemm_kernel_16x4_haswell.S +STRMMKERNEL = sgemm_kernel_8x4_haswell.c +SGEMMKERNEL = sgemm_kernel_8x4_haswell_2.c SGEMM_BETA = sgemm_beta_skylakex.c -SGEMMINCOPY = ../generic/gemm_ncopy_16.c -SGEMMITCOPY = ../generic/gemm_tcopy_16.c +SGEMMINCOPY = ../generic/gemm_ncopy_8.c +SGEMMITCOPY = ../generic/gemm_tcopy_8.c SGEMMONCOPY = sgemm_ncopy_4_skylakex.c SGEMMOTCOPY = ../generic/gemm_tcopy_4.c SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) @@ -56,7 +56,7 @@ DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CTRMMKERNEL = cgemm_kernel_8x2_haswell.S -CGEMMKERNEL = cgemm_kernel_8x2_haswell.S +CGEMMKERNEL = cgemm_kernel_8x2_haswell.c CGEMMINCOPY = ../generic/zgemm_ncopy_8.c CGEMMITCOPY = ../generic/zgemm_tcopy_8.c CGEMMONCOPY = ../generic/zgemm_ncopy_2.c @@ -67,7 +67,7 @@ CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZTRMMKERNEL = zgemm_kernel_4x2_haswell.S -ZGEMMKERNEL = zgemm_kernel_4x2_haswell.S +ZGEMMKERNEL = zgemm_kernel_4x2_haswell.c ZGEMMINCOPY = ../generic/zgemm_ncopy_4.c ZGEMMITCOPY = ../generic/zgemm_tcopy_4.c ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c @@ -77,10 +77,10 @@ ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) -STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c 
-STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +STRSMKERNEL_LN = strsm_kernel_8x4_haswell_LN.c +STRSMKERNEL_LT = strsm_kernel_8x4_haswell_LT.c +STRSMKERNEL_RN = strsm_kernel_8x4_haswell_RN.c +STRSMKERNEL_RT = strsm_kernel_8x4_haswell_RT.c DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c @@ -97,6 +97,11 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -CGEMM3MKERNEL = zgemm3m_kernel_4x8_nehalem.S -ZGEMM3MKERNEL = zgemm3m_kernel_2x8_nehalem.S +CGEMM3MKERNEL = cgemm3m_kernel_8x4_haswell.c +ZGEMM3MKERNEL = zgemm3m_kernel_4x4_haswell.c +SASUMKERNEL = sasum.c +DASUMKERNEL = dasum.c + +SROTKERNEL = srot.c +DROTKERNEL = drot.c diff --git a/kernel/x86_64/KERNEL.SKYLAKEX b/kernel/x86_64/KERNEL.SKYLAKEX index d61c51628..3d71584fe 100644 --- a/kernel/x86_64/KERNEL.SKYLAKEX +++ b/kernel/x86_64/KERNEL.SKYLAKEX @@ -1,18 +1,32 @@ include $(KERNELDIR)/KERNEL.HASWELL -SGEMMKERNEL = sgemm_kernel_16x4_skylakex.c - +SGEMMKERNEL = sgemm_kernel_16x4_skylakex_3.c +STRMMKERNEL = sgemm_kernel_16x4_skylakex_2.c SGEMMINCOPY = ../generic/gemm_ncopy_16.c SGEMMITCOPY = sgemm_tcopy_16_skylakex.c SGEMMONCOPY = sgemm_ncopy_4_skylakex.c SGEMMOTCOPY = ../generic/gemm_tcopy_4.c +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -#DGEMMKERNEL = dgemm_kernel_4x8_skylakex.c - -#DGEMMINCOPY = dgemm_ncopy_8_skylakex.c -#DGEMMITCOPY = dgemm_tcopy_8_skylakex.c -DGEMMONCOPY = dgemm_ncopy_8_skylakex.c -DGEMMOTCOPY = dgemm_tcopy_8_skylakex.c +DGEMMKERNEL = dgemm_kernel_16x2_skylakex.c +DTRMMKERNEL = dgemm_kernel_16x2_skylakex.c +DGEMMINCOPY = ../generic/gemm_ncopy_16.c +DGEMMITCOPY = dgemm_tcopy_16_skylakex.c +DGEMMONCOPY = ../generic/gemm_ncopy_2.c +DGEMMOTCOPY = ../generic/gemm_tcopy_2.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c SGEMM_BETA = sgemm_beta_skylakex.c DGEMM_BETA = dgemm_beta_skylakex.c + +CGEMMKERNEL = cgemm_kernel_8x2_skylakex.c +ZGEMMKERNEL = zgemm_kernel_4x2_skylakex.c + +CSCALKERNEL = ../arm/zscal.c +ZSCALKERNEL = ../arm/zscal.c + +CASUMKERNEL = casum.c +ZASUMKERNEL = zasum.c diff --git a/kernel/x86_64/KERNEL.ZEN b/kernel/x86_64/KERNEL.ZEN index be4503d47..7bb308fea 100644 --- a/kernel/x86_64/KERNEL.ZEN +++ b/kernel/x86_64/KERNEL.ZEN @@ -30,10 +30,10 @@ DAXPYKERNEL = daxpy.c CAXPYKERNEL = caxpy.c ZAXPYKERNEL = zaxpy.c -STRMMKERNEL = sgemm_kernel_16x4_haswell.S -SGEMMKERNEL = sgemm_kernel_16x4_haswell.S -SGEMMINCOPY = ../generic/gemm_ncopy_16.c -SGEMMITCOPY = ../generic/gemm_tcopy_16.c +STRMMKERNEL = sgemm_kernel_8x4_haswell.c +SGEMMKERNEL = sgemm_kernel_8x4_haswell_2.c +SGEMMINCOPY = ../generic/gemm_ncopy_8.c +SGEMMITCOPY = ../generic/gemm_tcopy_8.c SGEMMONCOPY = ../generic/gemm_ncopy_4.c SGEMMOTCOPY = ../generic/gemm_tcopy_4.c SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) @@ -53,7 +53,7 @@ DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CTRMMKERNEL = cgemm_kernel_8x2_haswell.S -CGEMMKERNEL = cgemm_kernel_8x2_haswell.S +CGEMMKERNEL = cgemm_kernel_8x2_haswell.c CGEMMINCOPY = ../generic/zgemm_ncopy_8.c CGEMMITCOPY = ../generic/zgemm_tcopy_8.c CGEMMONCOPY = ../generic/zgemm_ncopy_2.c @@ -64,7 +64,7 @@ CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZTRMMKERNEL = zgemm_kernel_4x2_haswell.S -ZGEMMKERNEL = zgemm_kernel_4x2_haswell.S +ZGEMMKERNEL = 
zgemm_kernel_4x2_haswell.c ZGEMMINCOPY = ../generic/zgemm_ncopy_4.c ZGEMMITCOPY = ../generic/zgemm_tcopy_4.c ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c @@ -74,10 +74,10 @@ ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) -STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +STRSMKERNEL_LN = strsm_kernel_8x4_haswell_LN.c +STRSMKERNEL_LT = strsm_kernel_8x4_haswell_LT.c +STRSMKERNEL_RN = strsm_kernel_8x4_haswell_RN.c +STRSMKERNEL_RT = strsm_kernel_8x4_haswell_RT.c DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c @@ -94,6 +94,6 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -CGEMM3MKERNEL = zgemm3m_kernel_4x8_nehalem.S -ZGEMM3MKERNEL = zgemm3m_kernel_2x8_nehalem.S +CGEMM3MKERNEL = cgemm3m_kernel_8x4_haswell.c +ZGEMM3MKERNEL = zgemm3m_kernel_4x4_haswell.c diff --git a/kernel/x86_64/amax.S b/kernel/x86_64/amax.S index 0e9bf4db4..1498bb226 100644 --- a/kernel/x86_64/amax.S +++ b/kernel/x86_64/amax.S @@ -54,6 +54,10 @@ PROLOGUE PROFCODE + +#ifdef WINDOWS_ABI + emms +#endif salq $BASE_SHIFT, INCX diff --git a/kernel/x86_64/asum.S b/kernel/x86_64/asum.S index 31f973894..a2cbfd480 100644 --- a/kernel/x86_64/asum.S +++ b/kernel/x86_64/asum.S @@ -49,6 +49,10 @@ PROLOGUE PROFCODE + +#ifdef WINDOWS_ABI + emms +#endif fldz testq M, M diff --git a/kernel/x86_64/bf16_common_macros.h b/kernel/x86_64/bf16_common_macros.h new file mode 100644 index 000000000..1014ecc4d --- /dev/null +++ b/kernel/x86_64/bf16_common_macros.h @@ -0,0 +1,795 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ +#ifndef __BF16_COMMON_MACROS +#define __BF16_COMMON_MACROS + +#include <immintrin.h> +
+#define EXTRACT_LOW_256_FROM_512_2X(reg256, reg512) \ + reg256##_0 = _mm512_castps512_ps256(reg512##_0); \ + reg256##_1 = _mm512_castps512_ps256(reg512##_1); + +
+#define BF16_MATRIX_LOAD_8x32(regArray, a, lda, idx_m, idx_n) \ + regArray##_0 = _mm512_loadu_si512(&a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm512_loadu_si512(&a[(idx_m+1)*lda + idx_n]); \ + regArray##_2 = _mm512_loadu_si512(&a[(idx_m+2)*lda + idx_n]); \ + regArray##_3 = _mm512_loadu_si512(&a[(idx_m+3)*lda + idx_n]); \ + regArray##_4 = _mm512_loadu_si512(&a[(idx_m+4)*lda + idx_n]); \ + regArray##_5 = _mm512_loadu_si512(&a[(idx_m+5)*lda + idx_n]); \ + regArray##_6 = _mm512_loadu_si512(&a[(idx_m+6)*lda + idx_n]); \ + regArray##_7 = _mm512_loadu_si512(&a[(idx_m+7)*lda + idx_n]); + +
+#define BF16_MATRIX_LOAD_8x16(regArray, a, lda, idx_m, idx_n) \ + regArray##_0 = _mm256_loadu_si256(&a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm256_loadu_si256(&a[(idx_m+1)*lda + idx_n]); \ + regArray##_2 = _mm256_loadu_si256(&a[(idx_m+2)*lda + idx_n]); \ + regArray##_3 = _mm256_loadu_si256(&a[(idx_m+3)*lda + idx_n]); \ + regArray##_4 = _mm256_loadu_si256(&a[(idx_m+4)*lda + idx_n]); \ + regArray##_5 = _mm256_loadu_si256(&a[(idx_m+5)*lda + idx_n]); \ + regArray##_6 = _mm256_loadu_si256(&a[(idx_m+6)*lda + idx_n]); \ + regArray##_7 = _mm256_loadu_si256(&a[(idx_m+7)*lda + idx_n]); + +
+#define BF16_MATRIX_LOAD_8x8(regArray, a, lda, idx_m, idx_n) \ + regArray##_0 = _mm_loadu_si128(&a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm_loadu_si128(&a[(idx_m+1)*lda + idx_n]); \ + regArray##_2 = _mm_loadu_si128(&a[(idx_m+2)*lda + idx_n]); \ + regArray##_3 = _mm_loadu_si128(&a[(idx_m+3)*lda + idx_n]); \ + regArray##_4 = _mm_loadu_si128(&a[(idx_m+4)*lda + idx_n]); \ + regArray##_5 = _mm_loadu_si128(&a[(idx_m+5)*lda + idx_n]); \ + regArray##_6 = _mm_loadu_si128(&a[(idx_m+6)*lda + idx_n]); \ + regArray##_7 = _mm_loadu_si128(&a[(idx_m+7)*lda + idx_n]); + +
+#define BF16_MATRIX_LOAD_1x32(regArray, a, lda, idx_m, idx_n) \ + regArray = _mm512_loadu_si512(&a[idx_m*lda + idx_n]); + +
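+/* Illustrative note (editorial, not from the original patch): the _maskz_
+   load variants below guard ragged right edges of the matrix. For a row
+   with only `tail` (< 32) bf16 elements remaining, a caller would build the
+   mask as, e.g.,
+       __mmask32 mask = (__mmask32)((1ULL << tail) - 1);
+   masked-off lanes are loaded as zero, so they contribute nothing to the
+   dot-product accumulation. */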
+#define BF16_MATRIX_MASKZ_LOAD_8x32(regArray, a, lda, idx_m, idx_n, mask) \ + regArray##_0 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+1)*lda + idx_n]); \ + regArray##_2 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+2)*lda + idx_n]); \ + regArray##_3 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+3)*lda + idx_n]); \ + regArray##_4 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+4)*lda + idx_n]); \ + regArray##_5 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+5)*lda + idx_n]); \ + regArray##_6 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+6)*lda + idx_n]); \ + regArray##_7 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+7)*lda + idx_n]); + +
+#define BF16_MATRIX_MASKZ_LOAD_8x16(regArray, a, lda, idx_m, idx_n, mask) \ + regArray##_0 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+1)*lda + idx_n]); \ + regArray##_2 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+2)*lda + idx_n]); \ + regArray##_3 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+3)*lda + idx_n]); \ + regArray##_4 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+4)*lda + idx_n]); \ + regArray##_5 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+5)*lda + idx_n]); \ + regArray##_6 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+6)*lda + idx_n]); \ + regArray##_7 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+7)*lda + idx_n]); + +
+#define BF16_MATRIX_MASKZ_LOAD_8x8(regArray, a, lda, idx_m, idx_n, mask) \ + regArray##_0 = _mm_maskz_loadu_epi16(mask, &a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm_maskz_loadu_epi16(mask, &a[(idx_m+1)*lda + idx_n]); \ + regArray##_2 = _mm_maskz_loadu_epi16(mask, &a[(idx_m+2)*lda + idx_n]); \ + regArray##_3 = _mm_maskz_loadu_epi16(mask, &a[(idx_m+3)*lda + idx_n]); \ + regArray##_4 = _mm_maskz_loadu_epi16(mask, &a[(idx_m+4)*lda + idx_n]); \ + regArray##_5 = _mm_maskz_loadu_epi16(mask, &a[(idx_m+5)*lda + idx_n]); \ + regArray##_6 = _mm_maskz_loadu_epi16(mask, &a[(idx_m+6)*lda + idx_n]); \ + regArray##_7 = _mm_maskz_loadu_epi16(mask, &a[(idx_m+7)*lda + idx_n]); + +
+#define BF16_MATRIX_MASKZ_LOAD_4x32(regArray, a, lda, idx_m, idx_n, mask) \ + regArray##_0 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+1)*lda + idx_n]); \ + regArray##_2 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+2)*lda + idx_n]); \ + regArray##_3 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+3)*lda + idx_n]); + +
+#define BF16_MATRIX_MASKZ_LOAD_4x16(regArray, a, lda, idx_m, idx_n, mask) \ + regArray##_0 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+1)*lda + idx_n]); \ + regArray##_2 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+2)*lda + idx_n]); \ + regArray##_3 = _mm256_maskz_loadu_epi16(mask, &a[(idx_m+3)*lda + idx_n]); + +
+#define BF16_MATRIX_MASKZ_LOAD_8x32_2(regArray, a, lda, idx_m, idx_n, mask) \ + regArray##_0 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+2)*lda + idx_n]); \ + regArray##_2 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+4)*lda + idx_n]); \ + regArray##_3 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+6)*lda + idx_n]); \ + regArray##_4 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+8)*lda + idx_n]); \ + regArray##_5 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+10)*lda + idx_n]); \ + regArray##_6 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+12)*lda + idx_n]); \ + regArray##_7 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+14)*lda + idx_n]); + +
+#define BF16_MATRIX_MASKZ_LOAD_4x32_2(regArray, a, lda, idx_m, idx_n, mask) \ + regArray##_0 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+0)*lda + idx_n]); \ + regArray##_1 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+2)*lda + idx_n]); \ + regArray##_2 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+4)*lda + idx_n]); \ + regArray##_3 = _mm512_maskz_loadu_epi16(mask, &a[(idx_m+6)*lda + idx_n]); +
+#define BF16_MATRIX_MASKZ_LOAD_1x32(regArray, a, lda, idx_m, idx_n, mask) \ + regArray = _mm512_maskz_loadu_epi16(mask, &a[idx_m*lda + idx_n]); +
+#define BF16_VECTOR_LOAD_1x32(reg, x, idx_n) \ + reg = _mm512_loadu_si512(x + idx_n); + +
+#define BF16_VECTOR_LOAD_1x16(reg, x, idx_n) \ + reg = _mm256_loadu_si256(x + idx_n); + +
+#define BF16_VECTOR_LOAD_1x8(reg, x, idx_n) \ + reg = _mm_loadu_si128(x + idx_n); + +
+#define BF16_VECTOR_MASKZ_LOAD_1x32(reg, x, idx_n, mask) \ + reg = _mm512_maskz_loadu_epi16(mask, x + idx_n); + +
+#define BF16_VECTOR_MASKZ_LOAD_1x16(reg, x, idx_n, mask) \ + reg = _mm256_maskz_loadu_epi16(mask, x + idx_n); + +
+#define BF16_VECTOR_MASKZ_LOAD_1x8(reg, x, idx_n, mask) \ + reg = _mm_maskz_loadu_epi16(mask, x + idx_n); + +
+/* 2-step interleave for matrix against 8 rows with 32 BF16 elements per row + Input - register array of 8 rows of row-major matrix + Output - the output of Step 2 + + Step 1: 2-element interleave for matrix + |a0|a1|b0|b1|a2|a3|b2|b3|a8 |a9 |b8 |b9 |a10|a11|b10|b11|a16|a17|b16|b17|a18|a19|b18|b19|a24|a25|b24|b25|a26|a27|b26|b27 + |c0|c1|d0|d1|c2|c3|d2|d3|c8 |c9 |d8 |d9 |c10|c11|d10|d11|c16|c17|d16|d17|c18|c19|d18|d19|c24|c25|d24|d25|c26|c27|d26|d27 + |e0|e1|f0|f1|e2|e3|f2|f3|e8 |e9 |f8 |f9 |e10|e11|f10|f11|e16|e17|f16|f17|e18|e19|f18|f19|e24|e25|f24|f25|e26|e27|f26|f27 + |g0|g1|h0|h1|g2|g3|h2|h3|g8 |g9 |h8 |h9 |g10|g11|h10|h11|g16|g17|h16|h17|g18|g19|h18|h19|g24|g25|h24|h25|g26|g27|h26|h27 + |a4|a5|b4|b5|a6|a7|b6|b7|a12|a13|b12|b13|a14|a15|b14|b15|a20|a21|b20|b21|a22|a23|b22|b23|a28|a29|b28|b29|a30|a31|b30|b31 + |c4|c5|d4|d5|c6|c7|d6|d7|c12|c13|d12|d13|c14|c15|d14|d15|c20|c21|d20|d21|c22|c23|d22|d23|c28|c29|d28|d29|c30|c31|d30|d31 + |e4|e5|f4|f5|e6|e7|f6|f7|e12|e13|f12|f13|e14|e15|f14|f15|e20|e21|f20|f21|e22|e23|f22|f23|e28|e29|f28|f29|e30|e31|f30|f31 + |g4|g5|h4|h5|g6|g7|h6|h7|g12|g13|h12|h13|g14|g15|h14|h15|g20|g21|h20|h21|g22|g23|h22|h23|g28|g29|h28|h29|g30|g31|h30|h31 + + Step 2: 4-element interleave for matrix + |a0|a1|b0|b1|c0|c1|d0|d1|a8 |a9 |b8 |b9 |c8 |c9 |d8 |d9 |a16|a17|b16|b17|c16|c17|d16|d17|a24|a25|b24|b25|c24|c25|d24|d25 + |a2|a3|b2|b3|c2|c3|d2|d3|a10|a11|b10|b11|c10|c11|d10|d11|a18|a19|b18|b19|c18|c19|d18|d19|a26|a27|b26|b27|c26|c27|d26|d27 + |e0|e1|f0|f1|g0|g1|h0|h1|e8 |e9 |f8 |f9 |g8 |g9 |h8 |h9 |e16|e17|f16|f17|g16|g17|h16|h17|e24|e25|f24|f25|g24|g25|h24|h25 + |e2|e3|f2|f3|g2|g3|h2|h3|e10|e11|f10|f11|g10|g11|h10|h11|e18|e19|f18|f19|g18|g19|h18|h19|e26|e27|f26|f27|g26|g27|h26|h27 + |a4|a5|b4|b5|c4|c5|d4|d5|a12|a13|b12|b13|c12|c13|d12|d13|a20|a21|b20|b21|c20|c21|d20|d21|a28|a29|b28|b29|c28|c29|d28|d29 + |a6|a7|b6|b7|c6|c7|d6|d7|a14|a15|b14|b15|c14|c15|d14|d15|a22|a23|b22|b23|c22|c23|d22|d23|a30|a31|b30|b31|c30|c31|d30|d31 + |e4|e5|f4|f5|g4|g5|h4|h5|e12|e13|f12|f13|g12|g13|h12|h13|e20|e21|f20|f21|g20|g21|h20|h21|e28|e29|f28|f29|g28|g29|h28|h29 + |e6|e7|f6|f7|g6|g7|h6|h7|e14|e15|f14|f15|g14|g15|h14|h15|e22|e23|f22|f23|g22|g23|h22|h23|e30|e31|f30|f31|g30|g31|h30|h31 +*/
+#define BF16_INTERLEAVE_8x32(regArray) \ + regArray##_8 = _mm512_unpacklo_epi32(regArray##_0, regArray##_1); \ + regArray##_9 = _mm512_unpacklo_epi32(regArray##_2, regArray##_3); \ + regArray##_10 = _mm512_unpacklo_epi32(regArray##_4, regArray##_5); \ + regArray##_11 = _mm512_unpacklo_epi32(regArray##_6, regArray##_7); \ + regArray##_12 = _mm512_unpackhi_epi32(regArray##_0, regArray##_1); \ + regArray##_13 = _mm512_unpackhi_epi32(regArray##_2, regArray##_3); \ + regArray##_14 = _mm512_unpackhi_epi32(regArray##_4, regArray##_5); \ + regArray##_15 = _mm512_unpackhi_epi32(regArray##_6, regArray##_7); \ + \ + regArray##_0 = _mm512_unpacklo_epi64(regArray##_8, regArray##_9); \ + regArray##_1 = _mm512_unpackhi_epi64(regArray##_8, regArray##_9); \ + regArray##_2 = _mm512_unpacklo_epi64(regArray##_10, regArray##_11); \ + regArray##_3 = _mm512_unpackhi_epi64(regArray##_10, regArray##_11); \ + regArray##_4 = _mm512_unpacklo_epi64(regArray##_12, regArray##_13); \ + regArray##_5 = _mm512_unpackhi_epi64(regArray##_12, regArray##_13); \ + regArray##_6 = _mm512_unpacklo_epi64(regArray##_14, regArray##_15); \ + regArray##_7 = _mm512_unpackhi_epi64(regArray##_14, regArray##_15); + +
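+/* Illustrative note (editorial, not from the original patch): the pairing
+   above matches the dpbf16 reduction, which per fp32 lane j computes
+       acc[j] += a[2*j] * b[2*j] + a[2*j+1] * b[2*j+1];
+   after the 2-step interleave, lane j of the accumulator therefore collects
+   the partial dot product of exactly one matrix row (a, b, c or d) against
+   the matching slice of x. */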
+/* 2-step interleave for matrix against 8 rows with 16 BF16 elements per row + Input - register array of 8 rows of row-major matrix + Output - the output of Step 2 + + Step 1: 2-element interleave for matrix + |a0|a1|b0|b1|a2|a3|b2|b3|a8 |a9 |b8 |b9 |a10|a11|b10|b11 + |c0|c1|d0|d1|c2|c3|d2|d3|c8 |c9 |d8 |d9 |c10|c11|d10|d11 + |e0|e1|f0|f1|e2|e3|f2|f3|e8 |e9 |f8 |f9 |e10|e11|f10|f11 + |g0|g1|h0|h1|g2|g3|h2|h3|g8 |g9 |h8 |h9 |g10|g11|h10|h11 + |a4|a5|b4|b5|a6|a7|b6|b7|a12|a13|b12|b13|a14|a15|b14|b15 + |c4|c5|d4|d5|c6|c7|d6|d7|c12|c13|d12|d13|c14|c15|d14|d15 + |e4|e5|f4|f5|e6|e7|f6|f7|e12|e13|f12|f13|e14|e15|f14|f15 + |g4|g5|h4|h5|g6|g7|h6|h7|g12|g13|h12|h13|g14|g15|h14|h15 + + Step 2: 4-element interleave for matrix + |a0|a1|b0|b1|c0|c1|d0|d1|a8 |a9 |b8 |b9 |c8 |c9 |d8 |d9 + |a2|a3|b2|b3|c2|c3|d2|d3|a10|a11|b10|b11|c10|c11|d10|d11 + |e0|e1|f0|f1|g0|g1|h0|h1|e8 |e9 |f8 |f9 |g8 |g9 |h8 |h9 + |e2|e3|f2|f3|g2|g3|h2|h3|e10|e11|f10|f11|g10|g11|h10|h11 + |a4|a5|b4|b5|c4|c5|d4|d5|a12|a13|b12|b13|c12|c13|d12|d13 + |a6|a7|b6|b7|c6|c7|d6|d7|a14|a15|b14|b15|c14|c15|d14|d15 + |e4|e5|f4|f5|g4|g5|h4|h5|e12|e13|f12|f13|g12|g13|h12|h13 + |e6|e7|f6|f7|g6|g7|h6|h7|e14|e15|f14|f15|g14|g15|h14|h15 +*/
+#define BF16_INTERLEAVE_8x16(regArray) \ + regArray##_8 = _mm256_unpacklo_epi32(regArray##_0, regArray##_1); \ + regArray##_9 = _mm256_unpacklo_epi32(regArray##_2, regArray##_3); \ + regArray##_10 = _mm256_unpacklo_epi32(regArray##_4, regArray##_5); \ + regArray##_11 = _mm256_unpacklo_epi32(regArray##_6, regArray##_7); \ + regArray##_12 = _mm256_unpackhi_epi32(regArray##_0, regArray##_1); \ + regArray##_13 = _mm256_unpackhi_epi32(regArray##_2, regArray##_3); \ + regArray##_14 = _mm256_unpackhi_epi32(regArray##_4, regArray##_5); \ + regArray##_15 = _mm256_unpackhi_epi32(regArray##_6, regArray##_7); \ + \ + regArray##_0 = _mm256_unpacklo_epi64(regArray##_8, regArray##_9); \ + regArray##_1 = _mm256_unpackhi_epi64(regArray##_8, regArray##_9); \ + regArray##_2 = _mm256_unpacklo_epi64(regArray##_10, regArray##_11); \ + regArray##_3 = _mm256_unpackhi_epi64(regArray##_10, regArray##_11); \ + regArray##_4 = _mm256_unpacklo_epi64(regArray##_12, regArray##_13); \ + regArray##_5 = _mm256_unpackhi_epi64(regArray##_12, regArray##_13); \ + regArray##_6 = _mm256_unpacklo_epi64(regArray##_14, regArray##_15); \ + regArray##_7 = _mm256_unpackhi_epi64(regArray##_14, regArray##_15); +
+/* 2-step interleave for matrix against 4 rows with 32 BF16 elements per row + Input - register array of 4 rows of row-major matrix + Output - the output of Step 2 + + Step 1: 2-element interleave for matrix + |a0|a1|b0|b1|a2|a3|b2|b3|a8 |a9 |b8 |b9 |a10|a11|b10|b11|a16|a17|b16|b17|a18|a19|b18|b19|a24|a25|b24|b25|a26|a27|b26|b27 + |c0|c1|d0|d1|c2|c3|d2|d3|c8 |c9 |d8 |d9 |c10|c11|d10|d11|c16|c17|d16|d17|c18|c19|d18|d19|c24|c25|d24|d25|c26|c27|d26|d27 + |a4|a5|b4|b5|a6|a7|b6|b7|a12|a13|b12|b13|a14|a15|b14|b15|a20|a21|b20|b21|a22|a23|b22|b23|a28|a29|b28|b29|a30|a31|b30|b31 + |c4|c5|d4|d5|c6|c7|d6|d7|c12|c13|d12|d13|c14|c15|d14|d15|c20|c21|d20|d21|c22|c23|d22|d23|c28|c29|d28|d29|c30|c31|d30|d31 + + Step 2: 4-element interleave for matrix + |a0|a1|b0|b1|c0|c1|d0|d1|a8 |a9 |b8 |b9 |c8 |c9 |d8 |d9 |a16|a17|b16|b17|c16|c17|d16|d17|a24|a25|b24|b25|c24|c25|d24|d25 + |a2|a3|b2|b3|c2|c3|d2|d3|a10|a11|b10|b11|c10|c11|d10|d11|a18|a19|b18|b19|c18|c19|d18|d19|a26|a27|b26|b27|c26|c27|d26|d27 + |a4|a5|b4|b5|c4|c5|d4|d5|a12|a13|b12|b13|c12|c13|d12|d13|a20|a21|b20|b21|c20|c21|d20|d21|a28|a29|b28|b29|c28|c29|d28|d29 + |a6|a7|b6|b7|c6|c7|d6|d7|a14|a15|b14|b15|c14|c15|d14|d15|a22|a23|b22|b23|c22|c23|d22|d23|a30|a31|b30|b31|c30|c31|d30|d31 +*/
+#define BF16_INTERLEAVE_4x32(regArray) \ + regArray##_4 = _mm512_unpacklo_epi32(regArray##_0, regArray##_1); \ + regArray##_5 = _mm512_unpacklo_epi32(regArray##_2, regArray##_3); \ + regArray##_6 = _mm512_unpackhi_epi32(regArray##_0, regArray##_1); \ + regArray##_7 = _mm512_unpackhi_epi32(regArray##_2, regArray##_3); \ + \ + regArray##_0 = _mm512_unpacklo_epi64(regArray##_4, regArray##_5); \ + regArray##_1 = _mm512_unpackhi_epi64(regArray##_4, regArray##_5); \ + regArray##_2 = _mm512_unpacklo_epi64(regArray##_6, regArray##_7); \ + regArray##_3 = _mm512_unpackhi_epi64(regArray##_6, regArray##_7); + +
+/* 2-step interleave for matrix against 4 rows with 16 BF16 elements per row + Input - register array of 4 rows of row-major matrix + Output - the output of Step 2 + + Step 1: 2-element interleave for matrix + |a0|a1|b0|b1|a2|a3|b2|b3|a8 |a9 |b8 |b9 |a10|a11|b10|b11 + |c0|c1|d0|d1|c2|c3|d2|d3|c8 |c9 |d8 |d9 |c10|c11|d10|d11 + |a4|a5|b4|b5|a6|a7|b6|b7|a12|a13|b12|b13|a14|a15|b14|b15 + |c4|c5|d4|d5|c6|c7|d6|d7|c12|c13|d12|d13|c14|c15|d14|d15 + + Step 2: 4-element interleave for matrix + |a0|a1|b0|b1|c0|c1|d0|d1|a8 |a9 |b8 |b9 |c8 |c9 |d8 |d9 + |a2|a3|b2|b3|c2|c3|d2|d3|a10|a11|b10|b11|c10|c11|d10|d11 + |a4|a5|b4|b5|c4|c5|d4|d5|a12|a13|b12|b13|c12|c13|d12|d13 + |a6|a7|b6|b7|c6|c7|d6|d7|a14|a15|b14|b15|c14|c15|d14|d15 +*/
+#define BF16_INTERLEAVE_4x16(regArray) \ + regArray##_4 = _mm256_unpacklo_epi32(regArray##_0, regArray##_1); \ + regArray##_5 = _mm256_unpacklo_epi32(regArray##_2, regArray##_3); \ + regArray##_6 = _mm256_unpackhi_epi32(regArray##_0, regArray##_1); \ + regArray##_7 = _mm256_unpackhi_epi32(regArray##_2, regArray##_3); \ + \ + regArray##_0 = _mm256_unpacklo_epi64(regArray##_4, regArray##_5); \ + regArray##_1 = _mm256_unpackhi_epi64(regArray##_4, regArray##_5); \ + regArray##_2 = _mm256_unpacklo_epi64(regArray##_6, regArray##_7); \ + regArray##_3 = _mm256_unpackhi_epi64(regArray##_6, regArray##_7); + +
+/* 2-step interleave for x with 32 BF16 elements + Input - original vector + Output - the output of Step 2 + + Step 1: 2-element interleave for x: + |x0|x1|x0|x1|x2|x3|x2|x3|x8 |x9 |x8 |x9 |x10|x11|x10|x11|x16|x17|x16|x17|x18|x19|x18|x19|x24|x25|x24|x25|x26|x27|x26|x27 + |x4|x5|x4|x5|x6|x7|x6|x7|x12|x13|x12|x13|x14|x15|x14|x15|x20|x21|x20|x21|x22|x23|x22|x23|x28|x29|x28|x29|x30|x31|x30|x31 + + Step 2: 4-element interleave for x: + |x0|x1|x0|x1|x0|x1|x0|x1|x8 |x9 |x8 |x9 |x8 |x9 |x8 |x9 |x16|x17|x16|x17|x16|x17|x16|x17|x24|x25|x24|x25|x24|x25|x24|x25 + |x2|x3|x2|x3|x2|x3|x2|x3|x10|x11|x10|x11|x10|x11|x10|x11|x18|x19|x18|x19|x18|x19|x18|x19|x26|x27|x26|x27|x26|x27|x26|x27 + |x4|x5|x4|x5|x4|x5|x4|x5|x12|x13|x12|x13|x12|x13|x12|x13|x20|x21|x20|x21|x20|x21|x20|x21|x28|x29|x28|x29|x28|x29|x28|x29 + |x6|x7|x6|x7|x6|x7|x6|x7|x14|x15|x14|x15|x14|x15|x14|x15|x22|x23|x22|x23|x22|x23|x22|x23|x30|x31|x30|x31|x30|x31|x30|x31 +*/
+#define BF16_INTERLEAVE_1x32(regArray) \ + regArray##_1 = _mm512_unpacklo_epi32(regArray##_0, regArray##_0); \ + regArray##_3 = _mm512_unpackhi_epi32(regArray##_0, regArray##_0); \ + \ + regArray##_0 = _mm512_unpacklo_epi64(regArray##_1, regArray##_1); \ + regArray##_1 = _mm512_unpackhi_epi64(regArray##_1, regArray##_1); \ + regArray##_2 = _mm512_unpacklo_epi64(regArray##_3, regArray##_3); \ + regArray##_3 = _mm512_unpackhi_epi64(regArray##_3, regArray##_3); + +
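+/* Illustrative note (editorial, not from the original patch): the 1xN
+   expansions duplicate each (x[2k], x[2k+1]) pair four times in place, so
+   that one expanded x register lines up with the four interleaved matrix
+   rows (a/b/c/d) that share an accumulator register in the dot steps that
+   follow. */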
+/* 2-step interleave for x with 16 BF16 elements + Input - original vector + Output - the output of Step 2 + + Step 1: 2-element interleave for x: + |x0|x1|x0|x1|x2|x3|x2|x3|x8 |x9 |x8 |x9 |x10|x11|x10|x11 + |x4|x5|x4|x5|x6|x7|x6|x7|x12|x13|x12|x13|x14|x15|x14|x15 + + Step 2: 4-element interleave for x: + |x0|x1|x0|x1|x0|x1|x0|x1|x8 |x9 |x8 |x9 |x8 |x9 |x8 |x9 + |x2|x3|x2|x3|x2|x3|x2|x3|x10|x11|x10|x11|x10|x11|x10|x11 + |x4|x5|x4|x5|x4|x5|x4|x5|x12|x13|x12|x13|x12|x13|x12|x13 + |x6|x7|x6|x7|x6|x7|x6|x7|x14|x15|x14|x15|x14|x15|x14|x15 +*/
+#define BF16_INTERLEAVE_1x16(regArray) \ + regArray##_1 = _mm256_unpacklo_epi32(regArray##_0, regArray##_0); \ + regArray##_3 = _mm256_unpackhi_epi32(regArray##_0, regArray##_0); \ + \ + regArray##_0 = _mm256_unpacklo_epi64(regArray##_1, regArray##_1); \ + regArray##_1 = _mm256_unpackhi_epi64(regArray##_1, regArray##_1); \ + regArray##_2 = _mm256_unpacklo_epi64(regArray##_3, regArray##_3); \ + regArray##_3 = _mm256_unpackhi_epi64(regArray##_3, regArray##_3); +
+/* 1-step interleave to exchange the high 256 bits and low 256 bits of 4 pairs of registers + |a0|a1|...|a14|a15|i0|i1|...|i14|i15| + |b0|b1|...|b14|b15|j0|j1|...|j14|j15| + |c0|c1|...|c14|c15|k0|k1|...|k14|k15| + |d0|d1|...|d14|d15|l0|l1|...|l14|l15| + |e0|e1|...|e14|e15|m0|m1|...|m14|m15| + |f0|f1|...|f14|f15|n0|n1|...|n14|n15| + |g0|g1|...|g14|g15|o0|o1|...|o14|o15| + |h0|h1|...|h14|h15|p0|p1|...|p14|p15| +*/
+#define BF16_INTERLEAVE256_8x32(regArray) \ + regArray##_0 = _mm512_shuffle_i32x4(regArray##_8, regArray##_12, 0x44); \ + regArray##_1 = _mm512_shuffle_i32x4(regArray##_8, regArray##_12, 0xee); \ + regArray##_2 = _mm512_shuffle_i32x4(regArray##_9, regArray##_13, 0x44); \ + regArray##_3 = _mm512_shuffle_i32x4(regArray##_9, regArray##_13, 0xee); \ + regArray##_4 = _mm512_shuffle_i32x4(regArray##_10, regArray##_14, 0x44); \ + regArray##_5 = _mm512_shuffle_i32x4(regArray##_10, regArray##_14, 0xee); \ + regArray##_6 = _mm512_shuffle_i32x4(regArray##_11, regArray##_15, 0x44); \ + regArray##_7 = _mm512_shuffle_i32x4(regArray##_11, regArray##_15, 0xee); + +
+/* 1-step interleave to exchange the high 256 bits and low 256 bits of 2 pairs of registers + |a0|a1|...|a14|a15|e0|e1|...|e14|e15| + |b0|b1|...|b14|b15|f0|f1|...|f14|f15| + |c0|c1|...|c14|c15|g0|g1|...|g14|g15| + |d0|d1|...|d14|d15|h0|h1|...|h14|h15| +*/
+#define BF16_INTERLEAVE256_4x32(regArray) \ + regArray##_0 = _mm512_shuffle_i32x4(regArray##_4, regArray##_6, 0x44); \ + regArray##_1 = _mm512_shuffle_i32x4(regArray##_4, regArray##_6, 0xee); \ + regArray##_2 = _mm512_shuffle_i32x4(regArray##_5, regArray##_7, 0x44); \ + regArray##_3 = _mm512_shuffle_i32x4(regArray##_5, regArray##_7, 0xee); + +
+#define BF16_PERMUTE_8x32(idx, regArray) \ + regArray##_8 = _mm512_permutexvar_epi16(idx, regArray##_0); \ + regArray##_9 = _mm512_permutexvar_epi16(idx, regArray##_1); \ + regArray##_10 = _mm512_permutexvar_epi16(idx, regArray##_2); \ + regArray##_11 = _mm512_permutexvar_epi16(idx, regArray##_3); \ + regArray##_12 = _mm512_permutexvar_epi16(idx, regArray##_4); \ + regArray##_13 = _mm512_permutexvar_epi16(idx, regArray##_5); \ + regArray##_14 = _mm512_permutexvar_epi16(idx, regArray##_6); \ + regArray##_15 = _mm512_permutexvar_epi16(idx, regArray##_7); + +
+#define BF16_PERMUTE_8x32_2(idx, regArray) \ + regArray##_8 = _mm512_permutexvar_epi32(idx, regArray##_0); \ + regArray##_9 = _mm512_permutexvar_epi32(idx, regArray##_1); \ + regArray##_10 = _mm512_permutexvar_epi32(idx, regArray##_2); \ + regArray##_11 = _mm512_permutexvar_epi32(idx, regArray##_3); \ + regArray##_12 = _mm512_permutexvar_epi32(idx, regArray##_4); \ + regArray##_13 = _mm512_permutexvar_epi32(idx, regArray##_5); \ + regArray##_14 = _mm512_permutexvar_epi32(idx, regArray##_6); \ + regArray##_15 = _mm512_permutexvar_epi32(idx, regArray##_7); + +
+#define BF16_PERMUTE_4x32(idx, regArray) \ + regArray##_4 = _mm512_permutexvar_epi16(idx, regArray##_0); \ + regArray##_5 = _mm512_permutexvar_epi16(idx, 
regArray##_1); \ + regArray##_6 = _mm512_permutexvar_epi16(idx, regArray##_2); \ + regArray##_7 = _mm512_permutexvar_epi16(idx, regArray##_3); + + +#define BF16_PERMUTE_4x32_2(idx, regArray) \ + regArray##_4 = _mm512_permutexvar_epi32(idx, regArray##_0); \ + regArray##_5 = _mm512_permutexvar_epi32(idx, regArray##_1); \ + regArray##_6 = _mm512_permutexvar_epi32(idx, regArray##_2); \ + regArray##_7 = _mm512_permutexvar_epi32(idx, regArray##_3); + + +/* Calculate the dot result for 2-step interleaved matrix and vector + (Assume throughput for _mm512_dpbf16_ps is 0.5, tunable per platform) +*/ +#define BF16_2STEP_INTERLEAVED_DOT_8x32(accumArray, matArray, xArray) \ + accumArray##_0 = _mm512_dpbf16_ps(accumArray##_0, (__m512bh) matArray##_0, (__m512bh) xArray##_0); \ + accumArray##_1 = _mm512_dpbf16_ps(accumArray##_1, (__m512bh) matArray##_2, (__m512bh) xArray##_0); \ + accumArray##_0 = _mm512_dpbf16_ps(accumArray##_0, (__m512bh) matArray##_1, (__m512bh) xArray##_1); \ + accumArray##_1 = _mm512_dpbf16_ps(accumArray##_1, (__m512bh) matArray##_3, (__m512bh) xArray##_1); \ + accumArray##_0 = _mm512_dpbf16_ps(accumArray##_0, (__m512bh) matArray##_4, (__m512bh) xArray##_2); \ + accumArray##_1 = _mm512_dpbf16_ps(accumArray##_1, (__m512bh) matArray##_6, (__m512bh) xArray##_2); \ + accumArray##_0 = _mm512_dpbf16_ps(accumArray##_0, (__m512bh) matArray##_5, (__m512bh) xArray##_3); \ + accumArray##_1 = _mm512_dpbf16_ps(accumArray##_1, (__m512bh) matArray##_7, (__m512bh) xArray##_3); + + +/* Calculate the dot result for 2-step interleaved matrix and vector + (Assume throughput for _mm256_dpbf16_ps is 0.5, tunable per platform) +*/ +#define BF16_2STEP_INTERLEAVED_DOT_8x16(accumArray, matArray, xArray) \ + accumArray##_0 = _mm256_dpbf16_ps(accumArray##_0, (__m256bh) matArray##_0, (__m256bh) xArray##_0); \ + accumArray##_1 = _mm256_dpbf16_ps(accumArray##_1, (__m256bh) matArray##_2, (__m256bh) xArray##_0); \ + accumArray##_0 = _mm256_dpbf16_ps(accumArray##_0, (__m256bh) matArray##_1, (__m256bh) xArray##_1); \ + accumArray##_1 = _mm256_dpbf16_ps(accumArray##_1, (__m256bh) matArray##_3, (__m256bh) xArray##_1); \ + accumArray##_0 = _mm256_dpbf16_ps(accumArray##_0, (__m256bh) matArray##_4, (__m256bh) xArray##_2); \ + accumArray##_1 = _mm256_dpbf16_ps(accumArray##_1, (__m256bh) matArray##_6, (__m256bh) xArray##_2); \ + accumArray##_0 = _mm256_dpbf16_ps(accumArray##_0, (__m256bh) matArray##_5, (__m256bh) xArray##_3); \ + accumArray##_1 = _mm256_dpbf16_ps(accumArray##_1, (__m256bh) matArray##_7, (__m256bh) xArray##_3); + +/* Calculate the dot result for 2-step interleaved matrix and vector + (Assume throughput for _mm512_dpbf16_ps is 0.5, tunable per platform) +*/ +#define BF16_2STEP_INTERLEAVED_DOT_4x32(accumArray, matArray, xArray) \ + accumArray##_0 = _mm512_dpbf16_ps(accumArray##_0, (__m512bh) matArray##_0, (__m512bh) xArray##_0); \ + accumArray##_1 = _mm512_dpbf16_ps(accumArray##_1, (__m512bh) matArray##_1, (__m512bh) xArray##_1); \ + accumArray##_0 = _mm512_dpbf16_ps(accumArray##_0, (__m512bh) matArray##_2, (__m512bh) xArray##_2); \ + accumArray##_1 = _mm512_dpbf16_ps(accumArray##_1, (__m512bh) matArray##_3, (__m512bh) xArray##_3); + + +/* Calculate the dot result for 2-step interleaved matrix and vector + (Assume throughput for _mm256_dpbf16_ps is 0.5, tunable per platform) +*/ +#define BF16_2STEP_INTERLEAVED_DOT_4x16(accumArray, matArray, xArray) \ + accumArray##_0 = _mm256_dpbf16_ps(accumArray##_0, (__m256bh) matArray##_0, (__m256bh) xArray##_0); \ + accumArray##_1 = _mm256_dpbf16_ps(accumArray##_1, 
(__m256bh) matArray##_1, (__m256bh) xArray##_1); \ + accumArray##_0 = _mm256_dpbf16_ps(accumArray##_0, (__m256bh) matArray##_2, (__m256bh) xArray##_2); \ + accumArray##_1 = _mm256_dpbf16_ps(accumArray##_1, (__m256bh) matArray##_3, (__m256bh) xArray##_3); + +
+/* Calculate the dot result for matrix and vector at 32 elements per row + (Assume throughput for _mm512_dpbf16_ps is 0.5, tunable per platform) +*/
+#define BF16_DOT_8x32(accumArray, matArray, xArray) \ + accumArray##_0 = _mm512_dpbf16_ps(accumArray##_0, (__m512bh) matArray##_0, (__m512bh) xArray); \ + accumArray##_1 = _mm512_dpbf16_ps(accumArray##_1, (__m512bh) matArray##_1, (__m512bh) xArray); \ + accumArray##_2 = _mm512_dpbf16_ps(accumArray##_2, (__m512bh) matArray##_2, (__m512bh) xArray); \ + accumArray##_3 = _mm512_dpbf16_ps(accumArray##_3, (__m512bh) matArray##_3, (__m512bh) xArray); \ + accumArray##_4 = _mm512_dpbf16_ps(accumArray##_4, (__m512bh) matArray##_4, (__m512bh) xArray); \ + accumArray##_5 = _mm512_dpbf16_ps(accumArray##_5, (__m512bh) matArray##_5, (__m512bh) xArray); \ + accumArray##_6 = _mm512_dpbf16_ps(accumArray##_6, (__m512bh) matArray##_6, (__m512bh) xArray); \ + accumArray##_7 = _mm512_dpbf16_ps(accumArray##_7, (__m512bh) matArray##_7, (__m512bh) xArray); +
+/* Calculate the dot result for matrix and vector at 32 elements per row + (Assume throughput for _mm512_dpbf16_ps is 0.5, tunable per platform) +*/
+#define BF16_DOT_1x32(accumArray, matArray, xArray) \ + accumArray = _mm512_dpbf16_ps(accumArray, (__m512bh) matArray, (__m512bh) xArray); +
+/* Calculate the dot result for matrix and vector at 16 elements per row + (Assume throughput for _mm256_dpbf16_ps is 0.5, tunable per platform) +*/
+#define BF16_DOT_8x16(accumArray, matArray, xArray) \ + accumArray##_0 = _mm256_dpbf16_ps(accumArray##_0, (__m256bh) matArray##_0, (__m256bh) xArray); \ + accumArray##_1 = _mm256_dpbf16_ps(accumArray##_1, (__m256bh) matArray##_1, (__m256bh) xArray); \ + accumArray##_2 = _mm256_dpbf16_ps(accumArray##_2, (__m256bh) matArray##_2, (__m256bh) xArray); \ + accumArray##_3 = _mm256_dpbf16_ps(accumArray##_3, (__m256bh) matArray##_3, (__m256bh) xArray); \ + accumArray##_4 = _mm256_dpbf16_ps(accumArray##_4, (__m256bh) matArray##_4, (__m256bh) xArray); \ + accumArray##_5 = _mm256_dpbf16_ps(accumArray##_5, (__m256bh) matArray##_5, (__m256bh) xArray); \ + accumArray##_6 = _mm256_dpbf16_ps(accumArray##_6, (__m256bh) matArray##_6, (__m256bh) xArray); \ + accumArray##_7 = _mm256_dpbf16_ps(accumArray##_7, (__m256bh) matArray##_7, (__m256bh) xArray); + +
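+/* Illustrative note (editorial, not from the original patch): the 2-step
+   interleaved DOT macros above deliberately ping-pong between
+   accumArray##_0 and accumArray##_1; with a dpbf16 throughput of roughly
+   one instruction every two cycles, two independent accumulator chains keep
+   back-to-back FMAs from serializing on a single register dependency. */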
+/* 2-step interleave for matrix against 8 rows with 16 fp32 elements per row + Input - register array of 8 rows of row-major matrix + Output - the output of Step 2 + + Step 1: 2-element interleave for matrix + |a0|b0|a1|b1|a4|b4|a5|b5|a8 |b8 |a9 |b9 |a12|b12|a13|b13| + |c0|d0|c1|d1|c4|d4|c5|d5|c8 |d8 |c9 |d9 |c12|d12|c13|d13| + |e0|f0|e1|f1|e4|f4|e5|f5|e8 |f8 |e9 |f9 |e12|f12|e13|f13| + |g0|h0|g1|h1|g4|h4|g5|h5|g8 |h8 |g9 |h9 |g12|h12|g13|h13| + |a2|b2|a3|b3|a6|b6|a7|b7|a10|b10|a11|b11|a14|b14|a15|b15| + |c2|d2|c3|d3|c6|d6|c7|d7|c10|d10|c11|d11|c14|d14|c15|d15| + |e2|f2|e3|f3|e6|f6|e7|f7|e10|f10|e11|f11|e14|f14|e15|f15| + |g2|h2|g3|h3|g6|h6|g7|h7|g10|h10|g11|h11|g14|h14|g15|h15| + + Step 2: 4-element interleave for matrix + |a0|b0|c0|d0|a4|b4|c4|d4|a8 |b8 |c8 |d8 |a12|b12|c12|d12| + |a1|b1|c1|d1|a5|b5|c5|d5|a9 |b9 |c9 |d9 |a13|b13|c13|d13| + |e0|f0|g0|h0|e4|f4|g4|h4|e8 |f8 |g8 |h8 |e12|f12|g12|h12| + |e1|f1|g1|h1|e5|f5|g5|h5|e9 |f9 |g9 |h9 |e13|f13|g13|h13| + |a2|b2|c2|d2|a6|b6|c6|d6|a10|b10|c10|d10|a14|b14|c14|d14| + |a3|b3|c3|d3|a7|b7|c7|d7|a11|b11|c11|d11|a15|b15|c15|d15| + |e2|f2|g2|h2|e6|f6|g6|h6|e10|f10|g10|h10|e14|f14|g14|h14| + |e3|f3|g3|h3|e7|f7|g7|h7|e11|f11|g11|h11|e15|f15|g15|h15| +*/
+#define FP32_INTERLEAVE_8x16(regArray) \ + regArray##_8 = _mm512_unpacklo_ps(regArray##_0, regArray##_1); \ + regArray##_9 = _mm512_unpacklo_ps(regArray##_2, regArray##_3); \ + regArray##_10 = _mm512_unpacklo_ps(regArray##_4, regArray##_5); \ + regArray##_11 = _mm512_unpacklo_ps(regArray##_6, regArray##_7); \ + regArray##_12 = _mm512_unpackhi_ps(regArray##_0, regArray##_1); \ + regArray##_13 = _mm512_unpackhi_ps(regArray##_2, regArray##_3); \ + regArray##_14 = _mm512_unpackhi_ps(regArray##_4, regArray##_5); \ + regArray##_15 = _mm512_unpackhi_ps(regArray##_6, regArray##_7); \ + \ + regArray##_0 = (__m512) _mm512_unpacklo_pd((__m512d) regArray##_8, (__m512d) regArray##_9); \ + regArray##_1 = (__m512) _mm512_unpackhi_pd((__m512d) regArray##_8, (__m512d) regArray##_9); \ + regArray##_4 = (__m512) _mm512_unpacklo_pd((__m512d) regArray##_10, (__m512d) regArray##_11); \ + regArray##_5 = (__m512) _mm512_unpackhi_pd((__m512d) regArray##_10, (__m512d) regArray##_11); \ + regArray##_2 = (__m512) _mm512_unpacklo_pd((__m512d) regArray##_12, (__m512d) regArray##_13); \ + regArray##_3 = (__m512) _mm512_unpackhi_pd((__m512d) regArray##_12, (__m512d) regArray##_13); \ + regArray##_6 = (__m512) _mm512_unpacklo_pd((__m512d) regArray##_14, (__m512d) regArray##_15); \ + regArray##_7 = (__m512) _mm512_unpackhi_pd((__m512d) regArray##_14, (__m512d) regArray##_15); +
+#define FP32_INTERLEAVE_8x16_ARRAY(regArray) \ + regArray[8] = _mm512_unpacklo_ps(regArray[0], regArray[1]); \ + regArray[9] = _mm512_unpacklo_ps(regArray[2], regArray[3]); \ + regArray[10] = _mm512_unpacklo_ps(regArray[4], regArray[5]); \ + regArray[11] = _mm512_unpacklo_ps(regArray[6], regArray[7]); \ + regArray[12] = _mm512_unpackhi_ps(regArray[0], regArray[1]); \ + regArray[13] = _mm512_unpackhi_ps(regArray[2], regArray[3]); \ + regArray[14] = _mm512_unpackhi_ps(regArray[4], regArray[5]); \ + regArray[15] = _mm512_unpackhi_ps(regArray[6], regArray[7]); \ + \ + regArray[0] = (__m512) _mm512_unpacklo_pd((__m512d) regArray[8], (__m512d) regArray[9]); \ + regArray[1] = (__m512) _mm512_unpackhi_pd((__m512d) regArray[8], (__m512d) regArray[9]); \ + regArray[4] = (__m512) _mm512_unpacklo_pd((__m512d) regArray[10], (__m512d) regArray[11]); \ + regArray[5] = (__m512) _mm512_unpackhi_pd((__m512d) regArray[10], (__m512d) regArray[11]); \ + regArray[2] = (__m512) _mm512_unpacklo_pd((__m512d) regArray[12], (__m512d) regArray[13]); \ + regArray[3] = (__m512) _mm512_unpackhi_pd((__m512d) regArray[12], (__m512d) regArray[13]); \ + regArray[6] = (__m512) _mm512_unpacklo_pd((__m512d) regArray[14], (__m512d) regArray[15]); \ + regArray[7] = (__m512) _mm512_unpackhi_pd((__m512d) regArray[14], (__m512d) regArray[15]); +
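+/* Illustrative note (editorial, not from the original patch): these fp32
+   shuffles transpose partial sums so that values belonging to the same
+   output row land in the same lane of different registers; the FP32_ACCUM2
+   helpers that follow can then finish the reduction with plain vertical
+   _mm512_add_ps instead of a chain of horizontal adds. */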
+/* 2-step interleave for matrix against 8 rows with 8 fp32 elements per row + Input - register array of 8 rows of row-major matrix + Output - the output of Step 2 + + Step 1: 2-element interleave for matrix + |a0|b0|a1|b1|a4|b4|a5|b5| + |c0|d0|c1|d1|c4|d4|c5|d5| + |e0|f0|e1|f1|e4|f4|e5|f5| + |g0|h0|g1|h1|g4|h4|g5|h5| + |a2|b2|a3|b3|a6|b6|a7|b7| + |c2|d2|c3|d3|c6|d6|c7|d7| + |e2|f2|e3|f3|e6|f6|e7|f7| + |g2|h2|g3|h3|g6|h6|g7|h7| + + Step 2: 4-element interleave for matrix + |a0|b0|c0|d0|a4|b4|c4|d4| + |a1|b1|c1|d1|a5|b5|c5|d5| + |e0|f0|g0|h0|e4|f4|g4|h4| + |e1|f1|g1|h1|e5|f5|g5|h5| + |a2|b2|c2|d2|a6|b6|c6|d6| + |a3|b3|c3|d3|a7|b7|c7|d7| + |e2|f2|g2|h2|e6|f6|g6|h6| + |e3|f3|g3|h3|e7|f7|g7|h7| +*/
+#define FP32_INTERLEAVE_8x8(regArray) \ + regArray##_8 = _mm256_unpacklo_ps(regArray##_0, regArray##_1); \ + regArray##_9 = _mm256_unpacklo_ps(regArray##_2, regArray##_3); \ + regArray##_10 = _mm256_unpacklo_ps(regArray##_4, regArray##_5); \ + regArray##_11 = _mm256_unpacklo_ps(regArray##_6, regArray##_7); \ + regArray##_12 = _mm256_unpackhi_ps(regArray##_0, regArray##_1); \ + regArray##_13 = _mm256_unpackhi_ps(regArray##_2, regArray##_3); \ + regArray##_14 = _mm256_unpackhi_ps(regArray##_4, regArray##_5); \ + regArray##_15 = _mm256_unpackhi_ps(regArray##_6, regArray##_7); \ + \ + regArray##_0 = (__m256) _mm256_unpacklo_pd((__m256d) regArray##_8, (__m256d) regArray##_9); \ + regArray##_1 = (__m256) _mm256_unpackhi_pd((__m256d) regArray##_8, (__m256d) regArray##_9); \ + regArray##_4 = (__m256) _mm256_unpacklo_pd((__m256d) regArray##_10, (__m256d) regArray##_11); \ + regArray##_5 = (__m256) _mm256_unpackhi_pd((__m256d) regArray##_10, (__m256d) regArray##_11); \ + regArray##_2 = (__m256) _mm256_unpacklo_pd((__m256d) regArray##_12, (__m256d) regArray##_13); \ + regArray##_3 = (__m256) _mm256_unpackhi_pd((__m256d) regArray##_12, (__m256d) regArray##_13); \ + regArray##_6 = (__m256) _mm256_unpacklo_pd((__m256d) regArray##_14, (__m256d) regArray##_15); \ + regArray##_7 = (__m256) _mm256_unpackhi_pd((__m256d) regArray##_14, (__m256d) regArray##_15); + +
+/* Accumulate the result for 2 batches of 4 registers +*/
+#define FP32_ACCUM2_8x16(regArray) \ + regArray##_0 = _mm512_add_ps(regArray##_0, regArray##_1); \ + regArray##_2 = _mm512_add_ps(regArray##_2, regArray##_3); \ + regArray##_4 = _mm512_add_ps(regArray##_4, regArray##_5); \ + regArray##_6 = _mm512_add_ps(regArray##_6, regArray##_7); \ + regArray##_0 = _mm512_add_ps(regArray##_0, regArray##_2); \ + regArray##_4 = _mm512_add_ps(regArray##_4, regArray##_6); +
+#define FP32_ACCUM2_8x16_ARRAY(regArray) \ + regArray[0] = _mm512_add_ps(regArray[0], regArray[1]); \ + regArray[2] = _mm512_add_ps(regArray[2], regArray[3]); \ + regArray[4] = _mm512_add_ps(regArray[4], regArray[5]); \ + regArray[6] = _mm512_add_ps(regArray[6], regArray[7]); \ + regArray[0] = _mm512_add_ps(regArray[0], regArray[2]); \ + regArray[4] = _mm512_add_ps(regArray[4], regArray[6]); +
+/* Accumulate the result for 2 batches of 4 registers +*/
+#define FP32_ACCUM2_8x8(regArray) \ + regArray##_0 = _mm256_add_ps(regArray##_0, regArray##_1); \ + regArray##_2 = _mm256_add_ps(regArray##_2, regArray##_3); \ + regArray##_4 = _mm256_add_ps(regArray##_4, regArray##_5); \ + regArray##_6 = _mm256_add_ps(regArray##_6, regArray##_7); \ + regArray##_0 = _mm256_add_ps(regArray##_0, regArray##_2); \ + regArray##_4 = _mm256_add_ps(regArray##_4, regArray##_6); + +
+/* Store 16 (alpha * result + beta * y) to y +*/
+#define STORE16_COMPLETE_RESULT_ALPHA_BETA(regResult, targetAddr) \ + regResult = _mm512_fmadd_ps(ALPHAVECTOR, regResult, _mm512_mul_ps(BETAVECTOR, _mm512_loadu_ps(targetAddr))); \ + _mm512_storeu_ps(targetAddr, regResult); + +
+/* Masked store 16 (alpha * result + beta * y) to y +*/
+#define STORE16_MASK_COMPLETE_RESULT_ALPHA_BETA(regResult, targetAddr, mask) \ + regResult = _mm512_fmadd_ps(ALPHAVECTOR, regResult, _mm512_mul_ps(BETAVECTOR, _mm512_maskz_loadu_ps(mask, targetAddr))); \ + _mm512_mask_storeu_ps(targetAddr, mask, regResult); + +
+/* Store 8 (alpha * result + beta * y) to y +*/
+#define STORE8_COMPLETE_RESULT_ALPHA_BETA(regResult, targetAddr) \ + regResult = 
_mm256_fmadd_ps(_mm512_castps512_ps256(ALPHAVECTOR), regResult, _mm256_mul_ps(_mm512_castps512_ps256(BETAVECTOR), _mm256_loadu_ps(targetAddr))); \ + _mm256_storeu_ps(targetAddr, regResult); + + +/* Masked store 8 (alpha * result + beta * y) to y +*/ +#define STORE8_MASK_COMPLETE_RESULT_ALPHA_BETA(regResult, targetAddr, mask) \ + regResult = _mm256_fmadd_ps(_mm512_castps512_ps256(ALPHAVECTOR), regResult, _mm256_mul_ps(_mm512_castps512_ps256(BETAVECTOR), _mm256_maskz_loadu_ps(mask, targetAddr))); \ + _mm256_mask_storeu_ps(targetAddr, mask, regResult); + + +/* Store 4 (alpha * result + beta * y) to y +*/ +#define STORE4_COMPLETE_RESULT_ALPHA_BETA(regResult, targetAddr) \ + regResult = _mm_fmadd_ps(_mm512_castps512_ps128(ALPHAVECTOR), regResult, _mm_mul_ps(_mm512_castps512_ps128(BETAVECTOR), _mm_loadu_ps(targetAddr))); \ + _mm_storeu_ps(targetAddr, regResult); + + +/* Masked store 4 (alpha * result + beta * y) to y +*/ +#define STORE4_MASK_COMPLETE_RESULT_ALPHA_BETA(regResult, targetAddr, mask) \ + regResult = _mm_fmadd_ps(_mm512_castps512_ps128(ALPHAVECTOR), regResult, _mm_mul_ps(_mm512_castps512_ps128(BETAVECTOR), _mm_maskz_loadu_ps(mask, targetAddr))); \ + _mm_mask_storeu_ps(targetAddr, mask, regResult); + + +/* Store 16 (alpha * result + y) to y +*/ +#define STORE16_COMPLETE_RESULT_ALPHA_ONE(regResult, targetAddr) \ + regResult = _mm512_fmadd_ps(ALPHAVECTOR, regResult, _mm512_loadu_ps(targetAddr)); \ + _mm512_storeu_ps(targetAddr, regResult); + + +/* Masked store 16 (alpha * result + y) to y +*/ +#define STORE16_MASK_COMPLETE_RESULT_ALPHA_ONE(regResult, targetAddr, mask) \ + regResult = _mm512_fmadd_ps(ALPHAVECTOR, regResult, _mm512_maskz_loadu_ps(mask, targetAddr)); \ + _mm512_mask_storeu_ps(targetAddr, mask, regResult); + + +/* Store 8 (alpha * result + y) to y +*/ +#define STORE8_COMPLETE_RESULT_ALPHA_ONE(regResult, targetAddr) \ + regResult = _mm256_fmadd_ps(_mm512_castps512_ps256(ALPHAVECTOR), regResult, _mm256_loadu_ps(targetAddr)); \ + _mm256_storeu_ps(targetAddr, regResult); + + +/* Masked store 8 (alpha * result + y) to y +*/ +#define STORE8_MASK_COMPLETE_RESULT_ALPHA_ONE(regResult, targetAddr, mask) \ + regResult = _mm256_fmadd_ps(_mm512_castps512_ps256(ALPHAVECTOR), regResult, _mm256_maskz_loadu_ps(mask, targetAddr)); \ + _mm256_mask_storeu_ps(targetAddr, mask, regResult); + + +/* Store 4 (alpha * result + y) to y +*/ +#define STORE4_COMPLETE_RESULT_ALPHA_ONE(regResult, targetAddr) \ + regResult = _mm_fmadd_ps(_mm512_castps512_ps128(ALPHAVECTOR), regResult, _mm_loadu_ps(targetAddr)); \ + _mm_storeu_ps(targetAddr, regResult); + + +/* Masked store 4 (alpha * result + y) to y +*/ +#define STORE4_MASK_COMPLETE_RESULT_ALPHA_ONE(regResult, targetAddr, mask) \ + regResult = _mm_fmadd_ps(_mm512_castps512_ps128(ALPHAVECTOR), regResult, _mm_maskz_loadu_ps(mask, targetAddr)); \ + _mm_mask_storeu_ps(targetAddr, mask, regResult); + + +/* Store 16 (alpha * result) to y +*/ +#define STORE16_COMPLETE_RESULT_ALPHA(regResult, targetAddr) \ + _mm512_storeu_ps(targetAddr, _mm512_mul_ps(ALPHAVECTOR, regResult)); + + +/* Masked store 16 (alpha * result) to y +*/ +#define STORE16_MASK_COMPLETE_RESULT_ALPHA(regResult, targetAddr, mask) \ + _mm512_mask_storeu_ps(targetAddr, mask, _mm512_mul_ps(ALPHAVECTOR, regResult)); + + +/* Store 8 (alpha * result) to y +*/ +#define STORE8_COMPLETE_RESULT_ALPHA(regResult, targetAddr) \ + _mm256_storeu_ps(targetAddr, _mm256_mul_ps(_mm512_castps512_ps256(ALPHAVECTOR), regResult)); + + +/* Masked store 8 (alpha * result) to y +*/ +#define 
STORE8_MASK_COMPLETE_RESULT_ALPHA(regResult, targetAddr, mask) \ + _mm256_mask_storeu_ps(targetAddr, mask, _mm256_mul_ps(_mm512_castps512_ps256(ALPHAVECTOR), regResult)); + + +/* Store 4 (alpha * result) to y +*/ +#define STORE4_COMPLETE_RESULT_ALPHA(regResult, targetAddr) \ + _mm_storeu_ps(targetAddr, _mm_mul_ps(_mm512_castps512_ps128(ALPHAVECTOR), regResult)); + + +/* Masked store 4 (alpha * result) to y +*/ +#define STORE4_MASK_COMPLETE_RESULT_ALPHA(regResult, targetAddr, mask) \ + _mm_mask_storeu_ps(targetAddr, mask, _mm_mul_ps(_mm512_castps512_ps128(ALPHAVECTOR), regResult)); + + +/* Store 16 result to y +*/ +#define STORE16_COMPLETE_RESULT_DIRECT(regResult, targetAddr) \ + _mm512_storeu_ps(targetAddr, regResult); + + +/* Masked store 16 result to y +*/ +#define STORE16_MASK_COMPLETE_RESULT_DIRECT(regResult, targetAddr, mask) \ + _mm512_mask_storeu_ps(targetAddr, mask, regResult); + + +/* Store 8 result to y +*/ +#define STORE8_COMPLETE_RESULT_DIRECT(regResult, targetAddr) \ + _mm256_storeu_ps(targetAddr, regResult); + + +/* Masked store 8 result to y +*/ +#define STORE8_MASK_COMPLETE_RESULT_DIRECT(regResult, targetAddr, mask) \ + _mm256_mask_storeu_ps(targetAddr, mask, regResult); + + +/* Store 4 result to y +*/ +#define STORE4_COMPLETE_RESULT_DIRECT(regResult, targetAddr) \ + _mm_storeu_ps(targetAddr, regResult); + + +/* Masked store 4 result to y +*/ +#define STORE4_MASK_COMPLETE_RESULT_DIRECT(regResult, targetAddr, mask) \ + _mm_mask_storeu_ps(targetAddr, mask, regResult); + +#endif diff --git a/kernel/x86_64/bf16to.c b/kernel/x86_64/bf16to.c new file mode 100644 index 000000000..fc6b5a529 --- /dev/null +++ b/kernel/x86_64/bf16to.c @@ -0,0 +1,114 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include <stdint.h> +#include "common.h" + +#if defined(DOUBLE) +#define FLOAT_TYPE double +#elif defined(SINGLE) +#define FLOAT_TYPE float +#else +#endif +
+/* Notes for algorithm: + * - Input denormal treated as zero + * - Force to be QNAN + */
+static void bf16to_kernel_1(BLASLONG n, const bfloat16 * in, BLASLONG inc_in, FLOAT_TYPE * out, BLASLONG inc_out) +{ + BLASLONG register index_in = 0; + BLASLONG register index_out = 0; + BLASLONG register index = 0; + uint16_t * tmp = NULL; +#if defined(DOUBLE) + float float_out = 0.0; +#endif + + while(index < n)
[...]
diff --git a/kernel/x86_64/casum.c b/kernel/x86_64/casum.c new file mode 100644 --- /dev/null +++ b/kernel/x86_64/casum.c
[...]
+#include "common.h" + +#ifndef ABS_K +#define ABS_K(a) ((a) > 0 ? (a) : (-(a))) +#endif + +#if defined(SKYLAKEX) +#include "casum_microk_skylakex-2.c" +#endif +
+#ifndef HAVE_CASUM_KERNEL +static FLOAT casum_kernel(BLASLONG n, FLOAT *x1) +{ + + BLASLONG i=0; + BLASLONG n_8 = n & -8; + FLOAT *x = x1; + FLOAT temp0, temp1, temp2, temp3; + FLOAT temp4, temp5, temp6, temp7; + FLOAT sum0 = 0.0; + FLOAT sum1 = 0.0; + FLOAT sum2 = 0.0; + FLOAT sum3 = 0.0; + FLOAT sum4 = 0.0; + + while (i < n_8) { + temp0 = ABS_K(x[0]); + temp1 = ABS_K(x[1]); + temp2 = ABS_K(x[2]); + temp3 = ABS_K(x[3]); + temp4 = ABS_K(x[4]); + temp5 = ABS_K(x[5]); + temp6 = ABS_K(x[6]); + temp7 = ABS_K(x[7]); + + sum0 += temp0; + sum1 += temp1; + sum2 += temp2; + sum3 += temp3; + + sum0 += temp4; + sum1 += temp5; + sum2 += temp6; + sum3 += temp7; + + x+=8; + i+=4; + } + + while (i < n) { + sum4 += (ABS_K(x[0]) + ABS_K(x[1])); + x += 2; + i++; + } + + return sum0+sum1+sum2+sum3+sum4; +} + +#endif +
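+/* Illustrative note (editorial, not from the original patch): the scalar
+   meaning of the fallback above is simply
+       for (i = 0; i < n; i++) sum += fabsf(x[2*i]) + fabsf(x[2*i+1]);
+   splitting the total over sum0..sum3 breaks the floating-point add
+   dependency chain, which lets the compiler vectorize and pipeline the
+   reduction. */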
            num_cpu : n/10000;
+
+    if (nthreads == 1) {
+        sumf = asum_compute(n, x, inc_x);
+    }
+    else {
+        int mode, i;
+        char result[MAX_CPU_NUMBER * sizeof(double) *2];
+        FLOAT *ptr;
+#if !defined(DOUBLE)
+        mode = BLAS_SINGLE | BLAS_COMPLEX;
+#else
+        mode = BLAS_DOUBLE | BLAS_COMPLEX;
+#endif
+        blas_level1_thread_with_return_value(mode, n, 0, 0, dummy_alpha, x, inc_x,
+                                             NULL, 0, result, 0, (void *)asum_thread_function, nthreads);
+        ptr = (FLOAT *)result;
+        for (i = 0; i < nthreads; i++) {
+            sumf += (*ptr);
+            ptr = (FLOAT *)(((char *)ptr) + sizeof(double) *2);
+        }
+    }
+#else
+    sumf = asum_compute(n, x, inc_x);
+#endif
+    return(sumf);
+}
diff --git a/kernel/x86_64/casum_microk_skylakex-2.c b/kernel/x86_64/casum_microk_skylakex-2.c
new file mode 100644
index 000000000..d51929f9f
--- /dev/null
+++ b/kernel/x86_64/casum_microk_skylakex-2.c
@@ -0,0 +1,349 @@
+/* need a new enough GCC for avx512 support */
+#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_CASUM_KERNEL 1
+
+#include <immintrin.h>
+
+#include <stdint.h>
+
+static FLOAT casum_kernel(BLASLONG n, FLOAT *x)
+{
+    FLOAT *x1 = x;
+    FLOAT sumf=0.0;
+    BLASLONG n2 = n + n;
+
+    if (n2 < 64) {
+        __m128 accum_10, accum_11, accum_12, accum_13;
+        __m128 abs_mask1;
+
+        accum_10 = _mm_setzero_ps();
+        accum_11 = _mm_setzero_ps();
+        accum_12 = _mm_setzero_ps();
+        accum_13 = _mm_setzero_ps();
+
+        /* all-ones from cmpeq of a register with itself, then clear the sign bit */
+        abs_mask1 = (__m128)_mm_cmpeq_epi8((__m128i) abs_mask1, (__m128i) abs_mask1);
+        abs_mask1 = (__m128)_mm_srli_epi32((__m128i) abs_mask1, 1);
+
+        _mm_prefetch(&x1[0], _MM_HINT_T0);
+
+        if (n2 >= 32){
+            __m128 x00 = _mm_loadu_ps(&x1[ 0]);
+            __m128 x01 = _mm_loadu_ps(&x1[ 4]);
+            __m128 x02 = _mm_loadu_ps(&x1[ 8]);
+            __m128 x03 = _mm_loadu_ps(&x1[12]);
+
+            _mm_prefetch(&x1[16], _MM_HINT_T0);
+            __m128 x04 = _mm_loadu_ps(&x1[16]);
+            __m128 x05 = _mm_loadu_ps(&x1[20]);
+            __m128 x06 = _mm_loadu_ps(&x1[24]);
+            __m128 x07 = _mm_loadu_ps(&x1[28]);
+
+            x00 = _mm_and_ps(x00, abs_mask1);
+            x01 = _mm_and_ps(x01, abs_mask1);
+            x02 = _mm_and_ps(x02, abs_mask1);
+            x03 = _mm_and_ps(x03, abs_mask1);
+
+            accum_10 = _mm_add_ps(accum_10, x00);
+            accum_11 = _mm_add_ps(accum_11, x01);
+            accum_12 = _mm_add_ps(accum_12, x02);
+            accum_13 = _mm_add_ps(accum_13, x03);
+
+            x04 = _mm_and_ps(x04, abs_mask1);
+            x05 = _mm_and_ps(x05, abs_mask1);
+            x06 = _mm_and_ps(x06, abs_mask1);
+            x07 = _mm_and_ps(x07, abs_mask1);
+
+            accum_10 = _mm_add_ps(accum_10, x04);
+            accum_11 = _mm_add_ps(accum_11, x05);
+            accum_12 = _mm_add_ps(accum_12, x06);
+            accum_13 = _mm_add_ps(accum_13, x07);
+
+            n2 -= 32;
+            x1 += 32;
+        }
+
+        if (n2 >= 16) {
+            __m128 x00 = _mm_loadu_ps(&x1[ 0]);
+            __m128 x01 = _mm_loadu_ps(&x1[ 4]);
+            __m128 x02 = _mm_loadu_ps(&x1[ 8]);
+            __m128 x03 = _mm_loadu_ps(&x1[12]);
+
+            x00 = _mm_and_ps(x00, abs_mask1);
+            x01 = _mm_and_ps(x01, abs_mask1);
+            x02 = _mm_and_ps(x02, abs_mask1);
+            x03 = _mm_and_ps(x03, abs_mask1);
+            accum_10 = _mm_add_ps(accum_10, x00);
+            accum_11 = _mm_add_ps(accum_11, x01);
+            accum_12 = _mm_add_ps(accum_12, x02);
+            accum_13 = _mm_add_ps(accum_13, x03);
+
+            n2 -= 16;
+            x1 += 16;
+        }
+
+        if (n2 >= 8) {
+            __m128 x00 = _mm_loadu_ps(&x1[ 0]);
+            __m128 x01 = _mm_loadu_ps(&x1[ 4]);
+            x00 = _mm_and_ps(x00, abs_mask1);
+            x01 = _mm_and_ps(x01, abs_mask1);
+            accum_10 = _mm_add_ps(accum_10, x00);
+            accum_11 = _mm_add_ps(accum_11, x01);
+
+            n2 -= 8;
+            x1 += 8;
+        }
+
+        if (n2 >= 4) {
+            __m128 x00 = _mm_loadu_ps(&x1[ 0]);
+            x00 = _mm_and_ps(x00, abs_mask1);
+            accum_10 = _mm_add_ps(accum_10, x00);
+
+            n2 -= 4;
+            x1 += 4;
+        }
+
+        if (n2) {
sumf += (ABS_K(x1[0]) + ABS_K(x1[1])); + } + + accum_10 = _mm_add_ps(accum_10, accum_11); + accum_12 = _mm_add_ps(accum_12, accum_13); + accum_10 = _mm_add_ps(accum_10, accum_12); + + accum_10 = _mm_hadd_ps(accum_10, accum_10); + accum_10 = _mm_hadd_ps(accum_10, accum_10); + + sumf += accum_10[0]; + } + else { + __m512 accum_0, accum_1, accum_2, accum_3; + __m512 x00, x01, x02, x03, x04, x05, x06, x07; + __m512 abs_mask = (__m512)_mm512_set1_epi32(0x7fffffff); + + accum_0 = _mm512_setzero_ps(); + accum_1 = _mm512_setzero_ps(); + accum_2 = _mm512_setzero_ps(); + accum_3 = _mm512_setzero_ps(); + + // alignment has side-effect when the size of input array is not large enough + if (n2 < 256) { + if (n2 >= 128) { + x00 = _mm512_loadu_ps(&x1[ 0]); + x01 = _mm512_loadu_ps(&x1[ 16]); + x02 = _mm512_loadu_ps(&x1[ 32]); + x03 = _mm512_loadu_ps(&x1[ 48]); + x04 = _mm512_loadu_ps(&x1[ 64]); + x05 = _mm512_loadu_ps(&x1[ 80]); + x06 = _mm512_loadu_ps(&x1[ 96]); + x07 = _mm512_loadu_ps(&x1[112]); + + x00 = _mm512_and_ps(x00, abs_mask); + x01 = _mm512_and_ps(x01, abs_mask); + x02 = _mm512_and_ps(x02, abs_mask); + x03 = _mm512_and_ps(x03, abs_mask); + + accum_0 = _mm512_add_ps(accum_0, x00); + accum_1 = _mm512_add_ps(accum_1, x01); + accum_2 = _mm512_add_ps(accum_2, x02); + accum_3 = _mm512_add_ps(accum_3, x03); + + x04 = _mm512_and_ps(x04, abs_mask); + x05 = _mm512_and_ps(x05, abs_mask); + x06 = _mm512_and_ps(x06, abs_mask); + x07 = _mm512_and_ps(x07, abs_mask); + + accum_0 = _mm512_add_ps(accum_0, x04); + accum_1 = _mm512_add_ps(accum_1, x05); + accum_2 = _mm512_add_ps(accum_2, x06); + accum_3 = _mm512_add_ps(accum_3, x07); + + n2 -= 128; + x1 += 128; + } + + if (n2 >= 64) { + x00 = _mm512_loadu_ps(&x1[ 0]); + x01 = _mm512_loadu_ps(&x1[16]); + x02 = _mm512_loadu_ps(&x1[32]); + x03 = _mm512_loadu_ps(&x1[48]); + x00 = _mm512_and_ps(x00, abs_mask); + x01 = _mm512_and_ps(x01, abs_mask); + x02 = _mm512_and_ps(x02, abs_mask); + x03 = _mm512_and_ps(x03, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + accum_1 = _mm512_add_ps(accum_1, x01); + accum_2 = _mm512_add_ps(accum_2, x02); + accum_3 = _mm512_add_ps(accum_3, x03); + + n2 -= 64; + x1 += 64; + } + + if (n2 >= 32) { + x00 = _mm512_loadu_ps(&x1[ 0]); + x01 = _mm512_loadu_ps(&x1[16]); + x00 = _mm512_and_ps(x00, abs_mask); + x01 = _mm512_and_ps(x01, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + accum_1 = _mm512_add_ps(accum_1, x01); + + n2 -= 32; + x1 += 32; + } + + if (n2 >= 16) { + x00 = _mm512_loadu_ps(&x1[ 0]); + x00 = _mm512_and_ps(x00, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + + n2 -= 16; + x1 += 16; + } + + if (n2) { + uint16_t tail_mask16 = (((uint16_t) 0xffff) >> (16 - n2)); + x00 = _mm512_maskz_loadu_ps(*((__mmask16*) &tail_mask16), &x1[ 0]); + x00 = _mm512_and_ps(x00, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + } + accum_0 = _mm512_add_ps(accum_0, accum_1); + accum_2 = _mm512_add_ps(accum_2, accum_3); + accum_0 = _mm512_add_ps(accum_0, accum_2); + + sumf = _mm512_reduce_add_ps(accum_0); + } + // n2 >= 256, doing alignment + else { + + int align_header = ((64 - ((uintptr_t)x1 & (uintptr_t)0x3f)) >> 2) & 0xf; + + if (0 != align_header) { + uint16_t align_mask16 = (((uint16_t)0xffff) >> (16 - align_header)); + x00 = _mm512_maskz_loadu_ps(*((__mmask16*) &align_mask16), &x1[0]); + x00 = _mm512_and_ps(x00, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + + n2 -= align_header; + x1 += align_header; + } + + x00 = _mm512_load_ps(&x1[ 0]); + x01 = _mm512_load_ps(&x1[ 16]); + x02 = _mm512_load_ps(&x1[ 32]); + x03 = 
_mm512_load_ps(&x1[ 48]); + x04 = _mm512_load_ps(&x1[ 64]); + x05 = _mm512_load_ps(&x1[ 80]); + x06 = _mm512_load_ps(&x1[ 96]); + x07 = _mm512_load_ps(&x1[112]); + + n2 -= 128; + x1 += 128; + + while (n2 >= 128) { + x00 = _mm512_and_ps(x00, abs_mask); + x01 = _mm512_and_ps(x01, abs_mask); + x02 = _mm512_and_ps(x02, abs_mask); + x03 = _mm512_and_ps(x03, abs_mask); + + accum_0 = _mm512_add_ps(accum_0, x00); + x00 = _mm512_load_ps(&x1[ 0]); + accum_1 = _mm512_add_ps(accum_1, x01); + x01 = _mm512_load_ps(&x1[ 16]); + accum_2 = _mm512_add_ps(accum_2, x02); + x02 = _mm512_load_ps(&x1[ 32]); + accum_3 = _mm512_add_ps(accum_3, x03); + x03 = _mm512_load_ps(&x1[ 48]); + + x04 = _mm512_and_ps(x04, abs_mask); + x05 = _mm512_and_ps(x05, abs_mask); + x06 = _mm512_and_ps(x06, abs_mask); + x07 = _mm512_and_ps(x07, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x04); + x04 = _mm512_load_ps(&x1[ 64]); + accum_1 = _mm512_add_ps(accum_1, x05); + x05 = _mm512_load_ps(&x1[ 80]); + accum_2 = _mm512_add_ps(accum_2, x06); + x06 = _mm512_load_ps(&x1[ 96]); + accum_3 = _mm512_add_ps(accum_3, x07); + x07 = _mm512_load_ps(&x1[112]); + + n2 -= 128; + x1 += 128; + } + x00 = _mm512_and_ps(x00, abs_mask); + x01 = _mm512_and_ps(x01, abs_mask); + x02 = _mm512_and_ps(x02, abs_mask); + x03 = _mm512_and_ps(x03, abs_mask); + + accum_0 = _mm512_add_ps(accum_0, x00); + accum_1 = _mm512_add_ps(accum_1, x01); + accum_2 = _mm512_add_ps(accum_2, x02); + accum_3 = _mm512_add_ps(accum_3, x03); + + x04 = _mm512_and_ps(x04, abs_mask); + x05 = _mm512_and_ps(x05, abs_mask); + x06 = _mm512_and_ps(x06, abs_mask); + x07 = _mm512_and_ps(x07, abs_mask); + + accum_0 = _mm512_add_ps(accum_0, x04); + accum_1 = _mm512_add_ps(accum_1, x05); + accum_2 = _mm512_add_ps(accum_2, x06); + accum_3 = _mm512_add_ps(accum_3, x07); + + if (n2 >= 64) { + x00 = _mm512_load_ps(&x1[ 0]); + x01 = _mm512_load_ps(&x1[16]); + x02 = _mm512_load_ps(&x1[32]); + x03 = _mm512_load_ps(&x1[48]); + x00 = _mm512_and_ps(x00, abs_mask); + x01 = _mm512_and_ps(x01, abs_mask); + x02 = _mm512_and_ps(x02, abs_mask); + x03 = _mm512_and_ps(x03, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + accum_1 = _mm512_add_ps(accum_1, x01); + accum_2 = _mm512_add_ps(accum_2, x02); + accum_3 = _mm512_add_ps(accum_3, x03); + + n2 -= 64; + x1 += 64; + } + + if (n2 >= 32) { + x00 = _mm512_load_ps(&x1[ 0]); + x01 = _mm512_load_ps(&x1[16]); + x00 = _mm512_and_ps(x00, abs_mask); + x01 = _mm512_and_ps(x01, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + accum_1 = _mm512_add_ps(accum_1, x01); + + n2 -= 32; + x1 += 32; + } + + if (n2 >= 16) { + x00 = _mm512_load_ps(&x1[ 0]); + x00 = _mm512_and_ps(x00, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + + n2 -= 16; + x1 += 16; + } + + if (n2) { + uint16_t tail_mask16 = (((uint16_t) 0xffff) >> (16 - n2)); + x00 = _mm512_maskz_load_ps(*((__mmask16*) &tail_mask16), &x1[ 0]); + x00 = _mm512_and_ps(x00, abs_mask); + accum_0 = _mm512_add_ps(accum_0, x00); + } + + accum_0 = _mm512_add_ps(accum_0, accum_1); + accum_2 = _mm512_add_ps(accum_2, accum_3); + accum_0 = _mm512_add_ps(accum_0, accum_2); + sumf = _mm512_reduce_add_ps(accum_0); + } + } + + return sumf; +} +#endif diff --git a/kernel/x86_64/caxpy.c b/kernel/x86_64/caxpy.c index 586d05ac2..c19b98f02 100644 --- a/kernel/x86_64/caxpy.c +++ b/kernel/x86_64/caxpy.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "caxpy_microk_steamroller-2.c" #elif defined(BULLDOZER) #include "caxpy_microk_bulldozer-2.c" -#elif defined(HASWELL) || defined(ZEN) || defined(SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined(SKYLAKEX) || defined(COOPERLAKE) #include "caxpy_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "caxpy_microk_sandy-2.c" diff --git a/kernel/x86_64/caxpy_microk_bulldozer-2.c b/kernel/x86_64/caxpy_microk_bulldozer-2.c index ca2209340..a32558dc9 100644 --- a/kernel/x86_64/caxpy_microk_bulldozer-2.c +++ b/kernel/x86_64/caxpy_microk_bulldozer-2.c @@ -122,7 +122,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", @@ -189,9 +189,10 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/caxpy_microk_haswell-2.c b/kernel/x86_64/caxpy_microk_haswell-2.c index b605ea34c..129ce7a49 100644 --- a/kernel/x86_64/caxpy_microk_haswell-2.c +++ b/kernel/x86_64/caxpy_microk_haswell-2.c @@ -120,7 +120,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", diff --git a/kernel/x86_64/caxpy_microk_sandy-2.c b/kernel/x86_64/caxpy_microk_sandy-2.c index 72d37afed..564dfbd0f 100644 --- a/kernel/x86_64/caxpy_microk_sandy-2.c +++ b/kernel/x86_64/caxpy_microk_sandy-2.c @@ -104,7 +104,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", diff --git a/kernel/x86_64/caxpy_microk_steamroller-2.c b/kernel/x86_64/caxpy_microk_steamroller-2.c index 7ca7af070..cc5c5de76 100644 --- a/kernel/x86_64/caxpy_microk_steamroller-2.c +++ b/kernel/x86_64/caxpy_microk_steamroller-2.c @@ -122,7 +122,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", @@ -189,9 +189,10 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/cdot.c b/kernel/x86_64/cdot.c index 93fca0a0d..f2bf19dcd 100644 --- a/kernel/x86_64/cdot.c +++ b/kernel/x86_64/cdot.c @@ -34,7 +34,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "cdot_microk_bulldozer-2.c" #elif defined(STEAMROLLER) || defined(PILEDRIVER) || defined(EXCAVATOR) #include "cdot_microk_steamroller-2.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "cdot_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "cdot_microk_sandy-2.c" @@ -141,8 +141,8 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA i=0; ix=0; iy=0; - inc_x <<= 1; - inc_y <<= 1; + inc_x *= 2; + inc_y *= 2; while(i < n) { diff --git a/kernel/x86_64/cgemm3m_kernel_8x4_haswell.c b/kernel/x86_64/cgemm3m_kernel_8x4_haswell.c new file mode 100644 index 000000000..01fbf3064 --- /dev/null +++ b/kernel/x86_64/cgemm3m_kernel_8x4_haswell.c @@ -0,0 +1,279 @@ +/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store */ +/* r12 = k << 4(const), r13 = k(const), r14 = b_head_pos(const), r15 = tmp */ + +#include "common.h" +#include + +//recommended settings: GEMM_P = 320, GEMM_Q = 320. + +/* m = 8 *//* ymm0 for alpha, ymm1-ymm3 for temporary use, ymm4-ymm15 for accumulators */ +#define KERNEL_k1m8n1 \ + "vmovups (%0),%%ymm1; addq $32,%0;"\ + "vbroadcastss (%1),%%ymm2; vfmadd231ps %%ymm1,%%ymm2,%%ymm4;"\ + "addq $4,%1;" +#define KERNEL_h_k1m8n2 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; addq $32,%0;"\ + "vbroadcastsd (%1),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;" +#define KERNEL_k1m8n2 KERNEL_h_k1m8n2 "addq $8,%1;" +#define KERNEL_h_k1m8n4 \ + KERNEL_h_k1m8n2 "vbroadcastsd 8(%1),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;" +#define KERNEL_k1m8n4 KERNEL_h_k1m8n4 "addq $16,%1;" +#define unit_kernel_k1m8n4(c1,c2,c3,c4,...) 
\ + "vbroadcastsd ("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c1"; vfmadd231ps %%ymm2,%%ymm3,"#c2";"\ + "vbroadcastsd 8("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c3"; vfmadd231ps %%ymm2,%%ymm3,"#c4";" +#define KERNEL_h_k1m8n8 KERNEL_h_k1m8n4 unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,%1,%%r12,1) +#define KERNEL_k1m8n8 KERNEL_h_k1m8n8 "addq $16,%1;" +#define KERNEL_h_k1m8n12 KERNEL_h_k1m8n8 unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,%1,%%r12,2) +#define KERNEL_k1m8n12 KERNEL_h_k1m8n12 "addq $16,%1;" +#define INIT_m8n1 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define INIT_m8n2 INIT_m8n1 "vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m8n4 INIT_m8n2 "vpxor %%ymm6,%%ymm6,%%ymm6;vpxor %%ymm7,%%ymm7,%%ymm7;" +#define unit_init_m8n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m8n8 INIT_m8n4 unit_init_m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11) +#define INIT_m8n12 INIT_m8n8 unit_init_m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15) +#define SAVE_m8n1 \ + "vunpcklps %%ymm4,%%ymm4,%%ymm2; vunpckhps %%ymm4,%%ymm4,%%ymm3;"\ + "vperm2f128 $2,%%ymm2,%%ymm3,%%ymm1; vperm2f128 $19,%%ymm2,%%ymm3,%%ymm2;"\ + "vfmadd213ps (%2),%%ymm0,%%ymm1; vfmadd213ps 32(%2),%%ymm0,%%ymm2; vmovups %%ymm1,(%2); vmovups %%ymm2,32(%2);" +#define unit_save_m8n2(c1,c2) \ + "vunpcklpd "#c2","#c1",%%ymm2; vunpckhpd "#c2","#c1",%%ymm3;"\ + "vperm2f128 $2,%%ymm2,%%ymm3,"#c1"; vperm2f128 $19,%%ymm2,%%ymm3,"#c2";"\ + "vmovsldup "#c1",%%ymm2; vmovsldup "#c2",%%ymm3;"\ + "vfmadd213ps (%5),%%ymm0,%%ymm2; vfmadd213ps 32(%5),%%ymm0,%%ymm3; vmovups %%ymm2,(%5); vmovups %%ymm3,32(%5);"\ + "vmovshdup "#c1",%%ymm2; vmovshdup "#c2",%%ymm3;"\ + "vfmadd213ps (%5,%3,1),%%ymm0,%%ymm2; vfmadd213ps 32(%5,%3,1),%%ymm0,%%ymm3; vmovups %%ymm2,(%5,%3,1); vmovups %%ymm3,32(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#define SAVE_m8n2 "movq %2,%5;" unit_save_m8n2(%%ymm4,%%ymm5) +#define SAVE_m8n4 SAVE_m8n2 unit_save_m8n2(%%ymm6,%%ymm7) +#define SAVE_m8n8 SAVE_m8n4 unit_save_m8n2(%%ymm8,%%ymm9) unit_save_m8n2(%%ymm10,%%ymm11) +#define SAVE_m8n12 SAVE_m8n8 unit_save_m8n2(%%ymm12,%%ymm13) unit_save_m8n2(%%ymm14,%%ymm15) +#define COMPUTE_m8(ndim) \ + INIT_m8n##ndim\ + "movq %%r13,%4; movq %%r14,%1; movq %2,%5; xorq %%r15,%%r15;"\ + "cmpq $24,%4; jb "#ndim"882f;"\ + #ndim"881:\n\t"\ + "cmpq $126,%%r15; movq $126,%%r15; cmoveq %3,%%r15;"\ + "prefetcht0 64(%1); prefetcht0 64(%1,%%r12,1); prefetcht0 64(%1,%%r12,2);"\ + "prefetcht0 512(%0);" KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\ + "prefetcht0 512(%0);" KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\ + "prefetcht1 (%5); leaq -63(%5,%%r15,1),%5;"\ + "prefetcht0 64(%1); prefetcht0 64(%1,%%r12,1); prefetcht0 64(%1,%%r12,2);"\ + "prefetcht0 512(%0);" KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\ + "prefetcht0 512(%0);" KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\ + "prefetcht1 (%8); addq $16,%8;"\ + "subq $8,%4; cmpq $24,%4; jnb "#ndim"881b;"\ + "movq %2,%5;"\ + #ndim"882:\n\t"\ + "testq %4,%4; jz "#ndim"883f;"\ + "prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5;"\ + KERNEL_k1m8n##ndim\ + "decq %4; jmp "#ndim"882b;"\ + #ndim"883:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14);"\ + SAVE_m8n##ndim "addq $64,%2;" + +/* m = 4 *//* xmm0 for alpha, xmm1-xmm3 for temporary use, xmm4-xmm15 for accumulators */ +#define KERNEL_k1m4n1 \ + "vmovups (%0),%%xmm1; addq $16,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,%1;" +#define KERNEL_h_k1m4n2 \ + "vmovsldup (%0),%%xmm1; vmovshdup (%0),%%xmm2; addq $16,%0;"\ + "vmovddup (%1),%%xmm3; 
vfmadd231ps %%xmm1,%%xmm3,%%xmm4; vfmadd231ps %%xmm2,%%xmm3,%%xmm5;" +#define KERNEL_k1m4n2 KERNEL_h_k1m4n2 "addq $8,%1;" +#define KERNEL_h_k1m4n4 \ + KERNEL_h_k1m4n2 "vmovddup 8(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm6; vfmadd231ps %%xmm2,%%xmm3,%%xmm7;" +#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;" +#define unit_kernel_k1m4n4(c1,c2,c3,c4,...) \ + "vmovddup ("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c1"; vfmadd231ps %%xmm2,%%xmm3,"#c2";"\ + "vmovddup 8("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c3"; vfmadd231ps %%xmm2,%%xmm3,"#c4";" +#define KERNEL_h_k1m4n8 KERNEL_h_k1m4n4 unit_kernel_k1m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11,%1,%%r12,1) +#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $16,%1;" +#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n8 unit_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,%1,%%r12,2) +#define KERNEL_k1m4n12 KERNEL_h_k1m4n12 "addq $16,%1;" +#define INIT_m4n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define INIT_m4n2 INIT_m4n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m4n4 INIT_m4n2 "vpxor %%xmm6,%%xmm6,%%xmm6;vpxor %%xmm7,%%xmm7,%%xmm7;" +#define unit_init_m4n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m4n8 INIT_m4n4 unit_init_m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11) +#define INIT_m4n12 INIT_m4n8 unit_init_m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15) +#define SAVE_m4n1 \ + "vunpcklps %%xmm4,%%xmm4,%%xmm2; vunpckhps %%xmm4,%%xmm4,%%xmm3;"\ + "vfmadd213ps (%2),%%xmm0,%%xmm2; vfmadd213ps 16(%2),%%xmm0,%%xmm3; vmovups %%xmm2,(%2); vmovups %%xmm3,16(%2);" +#define unit_save_m4n2(c1,c2) \ + "vunpcklpd "#c2","#c1",%%xmm2; vunpckhpd "#c2","#c1","#c2"; vmovapd %%xmm2,"#c1";"\ + "vmovsldup "#c1",%%xmm2; vmovsldup "#c2",%%xmm3;"\ + "vfmadd213ps (%5),%%xmm0,%%xmm2; vfmadd213ps 16(%5),%%xmm0,%%xmm3; vmovups %%xmm2,(%5); vmovups %%xmm3,16(%5);"\ + "vmovshdup "#c1",%%xmm2; vmovshdup "#c2",%%xmm3;"\ + "vfmadd213ps (%5,%3,1),%%xmm0,%%xmm2; vfmadd213ps 16(%5,%3,1),%%xmm0,%%xmm3; vmovups %%xmm2,(%5,%3,1); vmovups %%xmm3,16(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#define SAVE_m4n2 "movq %2,%5;" unit_save_m4n2(%%xmm4,%%xmm5) +#define SAVE_m4n4 SAVE_m4n2 unit_save_m4n2(%%xmm6,%%xmm7) +#define SAVE_m4n8 SAVE_m4n4 unit_save_m4n2(%%xmm8,%%xmm9) unit_save_m4n2(%%xmm10,%%xmm11) +#define SAVE_m4n12 SAVE_m4n8 unit_save_m4n2(%%xmm12,%%xmm13) unit_save_m4n2(%%xmm14,%%xmm15) +#define COMPUTE_m4(ndim) \ + INIT_m4n##ndim\ + "movq %%r13,%4; movq %%r14,%1;"\ + #ndim"442:\n\t"\ + "testq %4,%4; jz "#ndim"443f;"\ + KERNEL_k1m4n##ndim\ + "decq %4; jmp "#ndim"442b;"\ + #ndim"443:\n\t"\ + SAVE_m4n##ndim "addq $32,%2;" + +/* m = 2 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm9 for accumulators */ +#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m2n1 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,%1;" +#define SAVE_m2n1 \ + "vunpcklps %%xmm4,%%xmm4,%%xmm1; vfmadd213ps (%2),%%xmm0,%%xmm1; vmovups %%xmm1,(%2);" +#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define KERNEL_k1m2n2 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "vbroadcastss 4(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm5;"\ + "addq $8,%1;" +#define SAVE_m2n2 SAVE_m2n1 \ + "vunpcklps %%xmm5,%%xmm5,%%xmm1; vfmadd213ps (%2,%3,1),%%xmm0,%%xmm1; vmovups %%xmm1,(%2,%3,1);" +#define INIT_m2n4 INIT_m2n2 +#define INIT_m2n8 INIT_m2n4 "vpxor %%xmm6,%%xmm6,%%xmm6; vpxor %%xmm7,%%xmm7,%%xmm7;" +#define 
INIT_m2n12 INIT_m2n8 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;" +#define KERNEL_k1m2n4 \ + "vmovups (%1),%%xmm3; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "vbroadcastss 4(%0),%%xmm2; vfmadd231ps %%xmm3,%%xmm2,%%xmm5;"\ + "addq $8,%0;" +#define KERNEL_k1m2n8 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,1),%%xmm2; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4; vfmadd231ps %%xmm2,%%xmm1,%%xmm6;"\ + "vbroadcastss 4(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm5; vfmadd231ps %%xmm2,%%xmm1,%%xmm7;"\ + "addq $8,%0;" +#define KERNEL_k1m2n12 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,1),%%xmm2; vmovups (%1,%%r12,2),%%xmm1; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm4; vfmadd231ps %%xmm2,%%xmm10,%%xmm6; vfmadd231ps %%xmm1,%%xmm10,%%xmm8;"\ + "vbroadcastss 4(%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm5; vfmadd231ps %%xmm2,%%xmm10,%%xmm7; vfmadd231ps %%xmm1,%%xmm10,%%xmm9;"\ + "addq $8,%0;" +#define unit_save_m2n4(c1,c2) \ + "vunpcklpd "#c2","#c1",%%xmm1; vunpckhpd "#c2","#c1",%%xmm2;"\ + "vmovsldup %%xmm1,%%xmm3; vfmadd213ps (%5),%%xmm0,%%xmm3; vmovups %%xmm3,(%5);"\ + "vmovshdup %%xmm1,%%xmm3; vfmadd213ps (%5,%3,1),%%xmm0,%%xmm3; vmovups %%xmm3,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;"\ + "vmovsldup %%xmm2,%%xmm3; vfmadd213ps (%5),%%xmm0,%%xmm3; vmovups %%xmm3,(%5);"\ + "vmovshdup %%xmm2,%%xmm3; vfmadd213ps (%5,%3,1),%%xmm0,%%xmm3; vmovups %%xmm3,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#define SAVE_m2n4 "movq %2,%5;" unit_save_m2n4(%%xmm4,%%xmm5) +#define SAVE_m2n8 SAVE_m2n4 unit_save_m2n4(%%xmm6,%%xmm7) +#define SAVE_m2n12 SAVE_m2n8 unit_save_m2n4(%%xmm8,%%xmm9) +#define COMPUTE_m2(ndim) \ + INIT_m2n##ndim\ + "movq %%r13,%4; movq %%r14,%1;"\ + #ndim"222:\n\t"\ + "testq %4,%4; jz "#ndim"223f;"\ + KERNEL_k1m2n##ndim\ + "decq %4; jmp "#ndim"222b;"\ + #ndim"223:\n\t"\ + SAVE_m2n##ndim "addq $16,%2;" + +/* m = 1 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm6 for accumulators */ +#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m1n1 \ + "vmovss (%1),%%xmm3; addq $4,%1;"\ + "vmovss (%0),%%xmm1; vfmadd231ss %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define SAVE_m1n1 \ + "vunpcklps %%xmm4,%%xmm4,%%xmm4; vmovsd (%2),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);" +#define INIT_m1n2 INIT_m1n1 +#define KERNEL_k1m1n2 \ + "vmovsd (%1),%%xmm3; addq $8,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define SAVE_m1n2 \ + "vunpcklps %%xmm4,%%xmm4,%%xmm4; vmovsd (%2),%%xmm3; vmovhpd (%2,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm4;"\ + "vmovsd %%xmm4,(%2); vmovhpd %%xmm4,(%2,%3,1);" +#define INIT_m1n4 INIT_m1n2 +#define INIT_m1n8 INIT_m1n4 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m1n12 INIT_m1n8 "vpxor %%xmm6,%%xmm6,%%xmm6;" +#define KERNEL_k1m1n4 \ + "vmovups (%1),%%xmm3; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define KERNEL_k1m1n8 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,1),%%xmm2; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4; vfmadd231ps %%xmm2,%%xmm1,%%xmm5;"\ + "addq $4,%0;" +#define KERNEL_k1m1n12 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,1),%%xmm2; vmovups (%1,%%r12,2),%%xmm1; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm4; vfmadd231ps %%xmm2,%%xmm10,%%xmm5; vfmadd231ps %%xmm1,%%xmm10,%%xmm6;"\ + "addq $4,%0;" +#define unit_save_m1n4(c1) \ + "vunpcklps "#c1","#c1",%%xmm1; 
vunpckhps "#c1","#c1",%%xmm2;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\ + "vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\ + "vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_m1n4 "movq %2,%5;" unit_save_m1n4(%%xmm4) +#define SAVE_m1n8 SAVE_m1n4 unit_save_m1n4(%%xmm5) +#define SAVE_m1n12 SAVE_m1n8 unit_save_m1n4(%%xmm6) +#define COMPUTE_m1(ndim) \ + INIT_m1n##ndim\ + "movq %%r13,%4; movq %%r14,%1;"\ + #ndim"112:\n\t"\ + "testq %4,%4; jz "#ndim"113f;"\ + KERNEL_k1m1n##ndim\ + "decq %4; jmp "#ndim"112b;"\ + #ndim"113:\n\t"\ + SAVE_m1n##ndim "addq $8,%2;" + +/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 = "+r"(K), %5 = "+r"(ctemp) */ +/* %6 = "+r"(&alpha), %7 = "+r"(M), %8 = "+r"(next_b) */ +/* r11 = m(const), r12 = k << 4(const), r13 = k(const), r14 = b_head_pos(const),r15 = tmp */ + +#define COMPUTE(ndim) {\ + next_b = b_pointer + ndim * K;\ + __asm__ __volatile__(\ + "vbroadcastsd (%6),%%ymm0;"\ + "movq %4,%%r13; movq %4,%%r12; salq $4,%%r12; movq %1,%%r14; movq %7,%%r11;"\ + "cmpq $8,%7;jb 33101"#ndim"f;"\ + "33109"#ndim":\n\t"\ + COMPUTE_m8(ndim)\ + "subq $8,%7;cmpq $8,%7;jnb 33109"#ndim"b;"\ + "33101"#ndim":\n\t"\ + "cmpq $4,%7;jb 33103"#ndim"f;"\ + COMPUTE_m4(ndim)\ + "subq $4,%7;"\ + "33103"#ndim":\n\t"\ + "cmpq $2,%7;jb 33104"#ndim"f;"\ + COMPUTE_m2(ndim)\ + "subq $2,%7;"\ + "33104"#ndim":\n\t"\ + "testq %7,%7;jz 33105"#ndim"f;"\ + COMPUTE_m1(ndim)\ + "33105"#ndim":\n\t"\ + "movq %%r13,%4; movq %%r14,%1; movq %%r11,%7;"\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(const_val),"+r"(M),"+r"(next_b)\ + ::"r11","r12","r13","r14","r15",\ + "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15","cc","memory");\ + a_pointer -= M * K; b_pointer += ndim * K; c_pointer += 2*(LDC * ndim - M);\ +} + +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alphar, float alphai, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC) +{ + if(m==0||n==0||k==0) return 0; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(float) * 2; + float constval[2]; constval[0] = alphar; constval[1] = alphai; + float *const_val=constval; + int64_t M = (int64_t)m, K = (int64_t)k; + BLASLONG n_count = n; + float *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B; + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) COMPUTE(2) + if(n_count>0) COMPUTE(1) + return 0; +} diff --git a/kernel/x86_64/cgemm_kernel_8x2_haswell.c b/kernel/x86_64/cgemm_kernel_8x2_haswell.c new file mode 100644 index 000000000..08882346d --- /dev/null +++ b/kernel/x86_64/cgemm_kernel_8x2_haswell.c @@ -0,0 +1,291 @@ +#include "common.h" +#include + +/* recommended settings: GEMM_P = 256, GEMM_Q = 256 */ + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) + #define A_CONJ 0 + #define B_CONJ 0 +#endif +#if defined(RN) || defined(RT) || defined(CN) || defined(CT) + #define A_CONJ 1 + #define B_CONJ 0 +#endif +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) + #define A_CONJ 0 + #define B_CONJ 1 +#endif +#if defined(RR) || defined(RC) || defined(CR) || defined(CC) + #define A_CONJ 1 + #define B_CONJ 1 +#endif 
+ +/* %0 = a_ptr, %1 = b_ptr, %2 = c_ptr, %3 = c_tmp, %4 = ldc(bytes), %5 = k_counter, %6 = &alpha, %7 = m_counter, %8 = b_pref */ +/* r11 = m, r12 = k << 4, r13 = k, r14 = b_head, r15 = temp */ + +/* m=8, ymm 0-3 temp, ymm 4-15 acc */ +#if A_CONJ == B_CONJ + #define acc_m4n1_exp(ar,ai,b2,cl,cr) "vfmadd231ps %%ymm"#ar",%%ymm"#b2",%%ymm"#cl"; vfmadd231ps %%ymm"#ai",%%ymm"#b2",%%ymm"#cr";" + #define acc_m8n1_con(ua,la,b1,uc,lc) "vfmaddsub231ps %%ymm"#ua",%%ymm"#b1",%%ymm"#uc"; vfmaddsub231ps %%ymm"#la",%%ymm"#b1",%%ymm"#lc";" +#else + #define acc_m4n1_exp(ar,ai,b2,cl,cr) "vfmadd231ps %%ymm"#ar",%%ymm"#b2",%%ymm"#cl"; vfnmadd231ps %%ymm"#ai",%%ymm"#b2",%%ymm"#cr";" + #define acc_m8n1_con(ua,la,b1,uc,lc) "vfmsubadd231ps %%ymm"#ua",%%ymm"#b1",%%ymm"#uc"; vfmsubadd231ps %%ymm"#la",%%ymm"#b1",%%ymm"#lc";" +#endif +/* expanded accumulators for m8n1 and m8n2 */ +#define KERNEL_k1m8n1 \ + "vbroadcastsd (%1),%%ymm0; addq $8,%1;"\ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2;" acc_m4n1_exp(1,2,0,4,5)\ + "vmovsldup 32(%0),%%ymm1; vmovshdup 32(%0),%%ymm2;" acc_m4n1_exp(1,2,0,6,7)\ + "addq $64,%0;" +#define KERNEL_k1m8n2 \ + "vbroadcastsd (%1),%%ymm0; vbroadcastsd 8(%1),%%ymm1; addq $16,%1;"\ + "vmovsldup (%0),%%ymm2; vmovshdup (%0),%%ymm3;" acc_m4n1_exp(2,3,0,4,5) acc_m4n1_exp(2,3,1,8,9)\ + "vmovsldup 32(%0),%%ymm2; vmovshdup 32(%0),%%ymm3;" acc_m4n1_exp(2,3,0,6,7) acc_m4n1_exp(2,3,1,10,11)\ + "addq $64,%0;" +/* contracted accumulators for m8n4 and m8n6 */ +#define acc_m8n2_con(ua,la,luc,llc,ruc,rlc,lboff,rboff,...) \ + "vbroadcastss "#lboff"("#__VA_ARGS__"),%%ymm2;" acc_m8n1_con(ua,la,2,luc,llc)\ + "vbroadcastss "#rboff"("#__VA_ARGS__"),%%ymm3;" acc_m8n1_con(ua,la,3,ruc,rlc) +#define KERNEL_1_k1m8n4 \ + "vmovups (%0),%%ymm0; vmovups 32(%0),%%ymm1; prefetcht0 512(%0); addq $64,%0;"\ + acc_m8n2_con(0,1,4,5,6,7,0,8,%1) acc_m8n2_con(0,1,8,9,10,11,0,8,%1,%%r12,1) +#define KERNEL_2_k1m8n4 \ + "vpermilps $177,-64(%0),%%ymm0; vpermilps $177,-32(%0),%%ymm1;"\ + acc_m8n2_con(0,1,4,5,6,7,4,12,%1) acc_m8n2_con(0,1,8,9,10,11,4,12,%1,%%r12,1) +#define KERNEL_1_k1m8n6 KERNEL_1_k1m8n4 acc_m8n2_con(0,1,12,13,14,15,0,8,%1,%%r12,2) +#define KERNEL_2_k1m8n6 KERNEL_2_k1m8n4 acc_m8n2_con(0,1,12,13,14,15,4,12,%1,%%r12,2) +#define KERNEL_k1m8n4 KERNEL_1_k1m8n4 KERNEL_2_k1m8n4 "addq $16,%1;" +#define KERNEL_k1m8n6 KERNEL_1_k1m8n6 KERNEL_2_k1m8n6 "addq $16,%1;" +#define zero_4ymm(no1,no2,no3,no4) \ + "vpxor %%ymm"#no1",%%ymm"#no1",%%ymm"#no1"; vpxor %%ymm"#no2",%%ymm"#no2",%%ymm"#no2";"\ + "vpxor %%ymm"#no3",%%ymm"#no3",%%ymm"#no3"; vpxor %%ymm"#no4",%%ymm"#no4",%%ymm"#no4";" +/* initialization and storage macros */ +#define INIT_m8n1 zero_4ymm(4,5,6,7) +#define INIT_m8n2 zero_4ymm(4,5,6,7) zero_4ymm(8,9,10,11) +#define INIT_m8n4 zero_4ymm(4,5,6,7) zero_4ymm(8,9,10,11) +#define INIT_m8n6 INIT_m8n4 zero_4ymm(12,13,14,15) +#if A_CONJ == B_CONJ + #define cont_expacc(cl,cr,dst) "vpermilps $177,%%ymm"#cr",%%ymm"#cr"; vaddsubps %%ymm"#cl",%%ymm"#cr",%%ymm"#dst";" +#else + #define cont_expacc(cl,cr,dst) "vpermilps $177,%%ymm"#cr",%%ymm"#cr"; vaddsubps %%ymm"#cr",%%ymm"#cl",%%ymm"#dst";" +#endif +#if A_CONJ == 0 + #define save_1ymm(c,tmp,off,alpr,alpi,...) \ + "vpermilps $177,%%ymm"#c",%%ymm"#tmp"; vfmsubadd213ps "#off"("#__VA_ARGS__"),%%ymm"#alpr",%%ymm"#c";"\ + "vfmsubadd231ps %%ymm"#tmp",%%ymm"#alpi",%%ymm"#c"; vmovups %%ymm"#c","#off"("#__VA_ARGS__");" +#else + #define save_1ymm(c,tmp,off,alpr,alpi,...) 
\ + "vpermilps $177,%%ymm"#c",%%ymm"#tmp"; vfmaddsub213ps "#off"("#__VA_ARGS__"),%%ymm"#alpi",%%ymm"#tmp";"\ + "vfmaddsub231ps %%ymm"#c",%%ymm"#alpr",%%ymm"#tmp"; vmovups %%ymm"#tmp","#off"("#__VA_ARGS__");" +#endif +#define save_init_m8 "movq %2,%3; addq $64,%2; vbroadcastss (%6),%%ymm0; vbroadcastss 4(%6),%%ymm1;" +#define SAVE_m8n1 save_init_m8 cont_expacc(4,5,4) cont_expacc(6,7,6) save_1ymm(4,2,0,0,1,%3) save_1ymm(6,3,32,0,1,%3) +#define SAVE_m8n2 SAVE_m8n1\ + cont_expacc(8,9,8) cont_expacc(10,11,10) save_1ymm(8,2,0,0,1,%3,%4,1) save_1ymm(10,3,32,0,1,%3,%4,1) +#define SAVE_m8n4 save_init_m8\ + save_1ymm(4,2,0,0,1,%3) save_1ymm(5,3,32,0,1,%3) save_1ymm(6,2,0,0,1,%3,%4,1) save_1ymm(7,3,32,0,1,%3,%4,1) "leaq (%3,%4,2),%3;"\ + save_1ymm(8,2,0,0,1,%3) save_1ymm(9,3,32,0,1,%3) save_1ymm(10,2,0,0,1,%3,%4,1) save_1ymm(11,3,32,0,1,%3,%4,1) +#define SAVE_m8n6 SAVE_m8n4 "leaq (%3,%4,2),%3;"\ + save_1ymm(12,2,0,0,1,%3) save_1ymm(13,3,32,0,1,%3) save_1ymm(14,2,0,0,1,%3,%4,1) save_1ymm(15,3,32,0,1,%3,%4,1) +#define COMPUTE_m8(ndim) \ + "movq %%r14,%1;" INIT_m8n##ndim "movq %2,%3; movq %%r13,%5;"\ + "testq %5,%5; jz "#ndim"8883f; cmpq $10,%5; jb "#ndim"8882f;"\ + "movq $10,%5; movq $84,%%r15;"\ + #ndim"8881:\n\t"\ + "prefetcht1 (%3); subq $63,%3; addq %%r15,%3;"\ + KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\ + "testq $12,%5; movq $84,%%r15; cmovz %4,%%r15; prefetcht1 (%8); addq $16,%8;"\ + KERNEL_k1m8n##ndim KERNEL_k1m8n##ndim\ + "addq $4,%5; cmpq %5,%%r13; jnb "#ndim"8881b;"\ + "movq %2,%3; negq %5; leaq 10(%%r13,%5,1),%5; prefetcht0 (%6); prefetcht0 7(%6);"\ + #ndim"8882:\n\t"\ + "prefetcht0 (%3); prefetcht0 63(%3); addq %4,%3;"\ + KERNEL_k1m8n##ndim "decq %5; jnz "#ndim"8882b;"\ + #ndim"8883:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14);" SAVE_m8n##ndim + +/* m=4, ymm 0-3 temp, ymm 4-15 acc, expanded accumulators */ +#define KERNEL_k1m4n1 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; addq $32,%0;"\ + "vbroadcastsd (%1),%%ymm0;" acc_m4n1_exp(1,2,0,4,5) "addq $8,%1;" +#define acc_m4n2_exp(c1l,c1r,c2l,c2r,...) 
\ + "vbroadcastsd ("#__VA_ARGS__"),%%ymm2;" acc_m4n1_exp(0,1,2,c1l,c1r)\ + "vbroadcastsd 8("#__VA_ARGS__"),%%ymm3;" acc_m4n1_exp(0,1,3,c2l,c2r) +#define KERNEL_h_k1m4n2 \ + "vmovsldup (%0),%%ymm0; vmovshdup (%0),%%ymm1; addq $32,%0;" acc_m4n2_exp(4,5,6,7,%1) +#define KERNEL_h_k1m4n4 KERNEL_h_k1m4n2 acc_m4n2_exp(8,9,10,11,%1,%%r12,1) +#define KERNEL_h_k1m4n6 KERNEL_h_k1m4n4 acc_m4n2_exp(12,13,14,15,%1,%%r12,2) +#define KERNEL_k1m4n2 KERNEL_h_k1m4n2 "addq $16,%1;" +#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;" +#define KERNEL_k1m4n6 KERNEL_h_k1m4n6 "addq $16,%1;" +#define INIT_m4n1 "vpxor %%ymm4,%%ymm4,%%ymm4; vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m4n2 zero_4ymm(4,5,6,7) +#define INIT_m4n4 INIT_m4n2 zero_4ymm(8,9,10,11) +#define INIT_m4n6 INIT_m4n4 zero_4ymm(12,13,14,15) +#define save_init_m4 "movq %2,%3; addq $32,%2; vbroadcastss (%6),%%ymm0; vbroadcastss 4(%6),%%ymm1;" +#define SAVE_m4n1 save_init_m4 cont_expacc(4,5,4) save_1ymm(4,2,0,0,1,%3) +#define SAVE_m4n2 SAVE_m4n1 cont_expacc(6,7,6) save_1ymm(6,3,0,0,1,%3,%4,1) +#define SAVE_m4n4 SAVE_m4n2 "leaq (%3,%4,2),%3;"\ + cont_expacc(8,9,8) cont_expacc(10,11,10) save_1ymm(8,2,0,0,1,%3) save_1ymm(10,3,0,0,1,%3,%4,1) +#define SAVE_m4n6 SAVE_m4n4 "leaq (%3,%4,2),%3;"\ + cont_expacc(12,13,12) cont_expacc(14,15,14) save_1ymm(12,2,0,0,1,%3) save_1ymm(14,3,0,0,1,%3,%4,1) +#define COMPUTE_m4(ndim) \ + "movq %%r14,%1;" INIT_m4n##ndim "movq %%r13,%5;"\ + "testq %5,%5; jz "#ndim"4442f;"\ + #ndim"4441:\n\t"\ + KERNEL_k1m4n##ndim\ + "decq %5; jnz "#ndim"4441b;"\ + #ndim"4442:\n\t"\ + SAVE_m4n##ndim + +/* m=2, xmm 0-3 temp, xmm 4-15 acc, expanded accumulators */ +#if A_CONJ == B_CONJ + #define acc_m2n1_exp(ar,ai,b2,cl,cr) "vfmadd231ps %%xmm"#ar",%%xmm"#b2",%%xmm"#cl"; vfmadd231ps %%xmm"#ai",%%xmm"#b2",%%xmm"#cr";" +#else + #define acc_m2n1_exp(ar,ai,b2,cl,cr) "vfmadd231ps %%xmm"#ar",%%xmm"#b2",%%xmm"#cl"; vfnmadd231ps %%xmm"#ai",%%xmm"#b2",%%xmm"#cr";" +#endif +#define KERNEL_h_k1m2n1 \ + "vmovsldup (%0),%%xmm0; vmovshdup (%0),%%xmm1; addq $16,%0;"\ + "vmovddup (%1),%%xmm2;" acc_m2n1_exp(0,1,2,4,5) +#define KERNEL_h_k1m2n2 KERNEL_h_k1m2n1\ + "vmovddup 8(%1),%%xmm3;" acc_m2n1_exp(0,1,3,6,7) +#define acc_m2n2_exp(c1,c2,c3,c4,...)\ + "vmovddup ("#__VA_ARGS__"),%%xmm2;" acc_m2n1_exp(0,1,2,c1,c2)\ + "vmovddup 8("#__VA_ARGS__"),%%xmm3;" acc_m2n1_exp(0,1,3,c3,c4) +#define KERNEL_h_k1m2n4 KERNEL_h_k1m2n2 acc_m2n2_exp(8,9,10,11,%1,%%r12,1) +#define KERNEL_h_k1m2n6 KERNEL_h_k1m2n4 acc_m2n2_exp(12,13,14,15,%1,%%r12,2) +#define KERNEL_k1m2n1 KERNEL_h_k1m2n1 "addq $8,%1;" +#define KERNEL_k1m2n2 KERNEL_h_k1m2n2 "addq $16,%1;" +#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $16,%1;" +#define KERNEL_k1m2n6 KERNEL_h_k1m2n6 "addq $16,%1;" +#define zero_2xmm(no1,no2) "vpxor %%xmm"#no1",%%xmm"#no1",%%xmm"#no1"; vpxor %%xmm"#no2",%%xmm"#no2",%%xmm"#no2";" +#define INIT_m2n1 zero_2xmm(4,5) +#define INIT_m2n2 INIT_m2n1 zero_2xmm(6,7) +#define INIT_m2n4 INIT_m2n2 zero_2xmm(8,9) zero_2xmm(10,11) +#define INIT_m2n6 INIT_m2n4 zero_2xmm(12,13) zero_2xmm(14,15) +#if A_CONJ == B_CONJ + #define cont_expxmmacc(cl,cr,dst) "vpermilps $177,%%xmm"#cr",%%xmm"#cr"; vaddsubps %%xmm"#cl",%%xmm"#cr",%%xmm"#dst";" +#else + #define cont_expxmmacc(cl,cr,dst) "vpermilps $177,%%xmm"#cr",%%xmm"#cr"; vaddsubps %%xmm"#cr",%%xmm"#cl",%%xmm"#dst";" +#endif +#if A_CONJ == 0 + #define save_1xmm(c,tmp,alpr,alpi) \ + "vpermilps $177,%%xmm"#c",%%xmm"#tmp"; vfmsubadd213ps (%3),%%xmm"#alpr",%%xmm"#c";"\ + "vfmsubadd231ps %%xmm"#tmp",%%xmm"#alpi",%%xmm"#c"; vmovups %%xmm"#c",(%3); addq %4,%3;" +#else + 
#define save_1xmm(c,tmp,alpr,alpi) \ + "vpermilps $177,%%xmm"#c",%%xmm"#tmp"; vfmaddsub213ps (%3),%%xmm"#alpi",%%xmm"#tmp";"\ + "vfmaddsub231ps %%xmm"#c",%%xmm"#alpr",%%xmm"#tmp"; vmovups %%xmm"#tmp",(%3); addq %4,%3;" +#endif +#define save_init_m2 "movq %2,%3; addq $16,%2; vbroadcastss (%6),%%xmm0; vbroadcastss 4(%6),%%xmm1;" +#define SAVE_m2n1 save_init_m2 cont_expxmmacc(4,5,4) save_1xmm(4,2,0,1) +#define SAVE_m2n2 SAVE_m2n1 cont_expacc(6,7,6) save_1xmm(6,3,0,1) +#define SAVE_m2n4 SAVE_m2n2 cont_expacc(8,9,8) save_1xmm(8,2,0,1) cont_expacc(10,11,10) save_1xmm(10,3,0,1) +#define SAVE_m2n6 SAVE_m2n4 cont_expacc(12,13,12) save_1xmm(12,2,0,1) cont_expacc(14,15,14) save_1xmm(14,3,0,1) +#define COMPUTE_m2(ndim) \ + "movq %%r14,%1;" INIT_m2n##ndim "movq %%r13,%5;"\ + "testq %5,%5; jz "#ndim"2222f;"\ + #ndim"2221:\n\t"\ + KERNEL_k1m2n##ndim\ + "decq %5; jnz "#ndim"2221b;"\ + #ndim"2222:\n\t"\ + SAVE_m2n##ndim + +/* m=1, xmm 0-3 temp, xmm 4-9 acc, expanded accumulators */ +#if A_CONJ == B_CONJ + #define acc_m1n1_exp(ar,ai,b2,cl,cr) "vfmadd231ps %%xmm"#ar",%%xmm"#b2",%%xmm"#cl"; vfmadd231ps %%xmm"#ai",%%xmm"#b2",%%xmm"#cr";" + #define acc_m1n2_exp(arb,aib,b4,cl,cr) "vfmadd231ps %%xmm"#arb",%%xmm"#b4",%%xmm"#cl"; vfmadd231ps %%xmm"#aib",%%xmm"#b4",%%xmm"#cr";" +#else + #define acc_m1n1_exp(ar,ai,b2,cl,cr) "vfmadd231ps %%xmm"#ar",%%xmm"#b2",%%xmm"#cl"; vfnmadd231ps %%xmm"#ai",%%xmm"#b2",%%xmm"#cr";" + #define acc_m1n2_exp(arb,aib,b4,cl,cr) "vfmadd231ps %%xmm"#arb",%%xmm"#b4",%%xmm"#cl"; vfnmadd231ps %%xmm"#aib",%%xmm"#b4",%%xmm"#cr";" +#endif +#define KERNEL_k1m1n1 \ + "vbroadcastss (%0),%%xmm0; vbroadcastss 4(%0),%%xmm1; addq $8,%0;"\ + "vmovsd (%1),%%xmm2; addq $8,%1;" acc_m1n1_exp(0,1,2,4,5) +#define KERNEL_h_k1m1n2 \ + "vbroadcastss (%0),%%xmm0; vbroadcastss 4(%0),%%xmm1; addq $8,%0;"\ + "vmovups (%1),%%xmm2;" acc_m1n2_exp(0,1,2,4,5) +#define KERNEL_h_k1m1n4 KERNEL_h_k1m1n2 "vmovups (%1,%%r12,1),%%xmm2;" acc_m1n2_exp(0,1,2,6,7) +#define KERNEL_h_k1m1n6 KERNEL_h_k1m1n4 "vmovups (%1,%%r12,2),%%xmm2;" acc_m1n2_exp(0,1,2,8,9) +#define KERNEL_k1m1n2 KERNEL_h_k1m1n2 "addq $16,%1;" +#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $16,%1;" +#define KERNEL_k1m1n6 KERNEL_h_k1m1n6 "addq $16,%1;" +#define INIT_m1n1 zero_2xmm(4,5) +#define INIT_m1n2 zero_2xmm(4,5) +#define INIT_m1n4 INIT_m1n2 zero_2xmm(6,7) +#define INIT_m1n6 INIT_m1n4 zero_2xmm(8,9) +#if A_CONJ == 0 + #define save_m1n1(c,tmp1,tmp2,alpr,alpi) \ + "vpermilps $177,%%xmm"#c",%%xmm"#tmp1"; vmovsd (%3),%%xmm"#tmp2"; vfmsubadd213ps %%xmm"#tmp2",%%xmm"#alpr",%%xmm"#c";"\ + "vfmsubadd231ps %%xmm"#tmp1",%%xmm"#alpi",%%xmm"#c"; vmovsd %%xmm"#c",(%3);" + #define save_m1n2(c,tmp1,tmp2,alpr,alpi) \ + "vpermilps $177,%%xmm"#c",%%xmm"#tmp1"; vmovsd (%3),%%xmm"#tmp2"; vmovhpd (%3,%4,1),%%xmm"#tmp2",%%xmm"#tmp2";"\ + "vfmsubadd213ps %%xmm"#tmp2",%%xmm"#alpr",%%xmm"#c"; vfmsubadd231ps %%xmm"#tmp1",%%xmm"#alpi",%%xmm"#c";"\ + "vmovsd %%xmm"#c",(%3); vmovhpd %%xmm"#c",(%3,%4,1); leaq (%3,%4,2),%3;" +#else + #define save_m1n1(c,tmp1,tmp2,alpr,alpi) \ + "vpermilps $177,%%xmm"#c",%%xmm"#tmp1"; vmovsd (%3),%%xmm"#tmp2"; vfmaddsub213ps %%xmm"#tmp2",%%xmm"#alpi",%%xmm"#tmp1";"\ + "vfmaddsub231ps %%xmm"#c",%%xmm"#alpr",%%xmm"#tmp1"; vmovsd %%xmm"#tmp1",(%3);" + #define save_m1n2(c,tmp1,tmp2,alpr,alpi) \ + "vpermilps $177,%%xmm"#c",%%xmm"#tmp1"; vmovsd (%3),%%xmm"#tmp2"; vmovhpd (%3,%4,1),%%xmm"#tmp2",%%xmm"#tmp2";"\ + "vfmaddsub213ps %%xmm"#tmp2",%%xmm"#alpi",%%xmm"#tmp1"; vfmaddsub231ps %%xmm"#c",%%xmm"#alpr",%%xmm"#tmp1";"\ + "vmovsd %%xmm"#tmp1",(%3); vmovhpd 
%%xmm"#tmp1",(%3,%4,1); leaq (%3,%4,2),%3;" +#endif +#define save_init_m1 "movq %2,%3; addq $8,%2; vbroadcastss (%6),%%xmm0; vbroadcastss 4(%6),%%xmm1;" +#define SAVE_m1n1 save_init_m1 cont_expxmmacc(4,5,4) save_m1n1(4,2,3,0,1) +#define SAVE_m1n2 save_init_m1 cont_expxmmacc(4,5,4) save_m1n2(4,2,3,0,1) +#define SAVE_m1n4 SAVE_m1n2 cont_expxmmacc(6,7,6) save_m1n2(6,2,3,0,1) +#define SAVE_m1n6 SAVE_m1n4 cont_expxmmacc(8,9,8) save_m1n2(8,2,3,0,1) +#define COMPUTE_m1(ndim) \ + "movq %%r14,%1;" INIT_m1n##ndim "movq %%r13,%5;"\ + "testq %5,%5; jz "#ndim"1112f;"\ + #ndim"1111:\n\t"\ + KERNEL_k1m1n##ndim\ + "decq %5; jnz "#ndim"1111b;"\ + #ndim"1112:\n\t"\ + SAVE_m1n##ndim + +#define COMPUTE(ndim) {\ + b_pref = b_ptr + ndim * K *2;\ + __asm__ __volatile__ (\ + "movq %1,%%r14; movq %5,%%r13; movq %5,%%r12; salq $4,%%r12; movq %7,%%r11;"\ + "cmpq $8,%7; jb "#ndim"9992f;"\ + #ndim"9991:\n\t"\ + COMPUTE_m8(ndim)\ + "subq $8,%7; cmpq $8,%7; jnb "#ndim"9991b;"\ + #ndim"9992:\n\t"\ + "cmpq $4,%7; jb "#ndim"9993f;"\ + COMPUTE_m4(ndim) "subq $4,%7;"\ + #ndim"9993:\n\t"\ + "cmpq $2,%7; jb "#ndim"9994f;"\ + COMPUTE_m2(ndim) "subq $2,%7;"\ + #ndim"9994:\n\t"\ + "testq %7,%7; jz "#ndim"9995f;"\ + COMPUTE_m1(ndim)\ + #ndim"9995:\n\t"\ + "movq %%r14,%1; movq %%r13,%5; movq %%r11,%7; vzeroupper;"\ + :"+r"(a_ptr),"+r"(b_ptr),"+r"(c_ptr),"+r"(c_tmp),"+r"(ldc_in_bytes),"+r"(K),"+r"(alp),"+r"(M),"+r"(b_pref)\ + ::"cc","memory","r11","r12","r13","r14","r15","xmm0","xmm1","xmm2","xmm3","xmm4","xmm5",\ + "xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15");\ + a_ptr -= M * K *2; b_ptr += ndim * K *2; c_ptr += (ndim * LDC - M) * 2;\ +} + +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alphar, float alphai, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC) +{ + if(m==0||n==0||k==0||(alphar==0.0 && alphai==0.0)) return 0; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(float) * 2; +#if A_CONJ == B_CONJ + float const_val[2] = {-alphar, -alphai}; +#else + float const_val[2] = {alphar, alphai}; +#endif + int64_t M = (int64_t)m, K = (int64_t)k; + BLASLONG n_count = n; + float *a_ptr = A,*b_ptr = B,*c_ptr = C,*c_tmp = C,*alp = const_val,*b_pref = B; + for(;n_count>5;n_count-=6) COMPUTE(6) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) COMPUTE(2) + if(n_count>0) COMPUTE(1) + return 0; +} diff --git a/kernel/x86_64/cgemm_kernel_8x2_skylakex.c b/kernel/x86_64/cgemm_kernel_8x2_skylakex.c new file mode 100644 index 000000000..35a57b98a --- /dev/null +++ b/kernel/x86_64/cgemm_kernel_8x2_skylakex.c @@ -0,0 +1,352 @@ +#include +#include "common.h" + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) + #define CGEMM_SKX_MODE 0 //not to do conjugation on a_block and b_block +#endif +#if defined(RN) || defined(RT) || defined(CN) || defined(CT) + #define CGEMM_SKX_MODE 1 //do conjugation on a_block, not b_block +#endif +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) + #define CGEMM_SKX_MODE 2 //do conjugation on b_block, not a_block +#endif +#if defined(RR) || defined(RC) || defined(CR) || defined(CC) + #define CGEMM_SKX_MODE 3 //do conjugation on a_block and b_block +#endif + +// recommended settings: GEMM_DEFAULT_Q = 192, GEMM_DEFAULT_P = 384 +/* %0=a_pointer, %1=b_pointer, %2=c_pointer, %3=c_store, %4=ldc(bytes), %5=&constval, %6 = k_counter, %7 = m_counter, %8 = b_pref */ +// const float constval[4] = {alpha_r, alpha_i, -1, 1}; +/* r11 = m; r12 = k * 16; r13 = k; r14 = b_head; r15 = %1 + r12 * 3; */ 
+#define GENERAL_INIT "movq %7,%%r11; movq %1,%%r14; movq %6,%%r13; movq %6,%%r12; salq $4,%%r12;" +#define GENERAL_RECOVER "movq %%r11,%7; movq %%r13,%6; movq %%r14,%1;" +#define CONSTZMM_INIT "vbroadcastss (%5),%%zmm0; vbroadcastss 4(%5),%%zmm1; vbroadcastsd 8(%5),%%zmm2;" +#define COMPUTE_INIT "movq %%r13,%6; movq %%r14,%1; leaq (%%r14,%%r12,2),%%r15; addq %%r12,%%r15;" + +/* m=8, zmm0=alpha_r, zmm1=alpha_i, zmm2={-1,1,...,-1,1}, zmm3-zmm7 for temporary use, zmm8-zmm31 for accumulators */ +#if CGEMM_SKX_MODE == 0 || CGEMM_SKX_MODE == 2 //not to do conjugation on a_block + #define unit_kernel_k1m8n1(a_r,a_i,b_off,c_le,c_ri,...) \ + "vbroadcastsd "#b_off"("#__VA_ARGS__"),%%zmm3; vfmadd231ps "#a_r",%%zmm3,"#c_le"; vfmadd231ps "#a_i",%%zmm3,"#c_ri";" +#else //do conjugation on a_block + #define unit_kernel_k1m8n1(a_r,a_i,b_off,c_le,c_ri,...) \ + "vbroadcastsd "#b_off"("#__VA_ARGS__"),%%zmm3; vfmadd231ps "#a_r",%%zmm3,"#c_le"; vfnmadd231ps "#a_i",%%zmm3,"#c_ri";" +#endif +#define KERNEL_h_k1m8n1 \ + "vmovsldup (%0),%%zmm4; vmovshdup (%0),%%zmm5; prefetcht0 512(%0); addq $64,%0;"\ + unit_kernel_k1m8n1(%%zmm4,%%zmm5,0,%%zmm8,%%zmm9,%1) +#define KERNEL_t_k1m8n1 KERNEL_h_k1m8n1 "addq $8,%1;" +#define KERNEL_h_k1m8n2 KERNEL_h_k1m8n1 unit_kernel_k1m8n1(%%zmm4,%%zmm5,8,%%zmm10,%%zmm11,%1) +#define KERNEL_t_k1m8n2 KERNEL_h_k1m8n2 "addq $16,%1;" +#define unit_kernel_k1m8n2(c1le,c1ri,c2le,c2ri,...) \ + unit_kernel_k1m8n1(%%zmm4,%%zmm5,0,c1le,c1ri,__VA_ARGS__)\ + unit_kernel_k1m8n1(%%zmm4,%%zmm5,8,c2le,c2ri,__VA_ARGS__) +#define KERNEL_h_k1m8n4 KERNEL_h_k1m8n2 unit_kernel_k1m8n2(%%zmm12,%%zmm13,%%zmm14,%%zmm15,%1,%%r12,1) +#define KERNEL_t_k1m8n4 KERNEL_h_k1m8n4 "addq $16,%1;" +#define KERNEL_t_k1m8n6 KERNEL_h_k1m8n4 unit_kernel_k1m8n2(%%zmm16,%%zmm17,%%zmm18,%%zmm19,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m8n8 KERNEL_t_k1m8n6 unit_kernel_k1m8n2(%%zmm20,%%zmm21,%%zmm22,%%zmm23,%%r15) +#define KERNEL_t_k1m8n8 KERNEL_h_k1m8n8 "addq $16,%%r15;" +#define KERNEL_h_k1m8n10 KERNEL_h_k1m8n8 unit_kernel_k1m8n2(%%zmm24,%%zmm25,%%zmm26,%%zmm27,%%r15,%%r12,1) +#define KERNEL_t_k1m8n10 KERNEL_h_k1m8n10 "addq $16,%%r15;" +#define KERNEL_h_k1m8n12 KERNEL_h_k1m8n10 unit_kernel_k1m8n2(%%zmm28,%%zmm29,%%zmm30,%%zmm31,%%r15,%%r12,2) +#define KERNEL_t_k1m8n12 KERNEL_h_k1m8n12 "addq $16,%%r15;" +#if CGEMM_SKX_MODE == 0 || CGEMM_SKX_MODE == 1 //not to do conjugation on b_block + #define unit_save_m8n1(c_le,c_ri,...) \ + "vpermilps $177,"#c_ri","#c_ri"; vfmadd231ps "#c_ri",%%zmm2,"#c_le"; vpermilps $177,"#c_le",%%zmm4;"\ + "vfmaddsub213ps ("#__VA_ARGS__"),%%zmm1,%%zmm4; vfmaddsub213ps %%zmm4,%%zmm0,"#c_le"; vmovups "#c_le",("#__VA_ARGS__");" +#else //do conjugation on b_block + #define unit_save_m8n1(c_le,c_ri,...) 
\ + "vpermilps $177,"#c_ri","#c_ri"; vfnmadd231ps "#c_ri",%%zmm2,"#c_le"; vpermilps $177,"#c_le",%%zmm4;"\ + "vfmsubadd213ps ("#__VA_ARGS__"),%%zmm0,"#c_le"; vfmsubadd231ps %%zmm4,%%zmm1,"#c_le"; vmovups "#c_le",("#__VA_ARGS__");" +#endif +#define SAVE_SETUP_m8 "movq %2,%3; addq $64,%2;" +#define SAVE_m8n1 SAVE_SETUP_m8 unit_save_m8n1(%%zmm8,%%zmm9,%3) +#define SAVE_m8n2 SAVE_m8n1 unit_save_m8n1(%%zmm10,%%zmm11,%3,%4,1) +#define unit_save_m8n2(c1le,c1ri,c2le,c2ri) \ + "leaq (%3,%4,2),%3;" unit_save_m8n1(c1le,c1ri,%3) unit_save_m8n1(c2le,c2ri,%3,%4,1) +#define SAVE_m8n4 SAVE_m8n2 unit_save_m8n2(%%zmm12,%%zmm13,%%zmm14,%%zmm15) +#define SAVE_m8n6 SAVE_m8n4 unit_save_m8n2(%%zmm16,%%zmm17,%%zmm18,%%zmm19) +#define SAVE_m8n8 SAVE_m8n6 unit_save_m8n2(%%zmm20,%%zmm21,%%zmm22,%%zmm23) +#define SAVE_m8n10 SAVE_m8n8 unit_save_m8n2(%%zmm24,%%zmm25,%%zmm26,%%zmm27) +#define SAVE_m8n12 SAVE_m8n10 unit_save_m8n2(%%zmm28,%%zmm29,%%zmm30,%%zmm31) +#define unit_init_m8n1(c_le,c_ri) "vpxorq "#c_le","#c_le","#c_le"; vpxorq "#c_ri","#c_ri","#c_ri";" +#define INIT_m8n1 unit_init_m8n1(%%zmm8,%%zmm9) +#define INIT_m8n2 INIT_m8n1 unit_init_m8n1(%%zmm10,%%zmm11) +#define INIT_m8n4 INIT_m8n2 unit_init_m8n1(%%zmm12,%%zmm13) unit_init_m8n1(%%zmm14,%%zmm15) +#define INIT_m8n6 INIT_m8n4 unit_init_m8n1(%%zmm16,%%zmm17) unit_init_m8n1(%%zmm18,%%zmm19) +#define INIT_m8n8 INIT_m8n6 unit_init_m8n1(%%zmm20,%%zmm21) unit_init_m8n1(%%zmm22,%%zmm23) +#define INIT_m8n10 INIT_m8n8 unit_init_m8n1(%%zmm24,%%zmm25) unit_init_m8n1(%%zmm26,%%zmm27) +#define INIT_m8n12 INIT_m8n10 unit_init_m8n1(%%zmm28,%%zmm29) unit_init_m8n1(%%zmm30,%%zmm31) +#define COMPUTE_m8(ndim) \ + INIT_m8n##ndim\ + COMPUTE_INIT "movq %2,%3;"\ + "cmpq $18,%6; jb "#ndim"88880f;"\ + #ndim"88889:\n\t"\ + KERNEL_t_k1m8n##ndim\ + KERNEL_t_k1m8n##ndim\ + KERNEL_t_k1m8n##ndim\ + "prefetcht1 (%3); prefetcht1 63(%3); addq %4,%3;"\ + KERNEL_t_k1m8n##ndim\ + KERNEL_t_k1m8n##ndim\ + KERNEL_t_k1m8n##ndim\ + "prefetcht1 (%8); addq $40,%8;"\ + "subq $6,%6; cmpq $18,%6; jnb "#ndim"88889b;"\ + "movq %2,%3;"\ + #ndim"88880:\n\t"\ + "testq %6,%6; jz "#ndim"88881f;"\ + "prefetcht0 (%3); prefetcht0 63(%3); addq %4,%3;"\ + KERNEL_t_k1m8n##ndim\ + "decq %6; jmp "#ndim"88880b;"\ + #ndim"88881:\n\t"\ + SAVE_m8n##ndim + +/* m=4, ymm0-ymm3 for temporary use, ymm4-ymm15 for accumulators */ +#if CGEMM_SKX_MODE == 0 || CGEMM_SKX_MODE == 3 //conjg_a == conjg_b; ap = permilps($177,a0) + #define unit_kernel_k1m4n1(a0,ap,b_off_r,b_off_i,c1,...) \ + "vbroadcastss "#b_off_i"("#__VA_ARGS__"),%%ymm2; vfmaddsub231ps "#ap",%%ymm2,"#c1";"\ + "vbroadcastss "#b_off_r"("#__VA_ARGS__"),%%ymm2; vfmaddsub231ps "#a0",%%ymm2,"#c1";" +#else //conjg_a != conjg_b + #define unit_kernel_k1m4n1(a0,ap,b_off_r,b_off_i,c1,...) \ + "vbroadcastss "#b_off_i"("#__VA_ARGS__"),%%ymm2; vfmsubadd231ps "#ap",%%ymm2,"#c1";"\ + "vbroadcastss "#b_off_r"("#__VA_ARGS__"),%%ymm2; vfmsubadd231ps "#a0",%%ymm2,"#c1";" +#endif +#define KERNEL_h_k1m4n1 \ + "vmovups (%0),%%ymm0; vpermilps $177,%%ymm0,%%ymm1; addq $32,%0;"\ + unit_kernel_k1m4n1(%%ymm0,%%ymm1,0,4,%%ymm4,%1) +#define KERNEL_t_k1m4n1 KERNEL_h_k1m4n1 "addq $8,%1;" +#define KERNEL_h_k1m4n2 KERNEL_h_k1m4n1 unit_kernel_k1m4n1(%%ymm0,%%ymm1,8,12,%%ymm5,%1) +#define KERNEL_t_k1m4n2 KERNEL_h_k1m4n2 "addq $16,%1;" +#define unit_kernel_k1m4n2(c1,c2,...) 
\ + unit_kernel_k1m4n1(%%ymm0,%%ymm1,0,4,c1,__VA_ARGS__)\ + unit_kernel_k1m4n1(%%ymm0,%%ymm1,8,12,c2,__VA_ARGS__) +#define KERNEL_h_k1m4n4 KERNEL_h_k1m4n2 unit_kernel_k1m4n2(%%ymm6,%%ymm7,%1,%%r12,1) +#define KERNEL_t_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;" +#define KERNEL_t_k1m4n6 KERNEL_h_k1m4n4 unit_kernel_k1m4n2(%%ymm8,%%ymm9,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m4n8 KERNEL_t_k1m4n6 unit_kernel_k1m4n2(%%ymm10,%%ymm11,%%r15) +#define KERNEL_t_k1m4n8 KERNEL_h_k1m4n8 "addq $16,%%r15;" +#define KERNEL_h_k1m4n10 KERNEL_h_k1m4n8 unit_kernel_k1m4n2(%%ymm12,%%ymm13,%%r15,%%r12,1) +#define KERNEL_t_k1m4n10 KERNEL_h_k1m4n10 "addq $16,%%r15;" +#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n10 unit_kernel_k1m4n2(%%ymm14,%%ymm15,%%r15,%%r12,2) +#define KERNEL_t_k1m4n12 KERNEL_h_k1m4n12 "addq $16,%%r15;" +#if CGEMM_SKX_MODE == 0 || CGEMM_SKX_MODE == 2 //not to do conjugation on a_block + #define unit_save_m4n1(alp_r,alp_i,c1,...) \ + "vpermilps $177,"#c1",%%ymm3; vfmaddsub213ps ("#__VA_ARGS__"),"#alp_i",%%ymm3;"\ + "vfmaddsub213ps %%ymm3,"#alp_r","#c1";vmovups "#c1",("#__VA_ARGS__");" +#else //do conjugation on a_block + #define unit_save_m4n1(alp_r,alp_i,c1,...) \ + "vpermilps $177,"#c1",%%ymm3; vfmsubadd213ps ("#__VA_ARGS__"),"#alp_r","#c1";"\ + "vfmsubadd231ps %%ymm3,"#alp_i","#c1";vmovups "#c1",("#__VA_ARGS__");" +#endif +#define SAVE_SETUP_m4 "movq %2,%3; addq $32,%2; vbroadcastss (%5),%%ymm0; vbroadcastss 4(%5),%%ymm1;" +#define SAVE_m4n1 SAVE_SETUP_m4 unit_save_m4n1(%%ymm0,%%ymm1,%%ymm4,%3) +#define SAVE_m4n2 SAVE_m4n1 unit_save_m4n1(%%ymm0,%%ymm1,%%ymm5,%3,%4,1) +#define unit_save_m4n2(c1,c2) \ + "leaq (%3,%4,2),%3;" unit_save_m4n1(%%ymm0,%%ymm1,c1,%3) unit_save_m4n1(%%ymm0,%%ymm1,c2,%3,%4,1) +#define SAVE_m4n4 SAVE_m4n2 unit_save_m4n2(%%ymm6,%%ymm7) +#define SAVE_m4n6 SAVE_m4n4 unit_save_m4n2(%%ymm8,%%ymm9) +#define SAVE_m4n8 SAVE_m4n6 unit_save_m4n2(%%ymm10,%%ymm11) +#define SAVE_m4n10 SAVE_m4n8 unit_save_m4n2(%%ymm12,%%ymm13) +#define SAVE_m4n12 SAVE_m4n10 unit_save_m4n2(%%ymm14,%%ymm15) +#define INIT_m4n1 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define unit_init_m4n2(c1,c2) "vpxor "#c1","#c1","#c1"; vpxor "#c2","#c2","#c2";" +#define INIT_m4n2 unit_init_m4n2(%%ymm4,%%ymm5) +#define INIT_m4n4 INIT_m4n2 unit_init_m4n2(%%ymm6,%%ymm7) +#define INIT_m4n6 INIT_m4n4 unit_init_m4n2(%%ymm8,%%ymm9) +#define INIT_m4n8 INIT_m4n6 unit_init_m4n2(%%ymm10,%%ymm11) +#define INIT_m4n10 INIT_m4n8 unit_init_m4n2(%%ymm12,%%ymm13) +#define INIT_m4n12 INIT_m4n10 unit_init_m4n2(%%ymm14,%%ymm15) +#define COMPUTE_m4(ndim) \ + INIT_m4n##ndim\ + COMPUTE_INIT\ + #ndim"88440:\n\t"\ + "testq %6,%6; jz "#ndim"88441f;"\ + KERNEL_t_k1m4n##ndim\ + "decq %6; jmp "#ndim"88440b;"\ + #ndim"88441:\n\t"\ + SAVE_m4n##ndim + +/* m=2, xmm0-xmm3 for temporary use, xmm4-xmm15 for accumulators */ +#if CGEMM_SKX_MODE == 0 || CGEMM_SKX_MODE == 3 //conjg_a == conjg_b; + #define unit_kernel_k1m2n1(a0,ap,b_off_r,b_off_i,c1,...) \ + "vbroadcastss "#b_off_i"("#__VA_ARGS__"),%%xmm2; vfmaddsub231ps "#ap",%%xmm2,"#c1";"\ + "vbroadcastss "#b_off_r"("#__VA_ARGS__"),%%xmm2; vfmaddsub231ps "#a0",%%xmm2,"#c1";" +#else //conjg_a != conjg_b + #define unit_kernel_k1m2n1(a0,ap,b_off_r,b_off_i,c1,...) 
\ + "vbroadcastss "#b_off_i"("#__VA_ARGS__"),%%xmm2; vfmsubadd231ps "#ap",%%xmm2,"#c1";"\ + "vbroadcastss "#b_off_r"("#__VA_ARGS__"),%%xmm2; vfmsubadd231ps "#a0",%%xmm2,"#c1";" +#endif +#define KERNEL_h_k1m2n1 \ + "vmovups (%0),%%xmm0; vpermilps $177,%%xmm0,%%xmm1; addq $16,%0;"\ + unit_kernel_k1m2n1(%%xmm0,%%xmm1,0,4,%%xmm4,%1) +#define KERNEL_t_k1m2n1 KERNEL_h_k1m2n1 "addq $8,%1;" +#define KERNEL_h_k1m2n2 KERNEL_h_k1m2n1 unit_kernel_k1m2n1(%%xmm0,%%xmm1,8,12,%%xmm5,%1) +#define KERNEL_t_k1m2n2 KERNEL_h_k1m2n2 "addq $16,%1;" +#define unit_kernel_k1m2n2(c1,c2,...) \ + unit_kernel_k1m2n1(%%xmm0,%%xmm1,0,4,c1,__VA_ARGS__)\ + unit_kernel_k1m2n1(%%xmm0,%%xmm1,8,12,c2,__VA_ARGS__) +#define KERNEL_h_k1m2n4 KERNEL_h_k1m2n2 unit_kernel_k1m2n2(%%xmm6,%%xmm7,%1,%%r12,1) +#define KERNEL_t_k1m2n4 KERNEL_h_k1m2n4 "addq $16,%1;" +#define KERNEL_t_k1m2n6 KERNEL_h_k1m2n4 unit_kernel_k1m2n2(%%xmm8,%%xmm9,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m2n8 KERNEL_t_k1m2n6 unit_kernel_k1m2n2(%%xmm10,%%xmm11,%%r15) +#define KERNEL_t_k1m2n8 KERNEL_h_k1m2n8 "addq $16,%%r15;" +#define KERNEL_h_k1m2n10 KERNEL_h_k1m2n8 unit_kernel_k1m2n2(%%xmm12,%%xmm13,%%r15,%%r12,1) +#define KERNEL_t_k1m2n10 KERNEL_h_k1m2n10 "addq $16,%%r15;" +#define KERNEL_h_k1m2n12 KERNEL_h_k1m2n10 unit_kernel_k1m2n2(%%xmm14,%%xmm15,%%r15,%%r12,2) +#define KERNEL_t_k1m2n12 KERNEL_h_k1m2n12 "addq $16,%%r15;" +#if CGEMM_SKX_MODE == 0 || CGEMM_SKX_MODE == 2 //not to do conjugation on a_block + #define unit_save_m2n1(alp_r,alp_i,c1,...) \ + "vpermilps $177,"#c1",%%xmm3; vfmaddsub213ps ("#__VA_ARGS__"),"#alp_i",%%xmm3;"\ + "vfmaddsub213ps %%xmm3,"#alp_r","#c1";vmovups "#c1",("#__VA_ARGS__");" +#else //do conjugation on a_block + #define unit_save_m2n1(alp_r,alp_i,c1,...) \ + "vpermilps $177,"#c1",%%xmm3; vfmsubadd213ps ("#__VA_ARGS__"),"#alp_r","#c1";"\ + "vfmsubadd231ps %%xmm3,"#alp_i","#c1";vmovups "#c1",("#__VA_ARGS__");" +#endif +#define SAVE_SETUP_m2 "movq %2,%3; addq $16,%2; vbroadcastss (%5),%%xmm0; vbroadcastss 4(%5),%%xmm1;" +#define SAVE_m2n1 SAVE_SETUP_m2 unit_save_m2n1(%%xmm0,%%xmm1,%%xmm4,%3) +#define SAVE_m2n2 SAVE_m2n1 unit_save_m2n1(%%xmm0,%%xmm1,%%xmm5,%3,%4,1) +#define unit_save_m2n2(c1,c2) \ + "leaq (%3,%4,2),%3;" unit_save_m2n1(%%xmm0,%%xmm1,c1,%3) unit_save_m2n1(%%xmm0,%%xmm1,c2,%3,%4,1) +#define SAVE_m2n4 SAVE_m2n2 unit_save_m2n2(%%xmm6,%%xmm7) +#define SAVE_m2n6 SAVE_m2n4 unit_save_m2n2(%%xmm8,%%xmm9) +#define SAVE_m2n8 SAVE_m2n6 unit_save_m2n2(%%xmm10,%%xmm11) +#define SAVE_m2n10 SAVE_m2n8 unit_save_m2n2(%%xmm12,%%xmm13) +#define SAVE_m2n12 SAVE_m2n10 unit_save_m2n2(%%xmm14,%%xmm15) +#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define unit_init_m2n2(c1,c2) "vpxor "#c1","#c1","#c1"; vpxor "#c2","#c2","#c2";" +#define INIT_m2n2 unit_init_m2n2(%%xmm4,%%xmm5) +#define INIT_m2n4 INIT_m2n2 unit_init_m2n2(%%xmm6,%%xmm7) +#define INIT_m2n6 INIT_m2n4 unit_init_m2n2(%%xmm8,%%xmm9) +#define INIT_m2n8 INIT_m2n6 unit_init_m2n2(%%xmm10,%%xmm11) +#define INIT_m2n10 INIT_m2n8 unit_init_m2n2(%%xmm12,%%xmm13) +#define INIT_m2n12 INIT_m2n10 unit_init_m2n2(%%xmm14,%%xmm15) +#define COMPUTE_m2(ndim) \ + INIT_m2n##ndim\ + COMPUTE_INIT\ + #ndim"88220:\n\t"\ + "testq %6,%6; jz "#ndim"88221f;"\ + KERNEL_t_k1m2n##ndim\ + "decq %6; jmp "#ndim"88220b;"\ + #ndim"88221:\n\t"\ + SAVE_m2n##ndim + +/* m=1, xmm0-xmm3 and xmm10-xmm15 for temporary use, xmm4-xmm9 for accumulators */ +#if CGEMM_SKX_MODE == 0 || CGEMM_SKX_MODE == 3 //conjg_a == conjg_b; ap = permilps($177,a0) + #define unit_kernel_k1m1n1(a0,ap,b_off_r,b_off_i,c1,...) 
\
+    "vbroadcastss "#b_off_i"("#__VA_ARGS__"),%%xmm2; vfmaddsub231ps "#ap",%%xmm2,"#c1";"\
+    "vbroadcastss "#b_off_r"("#__VA_ARGS__"),%%xmm2; vfmaddsub231ps "#a0",%%xmm2,"#c1";"
+  #define unit_kernel_k1m1n2(a0,ap,c1,...) \
+    "vmovshdup ("#__VA_ARGS__"),%%xmm2; vfmaddsub231ps "#ap",%%xmm2,"#c1";"\
+    "vmovsldup ("#__VA_ARGS__"),%%xmm2; vfmaddsub231ps "#a0",%%xmm2,"#c1";"
+#else //conjg_a != conjg_b
+  #define unit_kernel_k1m1n1(a0,ap,b_off_r,b_off_i,c1,...) \
+    "vbroadcastss "#b_off_i"("#__VA_ARGS__"),%%xmm2; vfmsubadd231ps "#ap",%%xmm2,"#c1";"\
+    "vbroadcastss "#b_off_r"("#__VA_ARGS__"),%%xmm2; vfmsubadd231ps "#a0",%%xmm2,"#c1";"
+  #define unit_kernel_k1m1n2(a0,ap,c1,...) \
+    "vmovshdup ("#__VA_ARGS__"),%%xmm2; vfmsubadd231ps "#ap",%%xmm2,"#c1";"\
+    "vmovsldup ("#__VA_ARGS__"),%%xmm2; vfmsubadd231ps "#a0",%%xmm2,"#c1";"
+#endif
+#define KERNEL_h_k1m1n1 \
+    "vmovsd (%0),%%xmm0; vpermilps $177,%%xmm0,%%xmm1; addq $8,%0;"\
+    unit_kernel_k1m1n1(%%xmm0,%%xmm1,0,4,%%xmm4,%1)
+#define KERNEL_t_k1m1n1 KERNEL_h_k1m1n1 "addq $8,%1;"
+#define KERNEL_h_k1m1n2 \
+    "vmovddup (%0),%%xmm0; vpermilps $177,%%xmm0,%%xmm1; addq $8,%0;"\
+    unit_kernel_k1m1n2(%%xmm0,%%xmm1,%%xmm4,%1)
+#define KERNEL_t_k1m1n2 KERNEL_h_k1m1n2 "addq $16,%1;"
+#define KERNEL_h_k1m1n4 KERNEL_h_k1m1n2 unit_kernel_k1m1n2(%%xmm0,%%xmm1,%%xmm5,%1,%%r12,1)
+#define KERNEL_t_k1m1n4 KERNEL_h_k1m1n4 "addq $16,%1;"
+#define KERNEL_t_k1m1n6 KERNEL_h_k1m1n4 unit_kernel_k1m1n2(%%xmm0,%%xmm1,%%xmm6,%1,%%r12,2) "addq $16,%1;"
+#define KERNEL_h_k1m1n8 KERNEL_t_k1m1n6 unit_kernel_k1m1n2(%%xmm0,%%xmm1,%%xmm7,%%r15)
+#define KERNEL_t_k1m1n8 KERNEL_h_k1m1n8 "addq $16,%%r15;"
+#define KERNEL_h_k1m1n10 KERNEL_h_k1m1n8 unit_kernel_k1m1n2(%%xmm0,%%xmm1,%%xmm8,%%r15,%%r12,1)
+#define KERNEL_t_k1m1n10 KERNEL_h_k1m1n10 "addq $16,%%r15;"
+#define KERNEL_h_k1m1n12 KERNEL_h_k1m1n10 unit_kernel_k1m1n2(%%xmm0,%%xmm1,%%xmm9,%%r15,%%r12,2)
+#define KERNEL_t_k1m1n12 KERNEL_h_k1m1n12 "addq $16,%%r15;"
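+/* Note on the complex update performed by the unit_save_* macros below
+ * (this is a descriptive comment, not part of the original patch): with c
+ * stored as interleaved (real,imag) float pairs and alpha = alp_r + alp_i*i,
+ * the desired result is
+ *     alpha*c = (alp_r*re - alp_i*im) + (alp_r*im + alp_i*re)*i .
+ * vpermilps $177 swaps each (real,imag) pair, and the vfmaddsub* forms add
+ * in odd lanes while subtracting in even lanes, so two fused ops produce
+ * both components at once; the vfmsubadd* forms flip the lane signs for
+ * the conjugated variants. */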
+#if CGEMM_SKX_MODE == 0 || CGEMM_SKX_MODE == 2 //not to do conjugation on a_block
+  #define unit_save_m1n1(alp_r,alp_i,c1,...) \
+    "vpermilps $177,"#c1",%%xmm3; vmovsd ("#__VA_ARGS__"),%%xmm2; vfmaddsub213ps %%xmm2,"#alp_i",%%xmm3;"\
+    "vfmaddsub213ps %%xmm3,"#alp_r","#c1";vmovsd "#c1",("#__VA_ARGS__");"
+  #define unit_save_m1n2(alp_r,alp_i,c1) \
+    "vpermilps $177,"#c1",%%xmm3; vmovsd (%3),%%xmm2; vmovhpd (%3,%4,1),%%xmm2,%%xmm2;"\
+    "vfmaddsub213ps %%xmm2,"#alp_i",%%xmm3; vfmaddsub231ps "#c1","#alp_r",%%xmm3;"\
+    "vmovsd %%xmm3,(%3); vmovhpd %%xmm3,(%3,%4,1); leaq (%3,%4,2),%3;"
+#else //do conjugation on a_block
+  #define unit_save_m1n1(alp_r,alp_i,c1,...) \
+    "vpermilps $177,"#c1",%%xmm3; vmovsd ("#__VA_ARGS__"),%%xmm2; vfmsubadd213ps %%xmm2,"#alp_r","#c1";"\
+    "vfmsubadd231ps %%xmm3,"#alp_i","#c1";vmovsd "#c1",("#__VA_ARGS__");"
+  #define unit_save_m1n2(alp_r,alp_i,c1) \
+    "vpermilps $177,"#c1",%%xmm3; vmovsd (%3),%%xmm2; vmovhpd (%3,%4,1),%%xmm2,%%xmm2;"\
+    "vfmsubadd213ps %%xmm2,"#alp_r","#c1"; vfmsubadd213ps "#c1","#alp_i",%%xmm3;"\
+    "vmovsd %%xmm3,(%3); vmovhpd %%xmm3,(%3,%4,1); leaq (%3,%4,2),%3;"
+#endif
+#define SAVE_SETUP_m1 "movq %2,%3; addq $8,%2; vbroadcastss (%5),%%xmm0; vbroadcastss 4(%5),%%xmm1;"
+#define SAVE_m1n1 SAVE_SETUP_m1 unit_save_m1n1(%%xmm0,%%xmm1,%%xmm4,%3)
+#define SAVE_m1n2 SAVE_SETUP_m1 unit_save_m1n2(%%xmm0,%%xmm1,%%xmm4)
+#define SAVE_m1n4 SAVE_m1n2 unit_save_m1n2(%%xmm0,%%xmm1,%%xmm5)
+#define SAVE_m1n6 SAVE_m1n4 unit_save_m1n2(%%xmm0,%%xmm1,%%xmm6)
+#define SAVE_m1n8 SAVE_m1n6 unit_save_m1n2(%%xmm0,%%xmm1,%%xmm7)
+#define SAVE_m1n10 SAVE_m1n8 unit_save_m1n2(%%xmm0,%%xmm1,%%xmm8)
+#define SAVE_m1n12 SAVE_m1n10 unit_save_m1n2(%%xmm0,%%xmm1,%%xmm9)
+#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;"
+#define INIT_m1n2 INIT_m2n1
+#define INIT_m1n4 INIT_m1n2 "vpxor %%xmm5,%%xmm5,%%xmm5;"
+#define INIT_m1n6 INIT_m1n4 "vpxor %%xmm6,%%xmm6,%%xmm6;"
+#define INIT_m1n8 INIT_m1n6 "vpxor %%xmm7,%%xmm7,%%xmm7;"
+#define INIT_m1n10 INIT_m1n8 "vpxor %%xmm8,%%xmm8,%%xmm8;"
+#define INIT_m1n12 INIT_m1n10 "vpxor %%xmm9,%%xmm9,%%xmm9;"
+#define COMPUTE_m1(ndim) \
+    INIT_m1n##ndim\
+    COMPUTE_INIT\
+    #ndim"88110:\n\t"\
+    "testq %6,%6; jz "#ndim"88111f;"\
+    KERNEL_t_k1m1n##ndim\
+    "decq %6; jmp "#ndim"88110b;"\
+    #ndim"88111:\n\t"\
+    SAVE_m1n##ndim
+
+#define COMPUTE(ndim) {\
+    b_pref = b_pointer + ndim * K * 2;\
+    __asm__ __volatile__(\
+        GENERAL_INIT\
+        CONSTZMM_INIT\
+        "cmpq $8,%7;jb 33101"#ndim"f;"\
+        "33109"#ndim":\n\t"\
+        COMPUTE_m8(ndim)\
+        "subq $8,%7;cmpq $8,%7;jnb 33109"#ndim"b;"\
+        "33101"#ndim":\n\t"\
+        "cmpq $4,%7;jb 33102"#ndim"f;"\
+        COMPUTE_m4(ndim)\
+        "subq $4,%7;"\
+        "33102"#ndim":\n\t"\
+        "cmpq $2,%7;jb 33103"#ndim"f;"\
+        COMPUTE_m2(ndim)\
+        "subq $2,%7;"\
+        "33103"#ndim":\n\t"\
+        "testq %7,%7;jz 33104"#ndim"f;"\
+        COMPUTE_m1(ndim)\
+        "33104"#ndim":\n\t"\
+        GENERAL_RECOVER\
+        :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(c_store),"+r"(ldc_in_bytes),"+r"(constval),"+r"(K),"+r"(M),"+r"(b_pref)\
+        ::"r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14",\
+        "zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31",\
+        "cc","memory");\
+    a_pointer -= M * K * 2; b_pointer += ndim * K * 2; c_pointer += (LDC * ndim - M) * 2;\
+}
+
+int __attribute__ ((noinline))
+CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alphar, float alphai, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC)
+{
+    if(m==0||n==0||k==0) return 0;
+    int64_t ldc_in_bytes = (int64_t)LDC * sizeof(float) * 2; float const_val[4] = {alphar, alphai, -1, 1};
+    int64_t M = (int64_t)m, K = (int64_t)k;
+    BLASLONG n_count = n;
+    float *a_pointer = A,*b_pointer = B,*c_pointer = C,*c_store = C,*constval = const_val,*b_pref = B;
+    for(;n_count>11;n_count-=12) COMPUTE(12)
+    for(;n_count>9;n_count-=10) COMPUTE(10)
+    for(;n_count>7;n_count-=8) COMPUTE(8)
+    for(;n_count>5;n_count-=6) COMPUTE(6)
+    for(;n_count>3;n_count-=4) COMPUTE(4)
+    for(;n_count>1;n_count-=2) COMPUTE(2)
+    if(n_count>0) COMPUTE(1)
+    return 0;
+}
diff --git a/kernel/x86_64/cgemv_n_4.c
b/kernel/x86_64/cgemv_n_4.c index d81766cd4..0ed02b8d8 100644 --- a/kernel/x86_64/cgemv_n_4.c +++ b/kernel/x86_64/cgemv_n_4.c @@ -29,7 +29,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include "common.h" -#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "cgemv_n_microk_haswell-4.c" #elif defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "cgemv_n_microk_bulldozer-4.c" diff --git a/kernel/x86_64/cgemv_t_4.c b/kernel/x86_64/cgemv_t_4.c index 6bdea6787..c2903b11f 100644 --- a/kernel/x86_64/cgemv_t_4.c +++ b/kernel/x86_64/cgemv_t_4.c @@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "cgemv_t_microk_haswell-4.c" #elif defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "cgemv_t_microk_bulldozer-4.c" @@ -233,9 +233,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, if ( m < 1 ) return(0); if ( n < 1 ) return(0); - inc_x <<= 1; - inc_y <<= 1; - lda <<= 1; + inc_x *= 2; + inc_y *= 2; + lda *= 2; lda4 = lda << 2; xbuffer = buffer; diff --git a/kernel/x86_64/copy_sse2.S b/kernel/x86_64/copy_sse2.S index 200daafd9..a5ab2ea91 100644 --- a/kernel/x86_64/copy_sse2.S +++ b/kernel/x86_64/copy_sse2.S @@ -54,7 +54,7 @@ #ifdef OPTERON #define LOAD(OFFSET, ADDR, REG) xorps REG, REG; addpd OFFSET(ADDR), REG #else -#define LOAD(OFFSET, ADDR, REG) movaps OFFSET(ADDR), REG +#define LOAD(OFFSET, ADDR, REG) movups OFFSET(ADDR), REG #endif PROLOGUE @@ -104,14 +104,14 @@ sarq $4, %rax jle .L13 - movaps -16 * SIZE(X), %xmm0 - movaps -14 * SIZE(X), %xmm1 - movaps -12 * SIZE(X), %xmm2 - movaps -10 * SIZE(X), %xmm3 - movaps -8 * SIZE(X), %xmm4 - movaps -6 * SIZE(X), %xmm5 - movaps -4 * SIZE(X), %xmm6 - movaps -2 * SIZE(X), %xmm7 + movups -16 * SIZE(X), %xmm0 + movups -14 * SIZE(X), %xmm1 + movups -12 * SIZE(X), %xmm2 + movups -10 * SIZE(X), %xmm3 + movups -8 * SIZE(X), %xmm4 + movups -6 * SIZE(X), %xmm5 + movups -4 * SIZE(X), %xmm6 + movups -2 * SIZE(X), %xmm7 decq %rax jle .L12 @@ -122,36 +122,36 @@ PREFETCHW (PREFETCHSIZE + 0) - PREOFFSET(Y) #endif - movaps %xmm0, -16 * SIZE(Y) + movups %xmm0, -16 * SIZE(Y) LOAD( 0 * SIZE, X, %xmm0) - movaps %xmm1, -14 * SIZE(Y) + movups %xmm1, -14 * SIZE(Y) LOAD( 2 * SIZE, X, %xmm1) #ifdef PREFETCH PREFETCH (PREFETCHSIZE + 0) - PREOFFSET(X) #endif - movaps %xmm2, -12 * SIZE(Y) + movups %xmm2, -12 * SIZE(Y) LOAD( 4 * SIZE, X, %xmm2) - movaps %xmm3, -10 * SIZE(Y) + movups %xmm3, -10 * SIZE(Y) LOAD( 6 * SIZE, X, %xmm3) #if defined(PREFETCHW) && !defined(FETCH128) PREFETCHW (PREFETCHSIZE + 64) - PREOFFSET(Y) #endif - movaps %xmm4, -8 * SIZE(Y) + movups %xmm4, -8 * SIZE(Y) LOAD( 8 * SIZE, X, %xmm4) - movaps %xmm5, -6 * SIZE(Y) + movups %xmm5, -6 * SIZE(Y) LOAD(10 * SIZE, X, %xmm5) #if defined(PREFETCH) && !defined(FETCH128) PREFETCH (PREFETCHSIZE + 64) - PREOFFSET(X) #endif - movaps %xmm6, -4 * SIZE(Y) + movups %xmm6, -4 * SIZE(Y) LOAD(12 * SIZE, X, %xmm6) - movaps %xmm7, -2 * SIZE(Y) + movups %xmm7, -2 * SIZE(Y) LOAD(14 * SIZE, X, %xmm7) subq $-16 * SIZE, Y @@ -161,14 +161,14 @@ ALIGN_3 .L12: - movaps %xmm0, -16 * SIZE(Y) - movaps %xmm1, -14 * SIZE(Y) - movaps %xmm2, -12 * SIZE(Y) - movaps %xmm3, -10 * SIZE(Y) - 
movaps %xmm4, -8 * SIZE(Y) - movaps %xmm5, -6 * SIZE(Y) - movaps %xmm6, -4 * SIZE(Y) - movaps %xmm7, -2 * SIZE(Y) + movups %xmm0, -16 * SIZE(Y) + movups %xmm1, -14 * SIZE(Y) + movups %xmm2, -12 * SIZE(Y) + movups %xmm3, -10 * SIZE(Y) + movups %xmm4, -8 * SIZE(Y) + movups %xmm5, -6 * SIZE(Y) + movups %xmm6, -4 * SIZE(Y) + movups %xmm7, -2 * SIZE(Y) subq $-16 * SIZE, Y subq $-16 * SIZE, X @@ -179,15 +179,15 @@ jle .L14 ALIGN_3 - movaps -16 * SIZE(X), %xmm0 - movaps -14 * SIZE(X), %xmm1 - movaps -12 * SIZE(X), %xmm2 - movaps -10 * SIZE(X), %xmm3 + movups -16 * SIZE(X), %xmm0 + movups -14 * SIZE(X), %xmm1 + movups -12 * SIZE(X), %xmm2 + movups -10 * SIZE(X), %xmm3 - movaps %xmm0, -16 * SIZE(Y) - movaps %xmm1, -14 * SIZE(Y) - movaps %xmm2, -12 * SIZE(Y) - movaps %xmm3, -10 * SIZE(Y) + movups %xmm0, -16 * SIZE(Y) + movups %xmm1, -14 * SIZE(Y) + movups %xmm2, -12 * SIZE(Y) + movups %xmm3, -10 * SIZE(Y) addq $8 * SIZE, X addq $8 * SIZE, Y @@ -198,11 +198,11 @@ jle .L15 ALIGN_3 - movaps -16 * SIZE(X), %xmm0 - movaps -14 * SIZE(X), %xmm1 + movups -16 * SIZE(X), %xmm0 + movups -14 * SIZE(X), %xmm1 - movaps %xmm0, -16 * SIZE(Y) - movaps %xmm1, -14 * SIZE(Y) + movups %xmm0, -16 * SIZE(Y) + movups %xmm1, -14 * SIZE(Y) addq $4 * SIZE, X addq $4 * SIZE, Y @@ -213,8 +213,8 @@ jle .L16 ALIGN_3 - movaps -16 * SIZE(X), %xmm0 - movaps %xmm0, -16 * SIZE(Y) + movups -16 * SIZE(X), %xmm0 + movups %xmm0, -16 * SIZE(Y) addq $2 * SIZE, X addq $2 * SIZE, Y @@ -246,13 +246,13 @@ sarq $4, %rax jle .L23 - movaps -15 * SIZE(X), %xmm1 - movaps -13 * SIZE(X), %xmm2 - movaps -11 * SIZE(X), %xmm3 - movaps -9 * SIZE(X), %xmm4 - movaps -7 * SIZE(X), %xmm5 - movaps -5 * SIZE(X), %xmm6 - movaps -3 * SIZE(X), %xmm7 + movups -15 * SIZE(X), %xmm1 + movups -13 * SIZE(X), %xmm2 + movups -11 * SIZE(X), %xmm3 + movups -9 * SIZE(X), %xmm4 + movups -7 * SIZE(X), %xmm5 + movups -5 * SIZE(X), %xmm6 + movups -3 * SIZE(X), %xmm7 decq %rax jle .L22 @@ -264,11 +264,11 @@ #endif SHUFPD_1 %xmm1, %xmm0 - movaps %xmm0, -16 * SIZE(Y) + movups %xmm0, -16 * SIZE(Y) LOAD(-1 * SIZE, X, %xmm0) SHUFPD_1 %xmm2, %xmm1 - movaps %xmm1, -14 * SIZE(Y) + movups %xmm1, -14 * SIZE(Y) LOAD( 1 * SIZE, X, %xmm1) #ifdef PREFETCH @@ -276,11 +276,11 @@ #endif SHUFPD_1 %xmm3, %xmm2 - movaps %xmm2, -12 * SIZE(Y) + movups %xmm2, -12 * SIZE(Y) LOAD( 3 * SIZE, X, %xmm2) SHUFPD_1 %xmm4, %xmm3 - movaps %xmm3, -10 * SIZE(Y) + movups %xmm3, -10 * SIZE(Y) LOAD( 5 * SIZE, X, %xmm3) #if defined(PREFETCHW) && !defined(FETCH128) @@ -288,11 +288,11 @@ #endif SHUFPD_1 %xmm5, %xmm4 - movaps %xmm4, -8 * SIZE(Y) + movups %xmm4, -8 * SIZE(Y) LOAD( 7 * SIZE, X, %xmm4) SHUFPD_1 %xmm6, %xmm5 - movaps %xmm5, -6 * SIZE(Y) + movups %xmm5, -6 * SIZE(Y) LOAD( 9 * SIZE, X, %xmm5) #if defined(PREFETCH) && !defined(FETCH128) @@ -300,11 +300,11 @@ #endif SHUFPD_1 %xmm7, %xmm6 - movaps %xmm6, -4 * SIZE(Y) + movups %xmm6, -4 * SIZE(Y) LOAD(11 * SIZE, X, %xmm6) SHUFPD_1 %xmm0, %xmm7 - movaps %xmm7, -2 * SIZE(Y) + movups %xmm7, -2 * SIZE(Y) LOAD(13 * SIZE, X, %xmm7) subq $-16 * SIZE, X @@ -315,26 +315,26 @@ .L22: SHUFPD_1 %xmm1, %xmm0 - movaps %xmm0, -16 * SIZE(Y) + movups %xmm0, -16 * SIZE(Y) LOAD(-1 * SIZE, X, %xmm0) SHUFPD_1 %xmm2, %xmm1 - movaps %xmm1, -14 * SIZE(Y) + movups %xmm1, -14 * SIZE(Y) SHUFPD_1 %xmm3, %xmm2 - movaps %xmm2, -12 * SIZE(Y) + movups %xmm2, -12 * SIZE(Y) SHUFPD_1 %xmm4, %xmm3 - movaps %xmm3, -10 * SIZE(Y) + movups %xmm3, -10 * SIZE(Y) SHUFPD_1 %xmm5, %xmm4 - movaps %xmm4, -8 * SIZE(Y) + movups %xmm4, -8 * SIZE(Y) SHUFPD_1 %xmm6, %xmm5 - movaps %xmm5, -6 * SIZE(Y) + movups %xmm5, 
-6 * SIZE(Y) SHUFPD_1 %xmm7, %xmm6 - movaps %xmm6, -4 * SIZE(Y) + movups %xmm6, -4 * SIZE(Y) SHUFPD_1 %xmm0, %xmm7 - movaps %xmm7, -2 * SIZE(Y) + movups %xmm7, -2 * SIZE(Y) subq $-16 * SIZE, X subq $-16 * SIZE, Y @@ -345,24 +345,24 @@ jle .L24 ALIGN_3 - movaps -15 * SIZE(X), %xmm1 - movaps -13 * SIZE(X), %xmm2 - movaps -11 * SIZE(X), %xmm3 - movaps -9 * SIZE(X), %xmm8 + movups -15 * SIZE(X), %xmm1 + movups -13 * SIZE(X), %xmm2 + movups -11 * SIZE(X), %xmm3 + movups -9 * SIZE(X), %xmm8 SHUFPD_1 %xmm1, %xmm0 - movaps %xmm0, -16 * SIZE(Y) + movups %xmm0, -16 * SIZE(Y) SHUFPD_1 %xmm2, %xmm1 - movaps %xmm1, -14 * SIZE(Y) + movups %xmm1, -14 * SIZE(Y) SHUFPD_1 %xmm3, %xmm2 - movaps %xmm2, -12 * SIZE(Y) + movups %xmm2, -12 * SIZE(Y) SHUFPD_1 %xmm8, %xmm3 - movaps %xmm3, -10 * SIZE(Y) + movups %xmm3, -10 * SIZE(Y) - movaps %xmm8, %xmm0 + movups %xmm8, %xmm0 addq $8 * SIZE, X addq $8 * SIZE, Y @@ -373,15 +373,15 @@ jle .L25 ALIGN_3 - movaps -15 * SIZE(X), %xmm1 - movaps -13 * SIZE(X), %xmm2 + movups -15 * SIZE(X), %xmm1 + movups -13 * SIZE(X), %xmm2 SHUFPD_1 %xmm1, %xmm0 SHUFPD_1 %xmm2, %xmm1 - movaps %xmm0, -16 * SIZE(Y) - movaps %xmm1, -14 * SIZE(Y) - movaps %xmm2, %xmm0 + movups %xmm0, -16 * SIZE(Y) + movups %xmm1, -14 * SIZE(Y) + movups %xmm2, %xmm0 addq $4 * SIZE, X addq $4 * SIZE, Y @@ -392,10 +392,10 @@ jle .L26 ALIGN_3 - movaps -15 * SIZE(X), %xmm1 + movups -15 * SIZE(X), %xmm1 SHUFPD_1 %xmm1, %xmm0 - movaps %xmm0, -16 * SIZE(Y) + movups %xmm0, -16 * SIZE(Y) addq $2 * SIZE, X addq $2 * SIZE, Y @@ -424,14 +424,14 @@ sarq $4, %rax jle .L23 - movaps -16 * SIZE(X), %xmm0 - movaps -14 * SIZE(X), %xmm1 - movaps -12 * SIZE(X), %xmm2 - movaps -10 * SIZE(X), %xmm3 - movaps -8 * SIZE(X), %xmm4 - movaps -6 * SIZE(X), %xmm5 - movaps -4 * SIZE(X), %xmm6 - movaps -2 * SIZE(X), %xmm7 + movups -16 * SIZE(X), %xmm0 + movups -14 * SIZE(X), %xmm1 + movups -12 * SIZE(X), %xmm2 + movups -10 * SIZE(X), %xmm3 + movups -8 * SIZE(X), %xmm4 + movups -6 * SIZE(X), %xmm5 + movups -4 * SIZE(X), %xmm6 + movups -2 * SIZE(X), %xmm7 decq %rax jle .L22 @@ -515,16 +515,16 @@ jle .L24 ALIGN_3 - movaps -16 * SIZE(X), %xmm0 + movups -16 * SIZE(X), %xmm0 movlps %xmm0, -16 * SIZE(Y) movhps %xmm0, -15 * SIZE(Y) - movaps -14 * SIZE(X), %xmm1 + movups -14 * SIZE(X), %xmm1 movlps %xmm1, -14 * SIZE(Y) movhps %xmm1, -13 * SIZE(Y) - movaps -12 * SIZE(X), %xmm2 + movups -12 * SIZE(X), %xmm2 movlps %xmm2, -12 * SIZE(Y) movhps %xmm2, -11 * SIZE(Y) - movaps -10 * SIZE(X), %xmm3 + movups -10 * SIZE(X), %xmm3 movlps %xmm3, -10 * SIZE(Y) movhps %xmm3, -9 * SIZE(Y) @@ -537,10 +537,10 @@ jle .L25 ALIGN_3 - movaps -16 * SIZE(X), %xmm0 + movups -16 * SIZE(X), %xmm0 movlps %xmm0, -16 * SIZE(Y) movhps %xmm0, -15 * SIZE(Y) - movaps -14 * SIZE(X), %xmm1 + movups -14 * SIZE(X), %xmm1 movlps %xmm1, -14 * SIZE(Y) movhps %xmm1, -13 * SIZE(Y) @@ -553,7 +553,7 @@ jle .L26 ALIGN_3 - movaps -16 * SIZE(X), %xmm0 + movups -16 * SIZE(X), %xmm0 movlps %xmm0, -16 * SIZE(Y) movhps %xmm0, -15 * SIZE(Y) diff --git a/kernel/x86_64/cscal.c b/kernel/x86_64/cscal.c index 72af99809..6d75358a6 100644 --- a/kernel/x86_64/cscal.c +++ b/kernel/x86_64/cscal.c @@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" -#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "cscal_microk_haswell-2.c" #elif defined(BULLDOZER) || defined(PILEDRIVER) #include "cscal_microk_bulldozer-2.c" diff --git a/kernel/x86_64/dasum.c b/kernel/x86_64/dasum.c new file mode 100644 index 000000000..534f257d2 --- /dev/null +++ b/kernel/x86_64/dasum.c @@ -0,0 +1,129 @@ +#include "common.h" + +#ifndef ABS_K +#define ABS_K(a) ((a) > 0 ? (a) : (-(a))) +#endif + +#if defined(SKYLAKEX) +#include "dasum_microk_skylakex-2.c" +#elif defined(HASWELL) +#include "dasum_microk_haswell-2.c" +#endif + +#ifndef HAVE_DASUM_KERNEL +static FLOAT dasum_kernel(BLASLONG n, FLOAT *x1) +{ + + BLASLONG i=0; + BLASLONG n_8 = n & -8; + FLOAT *x = x1; + FLOAT temp0, temp1, temp2, temp3; + FLOAT temp4, temp5, temp6, temp7; + FLOAT sum0 = 0.0; + FLOAT sum1 = 0.0; + FLOAT sum2 = 0.0; + FLOAT sum3 = 0.0; + FLOAT sum4 = 0.0; + + while (i < n_8) { + temp0 = ABS_K(x[0]); + temp1 = ABS_K(x[1]); + temp2 = ABS_K(x[2]); + temp3 = ABS_K(x[3]); + temp4 = ABS_K(x[4]); + temp5 = ABS_K(x[5]); + temp6 = ABS_K(x[6]); + temp7 = ABS_K(x[7]); + + sum0 += temp0; + sum1 += temp1; + sum2 += temp2; + sum3 += temp3; + + sum0 += temp4; + sum1 += temp5; + sum2 += temp6; + sum3 += temp7; + + x+=8; + i+=8; + } + + while (i < n) { + sum4 += ABS_K(x1[i]); + i++; + } + + return sum0+sum1+sum2+sum3+sum4; +} + +#endif +static FLOAT asum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i = 0; + FLOAT sumf = 0.0; + + if (n <= 0 || inc_x <= 0) return (sumf); + + if (inc_x == 1) { + sumf = dasum_kernel(n, x); + } + else { + n *= inc_x; + while (i < n) { + sumf += ABS_K(x[i]); + i += inc_x; + } + } + return(sumf); +} + +#if defined(SMP) +static int asum_thread_function(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *dummy3, BLASLONG dummy4, FLOAT *result, BLASLONG dummy5) +{ + *(FLOAT *)result = asum_compute(n, x, inc_x); + return 0; +} + +extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, void *c, BLASLONG ldc, int (*function)(), int nthreads); +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; +#endif + FLOAT sumf = 0.0; + +#if defined(SMP) + int num_cpu = num_cpu_avail(1); + if (n <= 100000 || inc_x <= 0) + nthreads = 1; + else + nthreads = num_cpu < n/100000 ? 
num_cpu : n/100000;
+
+	if (nthreads == 1) {
+		sumf = asum_compute(n, x, inc_x);
+	} else {
+		int mode, i;
+		char result[MAX_CPU_NUMBER * sizeof(double) *2];
+		FLOAT *ptr;
+#if !defined(DOUBLE)
+		mode = BLAS_SINGLE | BLAS_REAL;
+#else
+		mode = BLAS_DOUBLE | BLAS_REAL;
+#endif
+		blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, x, inc_x, NULL, 0, result, 0, (void *)asum_thread_function, nthreads);
+		ptr = (FLOAT *)result;
+		for (i = 0; i < nthreads; i++) {
+			sumf += (*ptr);
+			ptr = (FLOAT *)(((char *)ptr) + sizeof(double) *2);
+		}
+	}
+#else
+	sumf = asum_compute(n, x, inc_x);
+#endif
+	return(sumf);
+}
+
diff --git a/kernel/x86_64/dasum_microk_haswell-2.c b/kernel/x86_64/dasum_microk_haswell-2.c
new file mode 100644
index 000000000..4fc73ddd4
--- /dev/null
+++ b/kernel/x86_64/dasum_microk_haswell-2.c
@@ -0,0 +1,86 @@
+#if (( defined(__GNUC__) && __GNUC__ > 6 ) || (defined(__clang__) && __clang_major__ >= 6)) && defined(__AVX2__)
+
+#define HAVE_DASUM_KERNEL
+
+#include <immintrin.h>
+#include <stdint.h>
+
+#ifndef ABS_K
+#define ABS_K(a) ((a) > 0 ? (a) : (-(a)))
+#endif
+
+static FLOAT dasum_kernel(BLASLONG n, FLOAT *x1)
+{
+	BLASLONG i = 0;
+	FLOAT sumf = 0.0;
+
+	if (n >= 256) {
+		BLASLONG align_256 = ((32 - ((uintptr_t)x1 & (uintptr_t)0x1f)) >> 3) & 0x3;
+
+		for (i = 0; i < align_256; i++) {
+			sumf += ABS_K(x1[i]);
+		}
+
+		n -= align_256;
+		x1 += align_256;
+	}
+
+	BLASLONG tail_index_SSE = n&(~7);
+	BLASLONG tail_index_AVX2 = n&(~255);
+
+	if (n >= 256) {
+		__m256d accum_0, accum_1, accum_2, accum_3;
+
+		accum_0 = _mm256_setzero_pd();
+		accum_1 = _mm256_setzero_pd();
+		accum_2 = _mm256_setzero_pd();
+		accum_3 = _mm256_setzero_pd();
+
+		__m256i abs_mask = _mm256_set1_epi64x(0x7fffffffffffffff);
+		for (i = 0; i < tail_index_AVX2; i += 16) {
+			accum_0 += (__m256d)_mm256_and_si256(_mm256_load_si256((__m256i const *)&x1[i+ 0]), abs_mask);
+			accum_1 += (__m256d)_mm256_and_si256(_mm256_load_si256((__m256i const *)&x1[i+ 4]), abs_mask);
+			accum_2 += (__m256d)_mm256_and_si256(_mm256_load_si256((__m256i const *)&x1[i+ 8]), abs_mask);
+			accum_3 += (__m256d)_mm256_and_si256(_mm256_load_si256((__m256i const *)&x1[i+12]), abs_mask);
+		}
+
+		accum_0 = accum_0 + accum_1 + accum_2 + accum_3;
+
+		__m128d half_accum0;
+		half_accum0 = _mm_add_pd(_mm256_extractf128_pd(accum_0, 0), _mm256_extractf128_pd(accum_0, 1));
+
+		half_accum0 = _mm_hadd_pd(half_accum0, half_accum0);
+
+		sumf += half_accum0[0];
+	}
+
+	if (n >= 8) {
+		__m128d accum_20, accum_21, accum_22, accum_23;
+		accum_20 = _mm_setzero_pd();
+		accum_21 = _mm_setzero_pd();
+		accum_22 = _mm_setzero_pd();
+		accum_23 = _mm_setzero_pd();
+
+		__m128i abs_mask2 = _mm_set1_epi64x(0x7fffffffffffffff);
+		for (i = tail_index_AVX2; i < tail_index_SSE; i += 8) {
+			accum_20 += (__m128d)_mm_and_si128(_mm_loadu_si128((__m128i const *)&x1[i + 0]), abs_mask2);
+			accum_21 += (__m128d)_mm_and_si128(_mm_loadu_si128((__m128i const *)&x1[i + 2]), abs_mask2);
+			accum_22 += (__m128d)_mm_and_si128(_mm_loadu_si128((__m128i const *)&x1[i + 4]), abs_mask2);
+			accum_23 += (__m128d)_mm_and_si128(_mm_loadu_si128((__m128i const *)&x1[i + 6]), abs_mask2);
+		}
+
+		accum_20 = accum_20 + accum_21 + accum_22 + accum_23;
+		__m128d half_accum20;
+		half_accum20 = _mm_hadd_pd(accum_20, accum_20);
+
+		sumf += half_accum20[0];
+	}
+
+	for (i = tail_index_SSE; i < n; ++i) {
+		sumf += ABS_K(x1[i]);
+	}
+
+	return sumf;
+
+}
+#endif
diff --git a/kernel/x86_64/dasum_microk_skylakex-2.c b/kernel/x86_64/dasum_microk_skylakex-2.c
new file mode 100644
index 000000000..aea8c02d9
--- /dev/null
+++ b/kernel/x86_64/dasum_microk_skylakex-2.c
@@ -0,0 +1,80 @@
+/* need a new enough GCC for avx512 support */
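+/* Descriptive note (not part of the original patch): the kernel below has
+ * the same structure as the Haswell variant above. It peels scalar
+ * iterations until x1 is 64-byte aligned, accumulates |x[i]| into four
+ * independent zmm registers (32 doubles per iteration) to hide FMA latency,
+ * reduces with _mm512_reduce_add_pd(), then finishes with an 8-wide SSE
+ * tail and a final scalar remainder loop. */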
+#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_DASUM_KERNEL 1
+
+#include <immintrin.h>
+
+#include <stdint.h>
+
+#ifndef ABS_K
+#define ABS_K(a) ((a) > 0 ? (a) : (-(a)))
+#endif
+
+static FLOAT dasum_kernel(BLASLONG n, FLOAT *x1)
+{
+	BLASLONG i = 0;
+	FLOAT sumf = 0.0;
+
+	if (n >= 256) {
+		BLASLONG align_512 = ((64 - ((uintptr_t)x1 & (uintptr_t)0x3f)) >> 3) & 0x7;
+
+		for (i = 0; i < align_512; i++) {
+			sumf += ABS_K(x1[i]);
+		}
+
+		n -= align_512;
+		x1 += align_512;
+	}
+
+	BLASLONG tail_index_SSE = n&(~7);
+	BLASLONG tail_index_AVX512 = n&(~255);
+
+	if ( n >= 256 ) {
+
+		__m512d accum_0, accum_1, accum_2, accum_3;
+		accum_0 = _mm512_setzero_pd();
+		accum_1 = _mm512_setzero_pd();
+		accum_2 = _mm512_setzero_pd();
+		accum_3 = _mm512_setzero_pd();
+		for (i = 0; i < tail_index_AVX512; i += 32) {
+			accum_0 += _mm512_abs_pd(_mm512_load_pd(&x1[i + 0]));
+			accum_1 += _mm512_abs_pd(_mm512_load_pd(&x1[i + 8]));
+			accum_2 += _mm512_abs_pd(_mm512_load_pd(&x1[i +16]));
+			accum_3 += _mm512_abs_pd(_mm512_load_pd(&x1[i +24]));
+		}
+
+		accum_0 = accum_0 + accum_1 + accum_2 + accum_3;
+		sumf += _mm512_reduce_add_pd(accum_0);
+	}
+
+	if (n >= 8) {
+		__m128d accum_20, accum_21, accum_22, accum_23;
+		accum_20 = _mm_setzero_pd();
+		accum_21 = _mm_setzero_pd();
+		accum_22 = _mm_setzero_pd();
+		accum_23 = _mm_setzero_pd();
+
+		__m128i abs_mask2 = _mm_set1_epi64x(0x7fffffffffffffff);
+		for (i = tail_index_AVX512; i < tail_index_SSE; i += 8) {
+			accum_20 += (__m128d)_mm_and_si128(_mm_loadu_si128((__m128i const *)&x1[i + 0]), abs_mask2);
+			accum_21 += (__m128d)_mm_and_si128(_mm_loadu_si128((__m128i const *)&x1[i + 2]), abs_mask2);
+			accum_22 += (__m128d)_mm_and_si128(_mm_loadu_si128((__m128i const *)&x1[i + 4]), abs_mask2);
+			accum_23 += (__m128d)_mm_and_si128(_mm_loadu_si128((__m128i const *)&x1[i + 6]), abs_mask2);
+		}
+
+		accum_20 = accum_20 + accum_21 + accum_22 + accum_23;
+		__m128d half_accum20;
+		half_accum20 = _mm_hadd_pd(accum_20, accum_20);
+
+		sumf += half_accum20[0];
+	}
+
+	for (i = tail_index_SSE; i < n; ++i) {
+		sumf += ABS_K(x1[i]);
+	}
+
+	return sumf;
+}
+#endif
diff --git a/kernel/x86_64/daxpy.c b/kernel/x86_64/daxpy.c
index cde5bdaa6..26437012c 100644
--- a/kernel/x86_64/daxpy.c
+++ b/kernel/x86_64/daxpy.c
@@ -39,34 +39,51 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "daxpy_microk_piledriver-2.c" #elif defined(HASWELL) || defined(ZEN) #include "daxpy_microk_haswell-2.c" -#elif defined (SKYLAKEX) +#elif defined (SKYLAKEX) || defined (COOPERLAKE) #include "daxpy_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "daxpy_microk_sandy-2.c" #endif - #ifndef HAVE_KERNEL_8 +#include"../simd/intrin.h" static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) { BLASLONG register i = 0; FLOAT a = *alpha; - +#if V_SIMD +#ifdef DOUBLE + v_f64 __alpha, tmp; + __alpha = v_setall_f64(*alpha); + const int vstep = v_nlanes_f64; + for (; i < n; i += vstep) { + tmp = v_muladd_f64(__alpha, v_loadu_f64( x + i ), v_loadu_f64(y + i)); + v_storeu_f64(y + i, tmp); + } +#else + v_f32 __alpha, tmp; + __alpha = v_setall_f32(*alpha); + const int vstep = v_nlanes_f32; + for (; i < n; i += vstep) { + tmp = v_muladd_f32(__alpha, v_loadu_f32( x + i ), v_loadu_f32(y + i)); + v_storeu_f32(y + i, tmp); + } +#endif +#else while(i < n) - { - y[i] += a * x[i]; - y[i+1] += a * x[i+1]; - y[i+2] += a * x[i+2]; - y[i+3] += a * x[i+3]; - y[i+4] += a * x[i+4]; - y[i+5] += a * x[i+5]; - y[i+6] += a * x[i+6]; - y[i+7] += a * x[i+7]; - i+=8 ; - - } - + { + y[i] += a * x[i]; + y[i+1] += a * x[i+1]; + y[i+2] += a * x[i+2]; + y[i+3] += a * x[i+3]; + y[i+4] += a * x[i+4]; + y[i+5] += a * x[i+5]; + y[i+6] += a * x[i+6]; + y[i+7] += a * x[i+7]; + i+=8 ; + } +#endif } #endif diff --git a/kernel/x86_64/daxpy_microk_haswell-2.c b/kernel/x86_64/daxpy_microk_haswell-2.c index f3682e6d7..ecc0ecbd3 100644 --- a/kernel/x86_64/daxpy_microk_haswell-2.c +++ b/kernel/x86_64/daxpy_microk_haswell-2.c @@ -67,8 +67,9 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (y), // 3 "r" (alpha) // 4 : "cc", - "%xmm0", - "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/ddot.c b/kernel/x86_64/ddot.c index 969357614..e4b6622e6 100644 --- a/kernel/x86_64/ddot.c +++ b/kernel/x86_64/ddot.c @@ -39,7 +39,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "ddot_microk_nehalem-2.c" #elif defined(HASWELL) || defined(ZEN) #include "ddot_microk_haswell-2.c" -#elif defined (SKYLAKEX) +#elif defined (SKYLAKEX) || defined (COOPERLAKE) #include "ddot_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "ddot_microk_sandy-2.c" diff --git a/kernel/x86_64/ddot_microk_haswell-2.c b/kernel/x86_64/ddot_microk_haswell-2.c index dbb5487f7..faac72870 100644 --- a/kernel/x86_64/ddot_microk_haswell-2.c +++ b/kernel/x86_64/ddot_microk_haswell-2.c @@ -84,8 +84,9 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "r" (y), // 3 "r" (dot) // 4 : "cc", - "%xmm4", "%xmm5", - "%xmm6", "%xmm7", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/ddot_microk_piledriver-2.c b/kernel/x86_64/ddot_microk_piledriver-2.c index cc4bcd90a..0320a2e36 100644 --- a/kernel/x86_64/ddot_microk_piledriver-2.c +++ b/kernel/x86_64/ddot_microk_piledriver-2.c @@ -91,6 +91,7 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) : "cc", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); @@ -155,6 +156,7 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) : "cc", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/ddot_microk_sandy-2.c b/kernel/x86_64/ddot_microk_sandy-2.c index 84493ec27..35ba86a7d 100644 --- a/kernel/x86_64/ddot_microk_sandy-2.c +++ b/kernel/x86_64/ddot_microk_sandy-2.c @@ -89,8 +89,9 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "r" (y), // 3 "r" (dot) // 4 : "cc", - "%xmm4", "%xmm5", - "%xmm6", "%xmm7", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/ddot_microk_steamroller-2.c b/kernel/x86_64/ddot_microk_steamroller-2.c index 27d5244ce..94c012f0d 100644 --- a/kernel/x86_64/ddot_microk_steamroller-2.c +++ b/kernel/x86_64/ddot_microk_steamroller-2.c @@ -88,6 +88,7 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) : "cc", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/dgemm_kernel_16x2_skylakex.c b/kernel/x86_64/dgemm_kernel_16x2_skylakex.c new file mode 100644 index 000000000..9f2bf24e2 --- /dev/null +++ b/kernel/x86_64/dgemm_kernel_16x2_skylakex.c @@ -0,0 +1,520 @@ +/* %0 = a_ptr, %1 = b_ptr, %2 = c_ptr, %3 = c_tmp, %4 = ldc(bytes), %5 = k_counter, %6 = b_pref */ +/* r10 = tmp, r11 = m_counter, r12 = size_of_1_tile_in_b, r13 = k, r14 = b_head, r15 = %1+3*r12 */ + +#if (defined (LEFT) && !defined(TRANSA)) || (!defined (LEFT) && defined(TRANSA)) + #define BACKWARDS 1 +#else + #define BACKWARDS 0 +#endif +#define GEMM_SET_PB "movq %%r14,%1; leaq (%%r14,%%r12,2),%%r15; addq %%r12,%%r15;" +#define set_p_copy1(ptr) "sarq $1,%%r12; addq %%r12,"#ptr"; salq $1,%%r12; salq $3,%%r13; subq %%r13,"#ptr"; sarq $3,%%r13;" +#define set_p_copy2(ptr) "addq %%r12,"#ptr"; salq $4,%%r13; subq %%r13,"#ptr"; sarq $4,%%r13;" +#define set_p_copy4(ptr) "leaq ("#ptr",%%r12,2),"#ptr"; salq 
$5,%%r13; subq %%r13,"#ptr"; sarq $5,%%r13;" +#define set_p_copy8(ptr) "leaq ("#ptr",%%r12,4),"#ptr"; salq $6,%%r13; subq %%r13,"#ptr"; sarq $6,%%r13;" +#define set_p_copy16(ptr) "leaq ("#ptr",%%r12,8),"#ptr"; salq $7,%%r13; subq %%r13,"#ptr"; sarq $7,%%r13;" +#define set_p_b_dim1(ptr) set_p_copy1(ptr) +#define set_p_b_dim2(ptr) set_p_copy2(ptr) +#define set_p_b_dim4(ptr) set_p_copy2(ptr) +#define set_p_b_dim6(ptr) set_p_copy2(ptr) +#define set_p_b_dim8(ptr) set_p_copy2(ptr) +#define set_p_b_dim10(ptr) set_p_copy2(ptr) +#define set_p_b_dim12(ptr) set_p_copy2(ptr) +#ifdef TRMMKERNEL + #if BACKWARDS == 1 + #define INIT_set_papb(mdim,ndim) GEMM_SET_PB set_p_copy##mdim(%0) set_p_b_dim##ndim(%1) set_p_b_dim##ndim(%%r15) + #define SAVE_set_pa(mdim) "" + #else + #define INIT_set_papb(mdim,ndim) GEMM_SET_PB + #define SAVE_set_pa(mdim) set_p_copy##mdim(%0) + #endif +#else + #define INIT_set_papb(mdim,ndim) GEMM_SET_PB + #define SAVE_set_pa(mdim) "" +#endif +#if defined(TRMMKERNEL) && !defined(LEFT) + #if BACKWARDS == 1 + #define HEAD_SET_OFF(ndim) {} + #define TAIL_SET_OFF(ndim) {off += ndim;} + #define kernel_kstart_n4(mdim,updk) KERNEL_k1m##mdim##n2 KERNEL_k1m##mdim##n2 "addq $32,%%r15; "#updk" $2,%5;" + #define kernel_kstart_n6(mdim,updk) kernel_kstart_n4(mdim,updk) KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 "addq $32,%%r15; "#updk" $2,%5;" + #define kernel_kstart_n8(mdim,updk) kernel_kstart_n6(mdim,updk) KERNEL_k1m##mdim##n6 KERNEL_k1m##mdim##n6 "addq $32,%%r15; "#updk" $2,%5;" + #define kernel_kstart_n10(mdim,updk) kernel_kstart_n8(mdim,updk) KERNEL_k1m##mdim##n8 KERNEL_k1m##mdim##n8 #updk" $2,%5;" + #define kernel_kstart_n12(mdim,updk) kernel_kstart_n10(mdim,updk) KERNEL_k1m##mdim##n10 KERNEL_k1m##mdim##n10 #updk" $2,%5;" + #define kernel_kend_n4(mdim) "" + #define kernel_kend_n6(mdim) "" + #define kernel_kend_n8(mdim) "" + #define kernel_kend_n10(mdim) "" + #define kernel_kend_n12(mdim) "" + #else + #define HEAD_SET_OFF(ndim) {off += (ndim > 2 ? 2 : ndim);} + #define TAIL_SET_OFF(ndim) {off += (ndim > 2 ? 
(ndim-2) : 0);} + #define kernel_kstart_n4(mdim,updk) "" + #define kernel_kstart_n6(mdim,updk) "" + #define kernel_kstart_n8(mdim,updk) "" + #define kernel_kstart_n10(mdim,updk) "" + #define kernel_kstart_n12(mdim,updk) "" + #define kernel_kend_n4(mdim) "xorq %3,%3;"\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(0)\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(16) + #define kernel_kend_n6(mdim) "xorq %3,%3;"\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(0) acc_kend_nc3_k1m##mdim(0)\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(16) acc_kend_nc3_k1m##mdim(16)\ + loada_kend_k1m##mdim acc_kend_nc3_k1m##mdim(32)\ + loada_kend_k1m##mdim acc_kend_nc3_k1m##mdim(48) + #define kernel_kend_n8(mdim) "xorq %3,%3;"\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(0) acc_kend_nc3_k1m##mdim(0) acc_kend_nc4_k1m##mdim(0)\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(16) acc_kend_nc3_k1m##mdim(16) acc_kend_nc4_k1m##mdim(16)\ + loada_kend_k1m##mdim acc_kend_nc3_k1m##mdim(32) acc_kend_nc4_k1m##mdim(32)\ + loada_kend_k1m##mdim acc_kend_nc3_k1m##mdim(48) acc_kend_nc4_k1m##mdim(48)\ + loada_kend_k1m##mdim acc_kend_nc4_k1m##mdim(64)\ + loada_kend_k1m##mdim acc_kend_nc4_k1m##mdim(80) + #define kernel_kend_n10(mdim) "xorq %3,%3;"\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(0) acc_kend_nc3_k1m##mdim(0) acc_kend_nc4_k1m##mdim(0) acc_kend_nc5_k1m##mdim(0)\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(16) acc_kend_nc3_k1m##mdim(16) acc_kend_nc4_k1m##mdim(16) acc_kend_nc5_k1m##mdim(16)\ + loada_kend_k1m##mdim acc_kend_nc3_k1m##mdim(32) acc_kend_nc4_k1m##mdim(32) acc_kend_nc5_k1m##mdim(32)\ + loada_kend_k1m##mdim acc_kend_nc3_k1m##mdim(48) acc_kend_nc4_k1m##mdim(48) acc_kend_nc5_k1m##mdim(48)\ + loada_kend_k1m##mdim acc_kend_nc4_k1m##mdim(64) acc_kend_nc5_k1m##mdim(64)\ + loada_kend_k1m##mdim acc_kend_nc4_k1m##mdim(80) acc_kend_nc5_k1m##mdim(80)\ + loada_kend_k1m##mdim acc_kend_nc5_k1m##mdim(96)\ + loada_kend_k1m##mdim acc_kend_nc5_k1m##mdim(112) + #define kernel_kend_n12(mdim) "xorq %3,%3;"\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(0) acc_kend_nc3_k1m##mdim(0) acc_kend_nc4_k1m##mdim(0) acc_kend_nc5_k1m##mdim(0) acc_kend_nc6_k1m##mdim(0)\ + loada_kend_k1m##mdim acc_kend_nc2_k1m##mdim(16) acc_kend_nc3_k1m##mdim(16) acc_kend_nc4_k1m##mdim(16) acc_kend_nc5_k1m##mdim(16) acc_kend_nc6_k1m##mdim(16)\ + loada_kend_k1m##mdim acc_kend_nc3_k1m##mdim(32) acc_kend_nc4_k1m##mdim(32) acc_kend_nc5_k1m##mdim(32) acc_kend_nc6_k1m##mdim(32)\ + loada_kend_k1m##mdim acc_kend_nc3_k1m##mdim(48) acc_kend_nc4_k1m##mdim(48) acc_kend_nc5_k1m##mdim(48) acc_kend_nc6_k1m##mdim(48)\ + loada_kend_k1m##mdim acc_kend_nc4_k1m##mdim(64) acc_kend_nc5_k1m##mdim(64) acc_kend_nc6_k1m##mdim(64)\ + loada_kend_k1m##mdim acc_kend_nc4_k1m##mdim(80) acc_kend_nc5_k1m##mdim(80) acc_kend_nc6_k1m##mdim(80)\ + loada_kend_k1m##mdim acc_kend_nc5_k1m##mdim(96) acc_kend_nc6_k1m##mdim(96)\ + loada_kend_k1m##mdim acc_kend_nc5_k1m##mdim(112) acc_kend_nc6_k1m##mdim(112)\ + loada_kend_k1m##mdim acc_kend_nc6_k1m##mdim(128)\ + loada_kend_k1m##mdim acc_kend_nc6_k1m##mdim(144) + #endif +#else + #define HEAD_SET_OFF(ndim) {} + #define TAIL_SET_OFF(ndim) {} + #define kernel_kstart_n4(mdim,updk) "" + #define kernel_kstart_n6(mdim,updk) "" + #define kernel_kstart_n8(mdim,updk) "" + #define kernel_kstart_n10(mdim,updk) "" + #define kernel_kstart_n12(mdim,updk) "" + #define kernel_kend_n4(mdim) "" + #define kernel_kend_n6(mdim) "" + #define kernel_kend_n8(mdim) "" + #define kernel_kend_n10(mdim) "" + #define kernel_kend_n12(mdim) "" +#endif +#define kernel_kstart_n1(mdim,updk) 
"" +#define kernel_kstart_n2(mdim,updk) "" +#define kernel_kend_n1(mdim) "" +#define kernel_kend_n2(mdim) "" + +#ifdef TRMMKERNEL + #if BACKWARDS == 1 + #define INITASM_SET_K "movq %10,%%r13; subq %9,%%r13;" + #else + #define INITASM_SET_K "movq %9,%%r13;" + #endif +#else + #define INITASM_SET_K "movq %10,%%r13;" +#endif +#if defined(TRMMKERNEL) && defined(LEFT) + #if BACKWARDS==1 + #define init_update_k(mdim) "" + #define save_update_k(mdim) "subq $"#mdim",%%r13;" + #else + #define init_update_k(mdim) "addq $"#mdim",%%r13;" + #define save_update_k(mdim) "" + #endif +#else + #define init_update_k(mdim) "" + #define save_update_k(mdim) "" +#endif + +#define KERNEL_h_k1m16n1 \ + "vmovupd (%0),%%zmm1; vmovupd 64(%0),%%zmm2; addq $128,%0;"\ + "vbroadcastsd (%1),%%zmm3; vfmadd231pd %%zmm1,%%zmm3,%%zmm8; vfmadd231pd %%zmm2,%%zmm3,%%zmm9;" +#define KERNEL_k1m16n1 KERNEL_h_k1m16n1 "addq $8,%1;" +#ifdef BROADCAST_KERNEL + #define KERNEL_h_k1m16n2 KERNEL_h_k1m16n1\ + "vbroadcastsd 8(%1),%%zmm4; vfmadd231pd %%zmm1,%%zmm4,%%zmm10; vfmadd231pd %%zmm2,%%zmm4,%%zmm11;" + #define unit_acc_gen_m16n2(c1_no,c2_no,c3_no,c4_no,boff1,...)\ + "vbroadcastsd "#boff1"("#__VA_ARGS__"),%%zmm3; vfmadd231pd %%zmm1,%%zmm3,%%zmm"#c1_no"; vfmadd231pd %%zmm2,%%zmm3,%%zmm"#c2_no";"\ + "vbroadcastsd "#boff1"+8("#__VA_ARGS__"),%%zmm4; vfmadd231pd %%zmm1,%%zmm4,%%zmm"#c3_no"; vfmadd231pd %%zmm2,%%zmm4,%%zmm"#c4_no";" + #define unit_acc_m16n2(c1_no,c2_no,c3_no,c4_no,...) unit_acc_gen_m16n2(c1_no,c2_no,c3_no,c4_no,0,__VA_ARGS__) +#else + #define unit_acc_gen_m16n2(c1_no,c2_no,c3_no,c4_no,boff1,...)\ + "vbroadcastf32x4 "#boff1"("#__VA_ARGS__"),%%zmm5; vfmadd231pd %%zmm1,%%zmm5,%%zmm"#c1_no"; vfmadd231pd %%zmm2,%%zmm5,%%zmm"#c2_no";"\ + "vfmadd231pd %%zmm3,%%zmm5,%%zmm"#c3_no"; vfmadd231pd %%zmm4,%%zmm5,%%zmm"#c4_no";" + #define unit_acc_m16n2(c1_no,c2_no,c3_no,c4_no,...) 
unit_acc_gen_m16n2(c1_no,c2_no,c3_no,c4_no,0,__VA_ARGS__) + #define KERNEL_h_k1m16n2 \ + "vmovddup (%0),%%zmm1; vmovddup 8(%0),%%zmm2; vmovddup 64(%0),%%zmm3; vmovddup 72(%0),%%zmm4; addq $128,%0;"\ + unit_acc_m16n2(8,9,10,11,%1) +#endif +#define KERNEL_k1m16n2 KERNEL_h_k1m16n2 "addq $16,%1;" +#define KERNEL_h_k1m16n4 KERNEL_h_k1m16n2 "prefetcht0 384(%0);" unit_acc_m16n2(12,13,14,15,%1,%%r12,1) +#define KERNEL_k1m16n4 KERNEL_h_k1m16n4 "addq $16,%1;" +#define KERNEL_k1m16n6 KERNEL_h_k1m16n4 unit_acc_m16n2(16,17,18,19,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m16n8 KERNEL_k1m16n6 "prefetcht0 448(%0);" unit_acc_m16n2(20,21,22,23,%%r15) +#define KERNEL_k1m16n8 KERNEL_h_k1m16n8 "addq $16,%%r15;" +#define KERNEL_h_k1m16n10 KERNEL_h_k1m16n8 unit_acc_m16n2(24,25,26,27,%%r15,%%r12,1) +#define KERNEL_k1m16n10 KERNEL_h_k1m16n10 "addq $16,%%r15;" +#define KERNEL_h_k1m16n12 KERNEL_h_k1m16n10 unit_acc_m16n2(28,29,30,31,%%r15,%%r12,2) +#define KERNEL_k1m16n12 KERNEL_h_k1m16n12 "addq $16,%%r15;" +#if defined(TRMMKERNEL) && !defined(LEFT) && (BACKWARDS == 0) + #ifdef BROADCAST_KERNEL + #define loada_kend_k1m16 "vmovupd (%0,%3,1),%%zmm1; vmovupd 64(%0,%3,1),%%zmm2; addq $128,%3;" + #else + #define loada_kend_k1m16 "vmovddup (%0,%3,1),%%zmm1; vmovddup 8(%0,%3,1),%%zmm2; vmovddup 64(%0,%3,1),%%zmm3; vmovddup 72(%0,%3,1),%%zmm4; addq $128,%3;" + #endif + #define acc_kend_nc2_k1m16(boff1) unit_acc_gen_m16n2(12,13,14,15,boff1,%1,%%r12,1) + #define acc_kend_nc3_k1m16(boff1) unit_acc_gen_m16n2(16,17,18,19,boff1,%1,%%r12,2) + #define acc_kend_nc4_k1m16(boff1) unit_acc_gen_m16n2(20,21,22,23,boff1,%%r15) + #define acc_kend_nc5_k1m16(boff1) unit_acc_gen_m16n2(24,25,26,27,boff1,%%r15,%%r12,1) + #define acc_kend_nc6_k1m16(boff1) unit_acc_gen_m16n2(28,29,30,31,boff1,%%r15,%%r12,2) +#endif +#define save_init_m16 "movq %2,%3; addq $128,%2;" +#ifdef TRMMKERNEL + #define SAVE_m16n1 "vmulpd %%zmm8,%%zmm0,%%zmm8; vmovupd %%zmm8,(%2); vmulpd %%zmm9,%%zmm0,%%zmm9; vmovupd %%zmm9,64(%2); addq $128,%2;" + #ifdef BROADCAST_KERNEL + #define unit_save_m16n2(c1_no,c2_no,c3_no,c4_no)\ + "vmulpd %%zmm"#c1_no",%%zmm0,%%zmm"#c1_no"; vmovupd %%zmm"#c1_no",(%3); vmulpd %%zmm"#c2_no",%%zmm0,%%zmm"#c2_no"; vmovupd %%zmm"#c2_no",64(%3);"\ + "vmulpd %%zmm"#c3_no",%%zmm0,%%zmm"#c3_no"; vmovupd %%zmm"#c3_no",(%3,%4,1); vmulpd %%zmm"#c4_no",%%zmm0,%%zmm"#c4_no"; vmovupd %%zmm"#c4_no",64(%3,%4,1); leaq (%3,%4,2),%3;" + #else + #define unit_save_m16n2(c1_no,c2_no,c3_no,c4_no)\ + "vunpcklpd %%zmm"#c2_no",%%zmm"#c1_no",%%zmm1; vunpcklpd %%zmm"#c4_no",%%zmm"#c3_no",%%zmm2; vunpckhpd %%zmm"#c2_no",%%zmm"#c1_no",%%zmm3; vunpckhpd %%zmm"#c4_no",%%zmm"#c3_no",%%zmm4;"\ + "vmulpd %%zmm1,%%zmm0,%%zmm1; vmovupd %%zmm1,(%3); vmulpd %%zmm2,%%zmm0,%%zmm2; vmovupd %%zmm2,64(%3);"\ + "vmulpd %%zmm3,%%zmm0,%%zmm3; vmovupd %%zmm3,(%3,%4,1); vmulpd %%zmm4,%%zmm0,%%zmm4; vmovupd %%zmm4,64(%3,%4,1); leaq (%3,%4,2),%3;" + #endif +#else + #define SAVE_m16n1 "vfmadd213pd (%2),%%zmm0,%%zmm8; vmovupd %%zmm8,(%2); vfmadd213pd 64(%2),%%zmm0,%%zmm9; vmovupd %%zmm9,64(%2); addq $128,%2;" + #ifdef BROADCAST_KERNEL + #define unit_save_m16n2(c1_no,c2_no,c3_no,c4_no)\ + "vfmadd213pd (%3),%%zmm0,%%zmm"#c1_no"; vmovupd %%zmm"#c1_no",(%3); vfmadd213pd 64(%3),%%zmm0,%%zmm"#c2_no"; vmovupd %%zmm"#c2_no",64(%3);"\ + "vfmadd213pd (%3,%4,1),%%zmm0,%%zmm"#c3_no"; vmovupd %%zmm"#c3_no",(%3,%4,1); vfmadd213pd 64(%3,%4,1),%%zmm0,%%zmm"#c4_no"; vmovupd %%zmm"#c4_no",64(%3,%4,1); leaq (%3,%4,2),%3;" + #else + #define unit_save_m16n2(c1_no,c2_no,c3_no,c4_no)\ + "vunpcklpd 
%%zmm"#c2_no",%%zmm"#c1_no",%%zmm1; vunpcklpd %%zmm"#c4_no",%%zmm"#c3_no",%%zmm2; vunpckhpd %%zmm"#c2_no",%%zmm"#c1_no",%%zmm3; vunpckhpd %%zmm"#c4_no",%%zmm"#c3_no",%%zmm4;"\ + "vfmadd213pd (%3),%%zmm0,%%zmm1; vmovupd %%zmm1,(%3); vfmadd213pd 64(%3),%%zmm0,%%zmm2; vmovupd %%zmm2,64(%3);"\ + "vfmadd213pd (%3,%4,1),%%zmm0,%%zmm3; vmovupd %%zmm3,(%3,%4,1); vfmadd213pd 64(%3,%4,1),%%zmm0,%%zmm4; vmovupd %%zmm4,64(%3,%4,1); leaq (%3,%4,2),%3;" + #endif +#endif +#define SAVE_m16n2 save_init_m16 unit_save_m16n2(8,9,10,11) +#define SAVE_m16n4 SAVE_m16n2 unit_save_m16n2(12,13,14,15) +#define SAVE_m16n6 SAVE_m16n4 unit_save_m16n2(16,17,18,19) +#define SAVE_m16n8 SAVE_m16n6 unit_save_m16n2(20,21,22,23) +#define SAVE_m16n10 SAVE_m16n8 unit_save_m16n2(24,25,26,27) +#define SAVE_m16n12 SAVE_m16n10 unit_save_m16n2(28,29,30,31) +#define unit_init_2zmm(c1_no,c2_no) "vpxorq %%zmm"#c1_no",%%zmm"#c1_no",%%zmm"#c1_no"; vpxorq %%zmm"#c2_no",%%zmm"#c2_no",%%zmm"#c2_no";" +#define unit_init_4zmm(c1_no,c2_no,c3_no,c4_no) unit_init_2zmm(c1_no,c2_no) unit_init_2zmm(c3_no,c4_no) +#define INIT_m16n1 unit_init_2zmm(8,9) +#define INIT_m16n2 unit_init_4zmm(8,9,10,11) +#define INIT_m16n4 INIT_m16n2 unit_init_4zmm(12,13,14,15) +#define INIT_m16n6 INIT_m16n4 unit_init_4zmm(16,17,18,19) +#define INIT_m16n8 INIT_m16n6 unit_init_4zmm(20,21,22,23) +#define INIT_m16n10 INIT_m16n8 unit_init_4zmm(24,25,26,27) +#define INIT_m16n12 INIT_m16n10 unit_init_4zmm(28,29,30,31) + +#define KERNEL_k1m8n1 \ + "vbroadcastsd (%1),%%zmm1; addq $8,%1;"\ + "vfmadd231pd (%0),%%zmm1,%%zmm8; addq $64,%0;" +#define unit_acc_gen_m8n2(c1_no,c2_no,boff,...)\ + "vbroadcastf32x4 "#boff"("#__VA_ARGS__"),%%zmm3; vfmadd231pd %%zmm1,%%zmm3,%%zmm"#c1_no"; vfmadd231pd %%zmm2,%%zmm3,%%zmm"#c2_no";" +#define unit_acc_m8n2(c1_no,c2_no,...) 
unit_acc_gen_m8n2(c1_no,c2_no,0,__VA_ARGS__) +#define KERNEL_h_k1m8n2 \ + "vmovddup (%0),%%zmm1; vmovddup 8(%0),%%zmm2; addq $64,%0;" unit_acc_m8n2(8,9,%1) +#define KERNEL_k1m8n2 KERNEL_h_k1m8n2 "addq $16,%1;" +#define KERNEL_h_k1m8n4 KERNEL_h_k1m8n2 unit_acc_m8n2(10,11,%1,%%r12,1) +#define KERNEL_k1m8n4 KERNEL_h_k1m8n4 "addq $16,%1;" +#define KERNEL_k1m8n6 KERNEL_h_k1m8n4 unit_acc_m8n2(12,13,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m8n8 KERNEL_k1m8n6 unit_acc_m8n2(14,15,%%r15) +#define KERNEL_k1m8n8 KERNEL_h_k1m8n8 "addq $16,%%r15;" +#define KERNEL_h_k1m8n10 KERNEL_h_k1m8n8 unit_acc_m8n2(16,17,%%r15,%%r12,1) +#define KERNEL_k1m8n10 KERNEL_h_k1m8n10 "addq $16,%%r15;" +#define KERNEL_h_k1m8n12 KERNEL_h_k1m8n10 unit_acc_m8n2(18,19,%%r15,%%r12,2) +#define KERNEL_k1m8n12 KERNEL_h_k1m8n12 "addq $16,%%r15;" +#if defined(TRMMKERNEL) && !defined(LEFT) && (BACKWARDS == 0) + #define loada_kend_k1m8 "vmovddup (%0,%3,1),%%zmm1; vmovddup 8(%0,%3,1),%%zmm2; addq $64,%3;" + #define acc_kend_nc2_k1m8(boff1) unit_acc_gen_m8n2(10,11,boff1,%1,%%r12,1) + #define acc_kend_nc3_k1m8(boff1) unit_acc_gen_m8n2(12,13,boff1,%1,%%r12,2) + #define acc_kend_nc4_k1m8(boff1) unit_acc_gen_m8n2(14,15,boff1,%%r15) + #define acc_kend_nc5_k1m8(boff1) unit_acc_gen_m8n2(16,17,boff1,%%r15,%%r12,1) + #define acc_kend_nc6_k1m8(boff1) unit_acc_gen_m8n2(18,19,boff1,%%r15,%%r12,2) +#endif +#define save_init_m8 "movq %2,%3; addq $64,%2;" +#ifdef TRMMKERNEL + #define SAVE_m8n1 "vmulpd %%zmm8,%%zmm0,%%zmm8; vmovupd %%zmm8,(%2); addq $64,%2;" + #define unit_save_m8n2(c1_no,c2_no)\ + "vunpcklpd %%zmm"#c2_no",%%zmm"#c1_no",%%zmm1; vmulpd %%zmm1,%%zmm0,%%zmm1; vmovupd %%zmm1,(%3);"\ + "vunpckhpd %%zmm"#c2_no",%%zmm"#c1_no",%%zmm2; vmulpd %%zmm2,%%zmm0,%%zmm2; vmovupd %%zmm2,(%3,%4,1); leaq (%3,%4,2),%3;" +#else + #define SAVE_m8n1 "vfmadd213pd (%2),%%zmm0,%%zmm8; vmovupd %%zmm8,(%2); addq $64,%2;" + #define unit_save_m8n2(c1_no,c2_no)\ + "vunpcklpd %%zmm"#c2_no",%%zmm"#c1_no",%%zmm1; vfmadd213pd (%3),%%zmm0,%%zmm1; vmovupd %%zmm1,(%3);"\ + "vunpckhpd %%zmm"#c2_no",%%zmm"#c1_no",%%zmm2; vfmadd213pd (%3,%4,1),%%zmm0,%%zmm2; vmovupd %%zmm2,(%3,%4,1); leaq (%3,%4,2),%3;" +#endif +#define SAVE_m8n2 save_init_m8 unit_save_m8n2(8,9) +#define SAVE_m8n4 SAVE_m8n2 unit_save_m8n2(10,11) +#define SAVE_m8n6 SAVE_m8n4 unit_save_m8n2(12,13) +#define SAVE_m8n8 SAVE_m8n6 unit_save_m8n2(14,15) +#define SAVE_m8n10 SAVE_m8n8 unit_save_m8n2(16,17) +#define SAVE_m8n12 SAVE_m8n10 unit_save_m8n2(18,19) +#define INIT_m8n1 "vpxorq %%zmm8,%%zmm8,%%zmm8;" +#define INIT_m8n2 unit_init_2zmm(8,9) +#define INIT_m8n4 INIT_m8n2 unit_init_2zmm(10,11) +#define INIT_m8n6 INIT_m8n4 unit_init_2zmm(12,13) +#define INIT_m8n8 INIT_m8n6 unit_init_2zmm(14,15) +#define INIT_m8n10 INIT_m8n8 unit_init_2zmm(16,17) +#define INIT_m8n12 INIT_m8n10 unit_init_2zmm(18,19) + +#define KERNEL_k1m4n1 \ + "vbroadcastsd (%1),%%ymm1; addq $8,%1;"\ + "vfmadd231pd (%0),%%ymm1,%%ymm4; addq $32,%0;" +#define unit_acc_gen_m4n2(c1_no,c2_no,boff,...)\ + "vbroadcastf128 "#boff"("#__VA_ARGS__"),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,%%ymm"#c1_no"; vfmadd231pd %%ymm2,%%ymm3,%%ymm"#c2_no";" +#define unit_acc_m4n2(c1_no,c2_no,...) 
unit_acc_gen_m4n2(c1_no,c2_no,0,__VA_ARGS__) +#define KERNEL_h_k1m4n2 \ + "vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2; addq $32,%0;" unit_acc_m4n2(4,5,%1) +#define KERNEL_k1m4n2 KERNEL_h_k1m4n2 "addq $16,%1;" +#define KERNEL_h_k1m4n4 KERNEL_h_k1m4n2 unit_acc_m4n2(6,7,%1,%%r12,1) +#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;" +#define KERNEL_k1m4n6 KERNEL_h_k1m4n4 unit_acc_m4n2(8,9,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m4n8 KERNEL_k1m4n6 unit_acc_m4n2(10,11,%%r15) +#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $16,%%r15;" +#define KERNEL_h_k1m4n10 KERNEL_h_k1m4n8 unit_acc_m4n2(12,13,%%r15,%%r12,1) +#define KERNEL_k1m4n10 KERNEL_h_k1m4n10 "addq $16,%%r15;" +#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n10 unit_acc_m4n2(14,15,%%r15,%%r12,2) +#define KERNEL_k1m4n12 KERNEL_h_k1m4n12 "addq $16,%%r15;" +#if defined(TRMMKERNEL) && !defined(LEFT) && (BACKWARDS == 0) + #define loada_kend_k1m4 "vmovddup (%0,%3,1),%%ymm1; vmovddup 8(%0,%3,1),%%ymm2; addq $32,%3;" + #define acc_kend_nc2_k1m4(boff1) unit_acc_gen_m4n2(6,7,boff1,%1,%%r12,1) + #define acc_kend_nc3_k1m4(boff1) unit_acc_gen_m4n2(8,9,boff1,%1,%%r12,2) + #define acc_kend_nc4_k1m4(boff1) unit_acc_gen_m4n2(10,11,boff1,%%r15) + #define acc_kend_nc5_k1m4(boff1) unit_acc_gen_m4n2(12,13,boff1,%%r15,%%r12,1) + #define acc_kend_nc6_k1m4(boff1) unit_acc_gen_m4n2(14,15,boff1,%%r15,%%r12,2) +#endif +#define save_init_m4 "movq %2,%3; addq $32,%2;" +#ifdef TRMMKERNEL + #define SAVE_m4n1 "vmulpd %%ymm4,%%ymm0,%%ymm4; vmovupd %%ymm4,(%2); addq $32,%2;" + #define unit_save_m4n2(c1_no,c2_no)\ + "vunpcklpd %%ymm"#c2_no",%%ymm"#c1_no",%%ymm1; vmulpd %%ymm1,%%ymm0,%%ymm1; vmovupd %%ymm1,(%3);"\ + "vunpckhpd %%ymm"#c2_no",%%ymm"#c1_no",%%ymm2; vmulpd %%ymm2,%%ymm0,%%ymm2; vmovupd %%ymm2,(%3,%4,1); leaq (%3,%4,2),%3;" +#else + #define SAVE_m4n1 "vfmadd213pd (%2),%%ymm0,%%ymm4; vmovupd %%ymm4,(%2); addq $32,%2;" + #define unit_save_m4n2(c1_no,c2_no)\ + "vunpcklpd %%ymm"#c2_no",%%ymm"#c1_no",%%ymm1; vfmadd213pd (%3),%%ymm0,%%ymm1; vmovupd %%ymm1,(%3);"\ + "vunpckhpd %%ymm"#c2_no",%%ymm"#c1_no",%%ymm2; vfmadd213pd (%3,%4,1),%%ymm0,%%ymm2; vmovupd %%ymm2,(%3,%4,1); leaq (%3,%4,2),%3;" +#endif +#define SAVE_m4n2 save_init_m4 unit_save_m4n2(4,5) +#define SAVE_m4n4 SAVE_m4n2 unit_save_m4n2(6,7) +#define SAVE_m4n6 SAVE_m4n4 unit_save_m4n2(8,9) +#define SAVE_m4n8 SAVE_m4n6 unit_save_m4n2(10,11) +#define SAVE_m4n10 SAVE_m4n8 unit_save_m4n2(12,13) +#define SAVE_m4n12 SAVE_m4n10 unit_save_m4n2(14,15) +#define INIT_m4n1 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define unit_init_2ymm(c1_no,c2_no) "vpxor %%ymm"#c1_no",%%ymm"#c1_no",%%ymm"#c1_no"; vpxor %%ymm"#c2_no",%%ymm"#c2_no",%%ymm"#c2_no";" +#define INIT_m4n2 unit_init_2ymm(4,5) +#define INIT_m4n4 INIT_m4n2 unit_init_2ymm(6,7) +#define INIT_m4n6 INIT_m4n4 unit_init_2ymm(8,9) +#define INIT_m4n8 INIT_m4n6 unit_init_2ymm(10,11) +#define INIT_m4n10 INIT_m4n8 unit_init_2ymm(12,13) +#define INIT_m4n12 INIT_m4n10 unit_init_2ymm(14,15) + +#define KERNEL_k1m2n1 \ + "vmovddup (%1),%%xmm1; addq $8,%1;"\ + "vfmadd231pd (%0),%%xmm1,%%xmm4; addq $16,%0;" +#define unit_acc_gen_m2n2(c1_no,c2_no,boff,...)\ + "vmovupd "#boff"("#__VA_ARGS__"),%%xmm3; vfmadd231pd %%xmm1,%%xmm3,%%xmm"#c1_no"; vfmadd231pd %%xmm2,%%xmm3,%%xmm"#c2_no";" +#define unit_acc_m2n2(c1_no,c2_no,...) 
unit_acc_gen_m2n2(c1_no,c2_no,0,__VA_ARGS__) +#define KERNEL_h_k1m2n2 \ + "vmovddup (%0),%%xmm1; vmovddup 8(%0),%%xmm2; addq $16,%0;" unit_acc_m2n2(4,5,%1) +#define KERNEL_k1m2n2 KERNEL_h_k1m2n2 "addq $16,%1;" +#define KERNEL_h_k1m2n4 KERNEL_h_k1m2n2 unit_acc_m2n2(6,7,%1,%%r12,1) +#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $16,%1;" +#define KERNEL_k1m2n6 KERNEL_h_k1m2n4 unit_acc_m2n2(8,9,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m2n8 KERNEL_k1m2n6 unit_acc_m2n2(10,11,%%r15) +#define KERNEL_k1m2n8 KERNEL_h_k1m2n8 "addq $16,%%r15;" +#define KERNEL_h_k1m2n10 KERNEL_h_k1m2n8 unit_acc_m2n2(12,13,%%r15,%%r12,1) +#define KERNEL_k1m2n10 KERNEL_h_k1m2n10 "addq $16,%%r15;" +#define KERNEL_h_k1m2n12 KERNEL_h_k1m2n10 unit_acc_m2n2(14,15,%%r15,%%r12,2) +#define KERNEL_k1m2n12 KERNEL_h_k1m2n12 "addq $16,%%r15;" +#if defined(TRMMKERNEL) && !defined(LEFT) && (BACKWARDS == 0) + #define loada_kend_k1m2 "vmovddup (%0,%3,1),%%xmm1; vmovddup 8(%0,%3,1),%%xmm2; addq $16,%3;" + #define acc_kend_nc2_k1m2(boff1) unit_acc_gen_m2n2(6,7,boff1,%1,%%r12,1) + #define acc_kend_nc3_k1m2(boff1) unit_acc_gen_m2n2(8,9,boff1,%1,%%r12,2) + #define acc_kend_nc4_k1m2(boff1) unit_acc_gen_m2n2(10,11,boff1,%%r15) + #define acc_kend_nc5_k1m2(boff1) unit_acc_gen_m2n2(12,13,boff1,%%r15,%%r12,1) + #define acc_kend_nc6_k1m2(boff1) unit_acc_gen_m2n2(14,15,boff1,%%r15,%%r12,2) +#endif +#define save_init_m2 "movq %2,%3; addq $16,%2;" +#ifdef TRMMKERNEL + #define SAVE_m2n1 "vmulpd %%xmm4,%%xmm0,%%xmm4; vmovupd %%xmm4,(%2); addq $16,%2;" + #define unit_save_m2n2(c1_no,c2_no)\ + "vunpcklpd %%xmm"#c2_no",%%xmm"#c1_no",%%xmm1; vmulpd %%xmm1,%%xmm0,%%xmm1; vmovupd %%xmm1,(%3);"\ + "vunpckhpd %%xmm"#c2_no",%%xmm"#c1_no",%%xmm2; vmulpd %%xmm2,%%xmm0,%%xmm2; vmovupd %%xmm2,(%3,%4,1); leaq (%3,%4,2),%3;" +#else + #define SAVE_m2n1 "vfmadd213pd (%2),%%xmm0,%%xmm4; vmovupd %%xmm4,(%2); addq $16,%2;" + #define unit_save_m2n2(c1_no,c2_no)\ + "vunpcklpd %%xmm"#c2_no",%%xmm"#c1_no",%%xmm1; vfmadd213pd (%3),%%xmm0,%%xmm1; vmovupd %%xmm1,(%3);"\ + "vunpckhpd %%xmm"#c2_no",%%xmm"#c1_no",%%xmm2; vfmadd213pd (%3,%4,1),%%xmm0,%%xmm2; vmovupd %%xmm2,(%3,%4,1); leaq (%3,%4,2),%3;" +#endif +#define SAVE_m2n2 save_init_m2 unit_save_m2n2(4,5) +#define SAVE_m2n4 SAVE_m2n2 unit_save_m2n2(6,7) +#define SAVE_m2n6 SAVE_m2n4 unit_save_m2n2(8,9) +#define SAVE_m2n8 SAVE_m2n6 unit_save_m2n2(10,11) +#define SAVE_m2n10 SAVE_m2n8 unit_save_m2n2(12,13) +#define SAVE_m2n12 SAVE_m2n10 unit_save_m2n2(14,15) +#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define unit_init_2xmm(c1_no,c2_no) "vpxor %%xmm"#c1_no",%%xmm"#c1_no",%%xmm"#c1_no"; vpxor %%xmm"#c2_no",%%xmm"#c2_no",%%xmm"#c2_no";" +#define INIT_m2n2 unit_init_2xmm(4,5) +#define INIT_m2n4 INIT_m2n2 unit_init_2xmm(6,7) +#define INIT_m2n6 INIT_m2n4 unit_init_2xmm(8,9) +#define INIT_m2n8 INIT_m2n6 unit_init_2xmm(10,11) +#define INIT_m2n10 INIT_m2n8 unit_init_2xmm(12,13) +#define INIT_m2n12 INIT_m2n10 unit_init_2xmm(14,15) + +#define KERNEL_k1m1n1 \ + "vmovsd (%1),%%xmm1; addq $8,%1;"\ + "vfmadd231sd (%0),%%xmm1,%%xmm4; addq $8,%0;" +#define KERNEL_h_k1m1n2 \ + "vmovddup (%0),%%xmm1; addq $8,%0;"\ + "vfmadd231pd (%1),%%xmm1,%%xmm4;" +#define KERNEL_k1m1n2 KERNEL_h_k1m1n2 "addq $16,%1;" +#define KERNEL_h_k1m1n4 KERNEL_h_k1m1n2 "vfmadd231pd (%1,%%r12,1),%%xmm1,%%xmm5;" +#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $16,%1;" +#define KERNEL_k1m1n6 KERNEL_h_k1m1n4 "vfmadd231pd (%1,%%r12,2),%%xmm1,%%xmm6; addq $16,%1;" +#define KERNEL_h_k1m1n8 KERNEL_k1m1n6 "vfmadd231pd (%%r15),%%xmm1,%%xmm7;" +#define KERNEL_k1m1n8 
KERNEL_h_k1m1n8 "addq $16,%%r15;" +#define KERNEL_h_k1m1n10 KERNEL_h_k1m1n8 "vfmadd231pd (%%r15,%%r12,1),%%xmm1,%%xmm8;" +#define KERNEL_k1m1n10 KERNEL_h_k1m1n10 "addq $16,%%r15;" +#define KERNEL_h_k1m1n12 KERNEL_h_k1m1n10 "vfmadd231pd (%%r15,%%r12,2),%%xmm1,%%xmm9;" +#define KERNEL_k1m1n12 KERNEL_h_k1m1n12 "addq $16,%%r15;" +#if defined(TRMMKERNEL) && !defined(LEFT) && (BACKWARDS == 0) + #define loada_kend_k1m1 "vmovddup (%0,%3,1),%%xmm1; addq $8,%3;" + #define acc_kend_nc2_k1m1(boff1) "vfmadd231pd "#boff1"(%1,%%r12,1),%%xmm1,%%xmm5;" + #define acc_kend_nc3_k1m1(boff1) "vfmadd231pd "#boff1"(%1,%%r12,2),%%xmm1,%%xmm6;" + #define acc_kend_nc4_k1m1(boff1) "vfmadd231pd "#boff1"(%%r15),%%xmm1,%%xmm7;" + #define acc_kend_nc5_k1m1(boff1) "vfmadd231pd "#boff1"(%%r15,%%r12,1),%%xmm1,%%xmm8;" + #define acc_kend_nc6_k1m1(boff1) "vfmadd231pd "#boff1"(%%r15,%%r12,2),%%xmm1,%%xmm9;" +#endif +#define save_init_m1 "movq %2,%3; addq $8,%2;" +#ifdef TRMMKERNEL + #define SAVE_m1n1 "vmulsd %%xmm4,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2); addq $8,%2;" + #define unit_save_m1n2(c1_no)\ + "vmulpd %%xmm"#c1_no",%%xmm0,%%xmm2; vmovsd %%xmm2,(%3); vmovhpd %%xmm2,(%3,%4,1); leaq (%3,%4,2),%3;" +#else + #define SAVE_m1n1 "vfmadd213sd (%2),%%xmm0,%%xmm4; vmovsd %%xmm4,(%2); addq $8,%2;" + #define unit_save_m1n2(c1_no)\ + "vmovsd (%3),%%xmm2; vmovhpd (%3,%4,1),%%xmm2,%%xmm2; vfmadd231pd %%xmm"#c1_no",%%xmm0,%%xmm2; vmovsd %%xmm2,(%3); vmovhpd %%xmm2,(%3,%4,1); leaq (%3,%4,2),%3;" +#endif +#define SAVE_m1n2 save_init_m1 unit_save_m1n2(4) +#define SAVE_m1n4 SAVE_m1n2 unit_save_m1n2(5) +#define SAVE_m1n6 SAVE_m1n4 unit_save_m1n2(6) +#define SAVE_m1n8 SAVE_m1n6 unit_save_m1n2(7) +#define SAVE_m1n10 SAVE_m1n8 unit_save_m1n2(8) +#define SAVE_m1n12 SAVE_m1n10 unit_save_m1n2(9) +#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define INIT_m1n2 INIT_m1n1 +#define INIT_m1n4 INIT_m1n2 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m1n6 INIT_m1n4 "vpxor %%xmm6,%%xmm6,%%xmm6;" +#define INIT_m1n8 INIT_m1n6 "vpxor %%xmm7,%%xmm7,%%xmm7;" +#define INIT_m1n10 INIT_m1n8 "vpxor %%xmm8,%%xmm8,%%xmm8;" +#define INIT_m1n12 INIT_m1n10 "vpxor %%xmm9,%%xmm9,%%xmm9;" + +#define COMPUTE_SIMPLE(mdim,ndim)\ + init_update_k(mdim) INIT_m##mdim##n##ndim\ + "movq %%r13,%5;" INIT_set_papb(mdim,ndim)\ + kernel_kstart_n##ndim(mdim,subq)\ + "testq %5,%5; jz 7"#mdim"7"#ndim"9f;"\ + "7"#mdim"7"#ndim"1:\n\t"\ + KERNEL_k1m##mdim##n##ndim "decq %5; jnz 7"#mdim"7"#ndim"1b;"\ + "7"#mdim"7"#ndim"9:\n\t"\ + kernel_kend_n##ndim(mdim)\ + SAVE_set_pa(mdim) SAVE_m##mdim##n##ndim save_update_k(mdim) +#define COMPUTE_m16n1 COMPUTE_SIMPLE(16,1) +#define COMPUTE_m16n2 COMPUTE_SIMPLE(16,2) +#define COMPUTE_m16n4 COMPUTE_SIMPLE(16,4) +#define COMPUTE_m16n6 COMPUTE_SIMPLE(16,6) +#define COMPUTE_m16n8 COMPUTE_SIMPLE(16,8) +#define COMPUTE_m16n10 COMPUTE_SIMPLE(16,10) +#if defined(TRMMKERNEL) && !defined(LEFT) && defined(TRANSA) + #define INVERSE_K_MID "negq %5; leaq 6(%%r13,%5,1),%5;" +#else + #define INVERSE_K_MID "negq %5; leaq 16(%%r13,%5,1),%5;" +#endif +#define COMPUTE_m16n12 \ + init_update_k(16) INIT_m16n12 "movq %%r13,%5;" INIT_set_papb(16,12) "movq %2,%3;"\ + kernel_kstart_n12(16,subq)\ + "cmpq $16,%5; jb 7167123f; movq $16,%5;"\ + "7167121:\n\t"\ + KERNEL_k1m16n12 "addq $4,%5; testq $12,%5; movq $172,%%r10; cmovz %4,%%r10;"\ + KERNEL_k1m16n12 "prefetcht1 (%3); subq $129,%3; addq %%r10,%3;"\ + KERNEL_k1m16n12 "prefetcht1 (%6); addq $32,%6; cmpq $208,%5; cmoveq %2,%3;"\ + KERNEL_k1m16n12 "cmpq %5,%%r13; jnb 7167121b;"\ + "movq %2,%3;" INVERSE_K_MID\ + "7167123:\n\t"\ + "testq 
%5,%5; jz 7167129f;"\
+ "7167125:\n\t"\
+ "prefetcht0 (%3); prefetcht0 64(%3); prefetcht0 127(%3);"\
+ KERNEL_k1m16n12 "addq %4,%3; decq %5;jnz 7167125b;"\
+ "7167129:\n\t"\
+ kernel_kend_n12(16)\
+ "prefetcht0 (%%r14);" SAVE_set_pa(16) SAVE_m16n12 save_update_k(16)
+#define COMPUTE(ndim) {\
+ b_pref = b_ptr + ndim * K; HEAD_SET_OFF(ndim)\
+ __asm__ __volatile__(\
+ "vbroadcastsd %8,%%zmm0; movq %7,%%r11; movq %1,%%r14; movq %10,%%r12; salq $4,%%r12;" INITASM_SET_K\
+ "cmpq $16,%%r11; jb "#ndim"33102f;"\
+ #ndim"33101:\n\t"\
+ COMPUTE_m16n##ndim "subq $16,%%r11; cmpq $16,%%r11; jnb "#ndim"33101b;"\
+ #ndim"33102:\n\t"\
+ "cmpq $8,%%r11; jb "#ndim"33103f;"\
+ COMPUTE_SIMPLE(8,ndim) "subq $8,%%r11;"\
+ #ndim"33103:\n\t"\
+ "cmpq $4,%%r11; jb "#ndim"33104f;"\
+ COMPUTE_SIMPLE(4,ndim) "subq $4,%%r11;"\
+ #ndim"33104:\n\t"\
+ "cmpq $2,%%r11; jb "#ndim"33105f;"\
+ COMPUTE_SIMPLE(2,ndim) "subq $2,%%r11;"\
+ #ndim"33105:\n\t"\
+ "testq %%r11,%%r11; jz "#ndim"33106f;"\
+ COMPUTE_SIMPLE(1,ndim) "subq $1,%%r11;"\
+ #ndim"33106:\n\t"\
+ "movq %%r14,%1;"\
+ :"+r"(a_ptr),"+r"(b_ptr),"+r"(c_ptr),"+r"(c_tmp),"+r"(ldc_in_bytes),"+r"(k_count),"+r"(b_pref):"m"(M),"m"(ALPHA),"m"(off),"m"(K):"r10","r11","r12","r13","r14","r15","cc","memory",\
+ "zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15",\
+ "zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31");\
+ a_ptr -= M * K; b_ptr += ndim * K; c_ptr += ndim * ldc - M; TAIL_SET_OFF(ndim)\
+}
+
+#include "common.h"
+#include <stdint.h>
+
+int __attribute__ ((noinline))
+CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG ldc
+#ifdef TRMMKERNEL
+ , BLASLONG offset
+#endif
+)
+{
+ if(m==0||n==0) return 0;
+ int64_t ldc_in_bytes = (int64_t)ldc * sizeof(double); double ALPHA = alpha;
+ int64_t M = (int64_t)m, K = (int64_t)k, k_count = 0;
+ BLASLONG n_count = n, off = 0;
+ double *a_ptr = A,*b_ptr = B,*c_ptr = C,*c_tmp = C,*b_pref = B;
+#ifdef TRMMKERNEL
+ #ifdef LEFT
+ off = offset;
+ #else
+ off = -offset;
+ #endif
+#endif
+ for(;n_count>11;n_count-=12) COMPUTE(12)
+ for(;n_count>9;n_count-=10) COMPUTE(10)
+ for(;n_count>7;n_count-=8) COMPUTE(8)
+ for(;n_count>5;n_count-=6) COMPUTE(6)
+ for(;n_count>3;n_count-=4) COMPUTE(4)
+ for(;n_count>1;n_count-=2) COMPUTE(2)
+ if(n_count>0) COMPUTE(1)
+ return 0;
+}
+
diff --git a/kernel/x86_64/dgemm_kernel_4x8_haswell.S b/kernel/x86_64/dgemm_kernel_4x8_haswell.S
index c84b599ce..19e32ef2c 100644
--- a/kernel/x86_64/dgemm_kernel_4x8_haswell.S
+++ b/kernel/x86_64/dgemm_kernel_4x8_haswell.S
@@ -106,7 +106,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #endif
 
 #define A_PR1 512
-#define B_PR1 512
+#define B_PR1 160
+#define BROADCASTKERNEL
 
 /*******************************************************************************************
 * Macro definitions
@@ -133,7 +134,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	prefetcht0 A_PR1(AO)
 	vmovups -12 * SIZE(BO), %ymm1
 	prefetcht0 B_PR1(BO)
+# if defined BROADCASTKERNEL
+	vbroadcastsd -16 * SIZE(AO), %ymm0
+# else
 	vmovups -16 * SIZE(AO), %ymm0
+# endif
 	prefetcht0 B_PR1+64(BO)
 	vmovups -8 * SIZE(BO), %ymm2
 	prefetcht0 B_PR1+128(BO)
@@ -143,17 +148,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vmulpd %ymm0 ,%ymm2 , %ymm8 vmulpd %ymm0 ,%ymm3 , %ymm12 prefetcht0 B_PR1+256(BO) - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vmulpd %ymm0 ,%ymm1 , %ymm5 vmulpd %ymm0 ,%ymm2 , %ymm9 vmulpd %ymm0 ,%ymm3 , %ymm13 +# if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +# else vpermpd $ 0x1b, %ymm0 , %ymm0 +# endif vmulpd %ymm0 ,%ymm1 , %ymm6 vmulpd %ymm0 ,%ymm2 , %ymm10 addq $ 12*SIZE, BO vmulpd %ymm0 ,%ymm3 , %ymm14 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -13 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vmulpd %ymm0 ,%ymm1 , %ymm7 vmovups -12 * SIZE(BO), %ymm1 vmulpd %ymm0 ,%ymm2 , %ymm11 @@ -165,23 +182,38 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x12_M1 prefetcht0 A_PR1(AO) +# if defined BROADCASTKERNEL + vbroadcastsd -16 * SIZE(AO), %ymm0 +# else vmovups -16 * SIZE(AO), %ymm0 +# endif prefetcht0 B_PR1(BO) vfmadd231pd %ymm0 ,%ymm1 , %ymm4 prefetcht0 B_PR1+64(BO) vfmadd231pd %ymm0 ,%ymm2 , %ymm8 prefetcht0 B_PR1+128(BO) vfmadd231pd %ymm0 ,%ymm3 , %ymm12 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 vfmadd231pd %ymm0 ,%ymm2 , %ymm9 vfmadd231pd %ymm0 ,%ymm3 , %ymm13 +# if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +# else vpermpd $ 0x1b, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 vfmadd231pd %ymm0 ,%ymm2 , %ymm10 - vfmadd231pd %ymm0 ,%ymm3 , %ymm14 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -13 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vmovups -12 * SIZE(BO), %ymm1 vfmadd231pd %ymm0 ,%ymm2 , %ymm11 @@ -192,21 +224,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .endm .macro KERNEL4x12_M2 +# if defined BROADCASTKERNEL + vbroadcastsd -12 * SIZE(AO), %ymm0 +# else vmovups -12 * SIZE(AO), %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 vfmadd231pd %ymm0 ,%ymm2 , %ymm8 vfmadd231pd %ymm0 ,%ymm3 , %ymm12 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -11 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 vfmadd231pd %ymm0 ,%ymm2 , %ymm9 vfmadd231pd %ymm0 ,%ymm3 , %ymm13 +# if defined BROADCASTKERNEL + vbroadcastsd -10 * SIZE(AO), %ymm0 +# else vpermpd $ 0x1b, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 vfmadd231pd %ymm0 ,%ymm2 , %ymm10 addq $ 8*SIZE, AO vfmadd231pd %ymm0 ,%ymm3 , %ymm14 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vmovups 0 * SIZE(BO), %ymm1 vfmadd231pd %ymm0 ,%ymm2 , %ymm11 @@ -218,21 +266,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL4x12_E +# if defined BROADCASTKERNEL + vbroadcastsd -12 * SIZE(AO), %ymm0 +# else vmovups -12 * SIZE(AO), %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 vfmadd231pd %ymm0 ,%ymm2 , %ymm8 vfmadd231pd %ymm0 ,%ymm3 , %ymm12 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -11 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 vfmadd231pd %ymm0 ,%ymm2 , %ymm9 vfmadd231pd %ymm0 ,%ymm3 , %ymm13 +# if defined BROADCASTKERNEL + vbroadcastsd -10 * SIZE(AO), %ymm0 +# else vpermpd $ 0x1b, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 vfmadd231pd %ymm0 ,%ymm2 , %ymm10 addq $ 8*SIZE, AO vfmadd231pd %ymm0 ,%ymm3 , %ymm14 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vfmadd231pd %ymm0 ,%ymm2 , %ymm11 vfmadd231pd %ymm0 ,%ymm3 , %ymm15 @@ -241,23 +305,39 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x12_SUB vmovups -12 * SIZE(BO), %ymm1 +# if defined BROADCASTKERNEL + vbroadcastsd -16 * SIZE(AO), %ymm0 +# else vmovups -16 * SIZE(AO), %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 vmovups -8 * SIZE(BO), %ymm2 vfmadd231pd %ymm0 ,%ymm2 , %ymm8 vmovups -4 * SIZE(BO), %ymm3 vfmadd231pd %ymm0 ,%ymm3 , %ymm12 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 vfmadd231pd %ymm0 ,%ymm2 , %ymm9 addq $ 12*SIZE, BO vfmadd231pd %ymm0 ,%ymm3 , %ymm13 +# if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +# else vpermpd $ 0x1b, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 vfmadd231pd %ymm0 ,%ymm2 , %ymm10 addq $ 4*SIZE, AO vfmadd231pd %ymm0 ,%ymm3 , %ymm14 - vpermpd $ 0xb1, %ymm0 , %ymm0 +# if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +# else + vpermilpd $ 0x05, %ymm0 , %ymm0 +# endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vfmadd231pd %ymm0 ,%ymm2 , %ymm11 vfmadd231pd %ymm0 ,%ymm3 , %ymm15 @@ -267,43 +347,83 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
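+/* With BROADCASTKERNEL each accumulator ymm4..ymm7 holds one full row of the
+ * 4x4 C tile (one broadcast A element times four consecutive B values), so
+ * the save path below recovers the column vectors of C with a 4x4 transpose
+ * (vperm2f128 followed by vunpcklpd/vunpckhpd). Without it the accumulators
+ * hold the rotated diagonals produced by vpermilpd/vpermpd, which the
+ * vblendpd chain untangles instead. */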
.macro SAVE4x12 + prefetcht0 BUFFER1 vbroadcastsd ALPHA, %ymm0 vmulpd %ymm0 , %ymm4 , %ymm4 vmulpd %ymm0 , %ymm5 , %ymm5 vmulpd %ymm0 , %ymm6 , %ymm6 vmulpd %ymm0 , %ymm7 , %ymm7 - + prefetcht0 64 + BUFFER1 vmulpd %ymm0 , %ymm8 , %ymm8 vmulpd %ymm0 , %ymm9 , %ymm9 vmulpd %ymm0 , %ymm10, %ymm10 vmulpd %ymm0 , %ymm11, %ymm11 - +#if B_PR1 > 32 + prefetcht0 128 + BUFFER1 +#endif vmulpd %ymm0 , %ymm12, %ymm12 vmulpd %ymm0 , %ymm13, %ymm13 vmulpd %ymm0 , %ymm14, %ymm14 vmulpd %ymm0 , %ymm15, %ymm15 +#if B_PR1 > 96 + prefetcht0 192 + BUFFER1 +#endif - vpermpd $ 0xb1 , %ymm5, %ymm5 - vpermpd $ 0xb1 , %ymm7, %ymm7 +#if defined BROADCASTKERNEL + vperm2f128 $ 0x20 , %ymm6, %ymm4 , %ymm0 + vperm2f128 $ 0x20 , %ymm7, %ymm5 , %ymm1 + vperm2f128 $ 0x31 , %ymm6, %ymm4 , %ymm2 + vperm2f128 $ 0x31 , %ymm7, %ymm5 , %ymm3 +#else + vpermilpd $ 0x05 , %ymm5, %ymm5 + vpermilpd $ 0x05 , %ymm7, %ymm7 +#endif + +#if B_PR1 > 160 + prefetcht0 256 + BUFFER1 +#endif +#if defined BROADCASTKERNEL + vunpcklpd %ymm1, %ymm0, %ymm4 + vunpckhpd %ymm1, %ymm0, %ymm5 + vunpcklpd %ymm3, %ymm2, %ymm6 + vunpckhpd %ymm3, %ymm2, %ymm7 +#else vblendpd $ 0x0a, %ymm5, %ymm4, %ymm0 vblendpd $ 0x05, %ymm5, %ymm4, %ymm1 vblendpd $ 0x0a, %ymm7, %ymm6, %ymm2 vblendpd $ 0x05, %ymm7, %ymm6, %ymm3 +#endif + +#if B_PR1 > 224 + prefetcht0 320 + BUFFER1 +#endif - vpermpd $ 0x1b , %ymm2, %ymm2 - vpermpd $ 0x1b , %ymm3, %ymm3 - vpermpd $ 0xb1 , %ymm2, %ymm2 - vpermpd $ 0xb1 , %ymm3, %ymm3 +#ifndef BROADCASTKERNEL + vperm2f128 $ 0x01 , %ymm2, %ymm2 , %ymm2 + vperm2f128 $ 0x01 , %ymm3, %ymm3 , %ymm3 +#endif + +#if B_PR1 > 288 + prefetcht0 384 + BUFFER1 +#endif +#ifndef BROADCASTKERNEL vblendpd $ 0x03, %ymm0, %ymm2 , %ymm4 vblendpd $ 0x03, %ymm1, %ymm3 , %ymm5 vblendpd $ 0x03, %ymm2, %ymm0 , %ymm6 vblendpd $ 0x03, %ymm3, %ymm1 , %ymm7 +#endif +#if B_PR1 > 352 + prefetcht0 448 + BUFFER1 +#endif leaq (CO1, LDC, 2), %rax +#if B_PR1 > 416 + prefetcht0 512 + BUFFER1 +#endif #if !defined(TRMMKERNEL) @@ -319,29 +439,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vmovups %ymm6 , (%rax) vmovups %ymm7 , (%rax, LDC) - prefetcht0 32(CO1) - prefetcht0 32(CO1,LDC) - prefetcht0 32(%rax) - prefetcht0 32(%rax,LDC) - - vpermpd $ 0xb1 , %ymm9 , %ymm9 - vpermpd $ 0xb1 , %ymm11, %ymm11 + prefetcht1 56(CO1) + prefetcht1 56(CO1,LDC) + prefetcht1 56(%rax) + prefetcht1 56(%rax,LDC) + +#if defined BROADCASTKERNEL + vperm2f128 $ 0x20 , %ymm10, %ymm8 , %ymm0 + vperm2f128 $ 0x20 , %ymm11, %ymm9 , %ymm1 + vperm2f128 $ 0x31 , %ymm10, %ymm8 , %ymm2 + vperm2f128 $ 0x31 , %ymm11, %ymm9 , %ymm3 + vunpcklpd %ymm1, %ymm0, %ymm4 + vunpckhpd %ymm1, %ymm0, %ymm5 + vunpcklpd %ymm3, %ymm2, %ymm6 + vunpckhpd %ymm3, %ymm2, %ymm7 +#else + vpermilpd $ 0x05 , %ymm9, %ymm9 + vpermilpd $ 0x05 , %ymm11, %ymm11 - vblendpd $ 0x0a, %ymm9 , %ymm8 , %ymm0 - vblendpd $ 0x05, %ymm9 , %ymm8 , %ymm1 + vblendpd $ 0x0a, %ymm9, %ymm8, %ymm0 + vblendpd $ 0x05, %ymm9, %ymm8, %ymm1 vblendpd $ 0x0a, %ymm11, %ymm10, %ymm2 vblendpd $ 0x05, %ymm11, %ymm10, %ymm3 - vpermpd $ 0x1b , %ymm2, %ymm2 - vpermpd $ 0x1b , %ymm3, %ymm3 - vpermpd $ 0xb1 , %ymm2, %ymm2 - vpermpd $ 0xb1 , %ymm3, %ymm3 + vperm2f128 $ 0x01 , %ymm2, %ymm2 , %ymm2 + vperm2f128 $ 0x01 , %ymm3, %ymm3 , %ymm3 vblendpd $ 0x03, %ymm0, %ymm2 , %ymm4 vblendpd $ 0x03, %ymm1, %ymm3 , %ymm5 vblendpd $ 0x03, %ymm2, %ymm0 , %ymm6 vblendpd $ 0x03, %ymm3, %ymm1 , %ymm7 - +#endif leaq (%rax, LDC, 2), %rax leaq (%rax, LDC, 2), %rbp @@ -360,29 +488,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
vmovups %ymm6 , (%rbp) vmovups %ymm7 , (%rbp, LDC) - prefetcht0 32(%rax) - prefetcht0 32(%rax,LDC) - prefetcht0 32(%rbp) - prefetcht0 32(%rbp,LDC) - - vpermpd $ 0xb1 , %ymm13, %ymm13 - vpermpd $ 0xb1 , %ymm15, %ymm15 + prefetcht1 56(%rax) + prefetcht1 56(%rax,LDC) + prefetcht1 56(%rbp) + prefetcht1 56(%rbp,LDC) + +#if defined BROADCASTKERNEL + vperm2f128 $ 0x20 , %ymm14, %ymm12 , %ymm0 + vperm2f128 $ 0x20 , %ymm15, %ymm13 , %ymm1 + vperm2f128 $ 0x31 , %ymm14, %ymm12 , %ymm2 + vperm2f128 $ 0x31 , %ymm15, %ymm13 , %ymm3 + vunpcklpd %ymm1, %ymm0, %ymm4 + vunpckhpd %ymm1, %ymm0, %ymm5 + vunpcklpd %ymm3, %ymm2, %ymm6 + vunpckhpd %ymm3, %ymm2, %ymm7 +#else + vpermilpd $ 0x05 , %ymm13, %ymm13 + vpermilpd $ 0x05 , %ymm15, %ymm15 vblendpd $ 0x0a, %ymm13, %ymm12, %ymm0 vblendpd $ 0x05, %ymm13, %ymm12, %ymm1 vblendpd $ 0x0a, %ymm15, %ymm14, %ymm2 vblendpd $ 0x05, %ymm15, %ymm14, %ymm3 - vpermpd $ 0x1b , %ymm2, %ymm2 - vpermpd $ 0x1b , %ymm3, %ymm3 - vpermpd $ 0xb1 , %ymm2, %ymm2 - vpermpd $ 0xb1 , %ymm3, %ymm3 + vperm2f128 $ 0x01 , %ymm2, %ymm2 , %ymm2 + vperm2f128 $ 0x01 , %ymm3, %ymm3 , %ymm3 vblendpd $ 0x03, %ymm0, %ymm2 , %ymm4 vblendpd $ 0x03, %ymm1, %ymm3 , %ymm5 vblendpd $ 0x03, %ymm2, %ymm0 , %ymm6 vblendpd $ 0x03, %ymm3, %ymm1 , %ymm7 - +#endif leaq (%rax, LDC, 4), %rax leaq (%rbp, LDC, 4), %rbp @@ -401,10 +537,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vmovups %ymm6 , (%rbp) vmovups %ymm7 , (%rbp, LDC) - prefetcht0 32(%rax) - prefetcht0 32(%rax,LDC) - prefetcht0 32(%rbp) - prefetcht0 32(%rbp,LDC) + prefetcht1 56(%rax) + prefetcht1 56(%rax,LDC) + prefetcht1 56(%rbp) + prefetcht1 56(%rbp,LDC) addq $ 4*SIZE, CO1 .endm @@ -683,19 +819,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x8_I vmovups -12 * SIZE(BO), %ymm1 +#if defined BROADCASTKERNEL + vbroadcastsd -16 * SIZE(AO), %ymm0 +#else vmovups -16 * SIZE(AO), %ymm0 +#endif vmovups -8 * SIZE(BO), %ymm2 vmulpd %ymm0 ,%ymm1 , %ymm4 vmulpd %ymm0 ,%ymm2 , %ymm8 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vmulpd %ymm0 ,%ymm1 , %ymm5 vmulpd %ymm0 ,%ymm2 , %ymm9 +#if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vmulpd %ymm0 ,%ymm1 , %ymm6 vmulpd %ymm0 ,%ymm2 , %ymm10 addq $ 8*SIZE, BO - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -13 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vmulpd %ymm0 ,%ymm1 , %ymm7 vmovups -12 * SIZE(BO), %ymm1 vmulpd %ymm0 ,%ymm2 , %ymm11 @@ -705,19 +857,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL4x8_M1 prefetcht0 A_PR1(AO) +#if defined BROADCASTKERNEL + vbroadcastsd -16 * SIZE(AO), %ymm0 +#else vmovups -16 * SIZE(AO), %ymm0 +#endif prefetcht0 B_PR1(BO) vfmadd231pd %ymm0 ,%ymm1 , %ymm4 prefetcht0 B_PR1+64(BO) vfmadd231pd %ymm0 ,%ymm2 , %ymm8 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 vfmadd231pd %ymm0 ,%ymm2 , %ymm9 +#if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 vfmadd231pd %ymm0 ,%ymm2 , %ymm10 - - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -13 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vmovups -12 * SIZE(BO), %ymm1 vfmadd231pd %ymm0 ,%ymm2 , %ymm11 @@ -726,18 +893,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .endm .macro KERNEL4x8_M2 +#if defined BROADCASTKERNEL + vbroadcastsd -12 * SIZE(AO), %ymm0 +#else vmovups -12 * SIZE(AO), %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 vfmadd231pd %ymm0 ,%ymm2 , %ymm8 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -11 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 vfmadd231pd %ymm0 ,%ymm2 , %ymm9 +#if defined BROADCASTKERNEL + vbroadcastsd -10 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 vfmadd231pd %ymm0 ,%ymm2 , %ymm10 addq $ 8*SIZE, AO - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vmovups -4 * SIZE(BO), %ymm1 vfmadd231pd %ymm0 ,%ymm2 , %ymm11 @@ -747,18 +930,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x8_E +#if defined BROADCASTKERNEL + vbroadcastsd -12 * SIZE(AO), %ymm0 +#else vmovups -12 * SIZE(AO), %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 vfmadd231pd %ymm0 ,%ymm2 , %ymm8 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -11 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 vfmadd231pd %ymm0 ,%ymm2 , %ymm9 +#if defined BROADCASTKERNEL + vbroadcastsd -10 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 vfmadd231pd %ymm0 ,%ymm2 , %ymm10 addq $ 8*SIZE, AO - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vfmadd231pd %ymm0 ,%ymm2 , %ymm11 addq $ 8*SIZE, BO @@ -766,19 +965,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL4x8_SUB vmovups -12 * SIZE(BO), %ymm1 +#if defined BROADCASTKERNEL + vbroadcastsd -16 * SIZE(AO), %ymm0 +#else vmovups -16 * SIZE(AO), %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 vmovups -8 * SIZE(BO), %ymm2 vfmadd231pd %ymm0 ,%ymm2 , %ymm8 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 vfmadd231pd %ymm0 ,%ymm2 , %ymm9 addq $ 8*SIZE, BO +#if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 vfmadd231pd %ymm0 ,%ymm2 , %ymm10 addq $ 4*SIZE, AO - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vfmadd231pd %ymm0 ,%ymm2 , %ymm11 @@ -799,23 +1014,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vmulpd %ymm0 , %ymm10, %ymm10 vmulpd %ymm0 , %ymm11, %ymm11 - vpermpd $ 0xb1 , %ymm5, %ymm5 - vpermpd $ 0xb1 , %ymm7, %ymm7 +#if defined BROADCASTKERNEL + vperm2f128 $ 0x20 , %ymm6, %ymm4 , %ymm0 + vperm2f128 $ 0x20 , %ymm7, %ymm5 , %ymm1 + vperm2f128 $ 0x31 , %ymm6, %ymm4 , %ymm2 + vperm2f128 $ 0x31 , %ymm7, %ymm5 , %ymm3 + vunpcklpd %ymm1, %ymm0, %ymm4 + vunpckhpd %ymm1, %ymm0, %ymm5 + vunpcklpd %ymm3, %ymm2, %ymm6 + vunpckhpd %ymm3, %ymm2, %ymm7 +#else + vpermilpd $ 0x05 , %ymm5, %ymm5 + vpermilpd $ 0x05 , %ymm7, %ymm7 vblendpd $ 0x0a, %ymm5, %ymm4, %ymm0 vblendpd $ 0x05, %ymm5, %ymm4, %ymm1 vblendpd $ 0x0a, %ymm7, %ymm6, %ymm2 vblendpd $ 0x05, %ymm7, %ymm6, %ymm3 - vpermpd $ 0x1b , %ymm2, %ymm2 - vpermpd $ 0x1b , %ymm3, %ymm3 - vpermpd $ 0xb1 , %ymm2, %ymm2 - vpermpd $ 0xb1 , %ymm3, %ymm3 + vperm2f128 $ 0x01 , %ymm2, %ymm2 , %ymm2 + vperm2f128 $ 0x01 , %ymm3, %ymm3 , %ymm3 vblendpd $ 0x03, %ymm0, %ymm2 , %ymm4 vblendpd $ 0x03, %ymm1, %ymm3 , %ymm5 vblendpd $ 0x03, %ymm2, %ymm0 , %ymm6 vblendpd $ 0x03, %ymm3, %ymm1 , %ymm7 +#endif leaq (CO1, LDC, 2), %rax @@ -834,29 +1058,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
vmovups %ymm6 , (%rax) vmovups %ymm7 , (%rax, LDC) - prefetcht0 32(CO1) - prefetcht0 32(CO1,LDC) - prefetcht0 32(%rax) - prefetcht0 32(%rax,LDC) - - vpermpd $ 0xb1 , %ymm9 , %ymm9 - vpermpd $ 0xb1 , %ymm11, %ymm11 + prefetcht0 56(CO1) + prefetcht0 56(CO1,LDC) + prefetcht0 56(%rax) + prefetcht0 56(%rax,LDC) + +#if defined BROADCASTKERNEL + vperm2f128 $ 0x20 , %ymm10, %ymm8 , %ymm0 + vperm2f128 $ 0x20 , %ymm11, %ymm9 , %ymm1 + vperm2f128 $ 0x31 , %ymm10, %ymm8 , %ymm2 + vperm2f128 $ 0x31 , %ymm11, %ymm9 , %ymm3 + vunpcklpd %ymm1, %ymm0, %ymm4 + vunpckhpd %ymm1, %ymm0, %ymm5 + vunpcklpd %ymm3, %ymm2, %ymm6 + vunpckhpd %ymm3, %ymm2, %ymm7 +#else + vpermilpd $ 0x05 , %ymm9 , %ymm9 + vpermilpd $ 0x05 , %ymm11, %ymm11 vblendpd $ 0x0a, %ymm9 , %ymm8 , %ymm0 vblendpd $ 0x05, %ymm9 , %ymm8 , %ymm1 vblendpd $ 0x0a, %ymm11, %ymm10, %ymm2 vblendpd $ 0x05, %ymm11, %ymm10, %ymm3 - vpermpd $ 0x1b , %ymm2, %ymm2 - vpermpd $ 0x1b , %ymm3, %ymm3 - vpermpd $ 0xb1 , %ymm2, %ymm2 - vpermpd $ 0xb1 , %ymm3, %ymm3 + vperm2f128 $ 0x01 , %ymm2, %ymm2 , %ymm2 + vperm2f128 $ 0x01 , %ymm3, %ymm3 , %ymm3 vblendpd $ 0x03, %ymm0, %ymm2 , %ymm4 vblendpd $ 0x03, %ymm1, %ymm3 , %ymm5 vblendpd $ 0x03, %ymm2, %ymm0 , %ymm6 vblendpd $ 0x03, %ymm3, %ymm1 , %ymm7 - +#endif leaq (%rax, LDC, 2), %rax leaq (%rax, LDC, 2), %rbp @@ -875,10 +1107,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vmovups %ymm6 , (%rbp) vmovups %ymm7 , (%rbp, LDC) - prefetcht0 32(%rax) - prefetcht0 32(%rax,LDC) - prefetcht0 32(%rbp) - prefetcht0 32(%rbp,LDC) + prefetcht0 56(%rax) + prefetcht0 56(%rax,LDC) + prefetcht0 56(%rbp) + prefetcht0 56(%rbp,LDC) addq $ 4*SIZE, CO1 .endm @@ -1082,15 +1314,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x4_I prefetcht0 A_PR1(AO) vmovups -12 * SIZE(BO), %ymm1 +#if defined BROADCASTKERNEL + vbroadcastsd -16 * SIZE(AO), %ymm0 +#else vmovups -16 * SIZE(AO), %ymm0 +#endif vmulpd %ymm0 ,%ymm1 , %ymm4 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vmulpd %ymm0 ,%ymm1 , %ymm5 +#if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vmulpd %ymm0 ,%ymm1 , %ymm6 addq $ 4*SIZE, BO - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -13 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vmulpd %ymm0 ,%ymm1 , %ymm7 vmovups -12 * SIZE(BO), %ymm1 @@ -1098,29 +1346,60 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL4x4_M1 prefetcht0 A_PR1(AO) +#if defined BROADCASTKERNEL + vbroadcastsd -16 * SIZE(AO), %ymm0 +#else vmovups -16 * SIZE(AO), %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 +#if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 - - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -13 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vmovups -12 * SIZE(BO), %ymm1 .endm .macro KERNEL4x4_M2 +#if defined BROADCASTKERNEL + vbroadcastsd -12 * SIZE(AO), %ymm0 +#else vmovups -12 * SIZE(AO), %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -11 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 +#if defined BROADCASTKERNEL + vbroadcastsd -10 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 addq $ 8*SIZE, AO - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 vmovups -8 * SIZE(BO), %ymm1 addq $ 8*SIZE, BO @@ -1128,30 +1407,62 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x4_E +#if defined BROADCASTKERNEL + vbroadcastsd -12 * SIZE(AO), %ymm0 +#else vmovups -12 * SIZE(AO), %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -11 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 +#if defined BROADCASTKERNEL + vbroadcastsd -10 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 addq $ 8*SIZE, AO - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 addq $ 4*SIZE, BO .endm .macro KERNEL4x4_SUB vmovups -12 * SIZE(BO), %ymm1 +#if defined BROADCASTKERNEL + vbroadcastsd -16 * SIZE(AO), %ymm0 +#else vmovups -16 * SIZE(AO), %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm4 - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -15 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm5 addq $ 4*SIZE, BO +#if defined BROADCASTKERNEL + vbroadcastsd -14 * SIZE(AO), %ymm0 +#else vpermpd $ 0x1b, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm6 addq $ 4*SIZE, AO - vpermpd $ 0xb1, %ymm0 , %ymm0 +#if defined BROADCASTKERNEL + vbroadcastsd -17 * SIZE(AO), %ymm0 +#else + vpermilpd $ 0x05, %ymm0 , %ymm0 +#endif vfmadd231pd %ymm0 ,%ymm1 , %ymm7 .endm @@ -1165,23 +1476,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
vmulpd %ymm0 , %ymm5 , %ymm5 vmulpd %ymm0 , %ymm6 , %ymm6 - vpermpd $ 0xb1 , %ymm5, %ymm5 - vpermpd $ 0xb1 , %ymm7, %ymm7 +#if defined BROADCASTKERNEL + vperm2f128 $ 0x20 , %ymm6, %ymm4 , %ymm0 + vperm2f128 $ 0x20 , %ymm7, %ymm5 , %ymm1 + vperm2f128 $ 0x31 , %ymm6, %ymm4 , %ymm2 + vperm2f128 $ 0x31 , %ymm7, %ymm5 , %ymm3 + vunpcklpd %ymm1, %ymm0, %ymm4 + vunpckhpd %ymm1, %ymm0, %ymm5 + vunpcklpd %ymm3, %ymm2, %ymm6 + vunpckhpd %ymm3, %ymm2, %ymm7 +#else + vpermilpd $ 0x05 , %ymm5, %ymm5 + vpermilpd $ 0x05 , %ymm7, %ymm7 vblendpd $ 0x0a, %ymm5, %ymm4, %ymm0 vblendpd $ 0x05, %ymm5, %ymm4, %ymm1 vblendpd $ 0x0a, %ymm7, %ymm6, %ymm2 vblendpd $ 0x05, %ymm7, %ymm6, %ymm3 - vpermpd $ 0x1b , %ymm2, %ymm2 - vpermpd $ 0x1b , %ymm3, %ymm3 - vpermpd $ 0xb1 , %ymm2, %ymm2 - vpermpd $ 0xb1 , %ymm3, %ymm3 + vperm2f128 $ 0x01 , %ymm2, %ymm2 , %ymm2 + vperm2f128 $ 0x01 , %ymm3, %ymm3 , %ymm3 vblendpd $ 0x03, %ymm0, %ymm2 , %ymm4 vblendpd $ 0x03, %ymm1, %ymm3 , %ymm5 vblendpd $ 0x03, %ymm2, %ymm0 , %ymm6 vblendpd $ 0x03, %ymm3, %ymm1 , %ymm7 +#endif leaq (CO1, LDC, 2), %rax @@ -1617,6 +1937,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .endm +.macro PREFETCHT0_C + prefetcht0 (CO1) + prefetcht0 24(CO1) + prefetcht0 (CO1,LDC,4) + prefetcht0 24(CO1,LDC,4) + prefetcht0 (CO1,LDC,8) + prefetcht0 24(CO1,LDC,8) +.endm /*******************************************************************************************/ #if !defined(TRMMKERNEL) @@ -1784,12 +2112,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. dec %rax jne .L12_12 - + .L12_12a: - + prefetcht0 ALPHA + PREFETCHT0_C + addq LDC,CO1 KERNEL4x12_M1 + PREFETCHT0_C + leaq (CO1,LDC,2),CO1 KERNEL4x12_M2 + PREFETCHT0_C + subq LDC,CO1 KERNEL4x12_M1 + PREFETCHT0_C + subq LDC,CO1 + subq LDC,CO1 KERNEL4x12_M2 KERNEL4x12_M1 @@ -1844,6 +2181,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SAVE4x12 + /* here for the prefetch of next b source block */ + /* the increment should be proportional to GEMM_Q/GEMM_P */ + + salq $3, K +#ifdef WINDOWS_ABI /* GEMM_P == GEMM_Q * 4 */ + prefetcht2 32(B) + prefetcht2 32(B, K, 8) + addq $64, B /* increment */ +#else /* GEMM_P == GEMM_Q * 2 under linux x86_64 */ + prefetcht2 32(B) + prefetcht2 32(B, K, 8) + prefetcht2 96(B) + prefetcht2 96(B, K, 8) + addq $128, B /* increment */ +#endif + sarq $3, K + decq I # i -- jne .L12_11 ALIGN_4 @@ -1851,6 +2205,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /************************************************************************** * Rest of M ***************************************************************************/ + + /* recover the original value of pointer B after prefetch */ + movq M, I + sarq $2, I +#ifdef WINDOWS_ABI /* GEMM_P == GEMM_Q * 4 */ + salq $6, I +#else /* GEMM_P == GEMM_Q * 2 under linux x86_64 */ + salq $7, I +#endif + subq I, B + .L12_20: // Test rest of M @@ -2068,10 +2433,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. jne .L13_12 .L13_12a: - + prefetcht0 ALPHA + PREFETCHT0_C + addq LDC,CO1 KERNEL4x12_M1 + PREFETCHT0_C + leaq (CO1,LDC,2),CO1 KERNEL4x12_M2 + PREFETCHT0_C + subq LDC,CO1 KERNEL4x12_M1 + PREFETCHT0_C + subq LDC,CO1 + subq LDC,CO1 KERNEL4x12_M2 KERNEL4x12_M1 @@ -2081,7 +2455,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. jmp .L13_16 - .L13_13: test $1, %rax @@ -2126,6 +2499,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
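+/* Note on the next-B prefetch added below (and in the matching .L12 block):
+ * K is temporarily scaled to bytes (salq $3), so "(B, K, 8)" reaches K*64
+ * bytes ahead of B; B itself is nudged forward by 64 (Windows) or 128
+ * (Linux) bytes per 4x12 tile, and the total advance of M/4 tiles is
+ * subtracted again at "recover the original value of pointer B". */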
 SAVE4x12
 
+ /* here for the prefetch of next b source block */
+ /* the increment should be proportional to GEMM_Q/GEMM_P */
+
+ salq $3, K
+#ifdef WINDOWS_ABI /* GEMM_P == GEMM_Q * 4 */
+ prefetcht2 (B)
+ prefetcht2 (B, K, 8)
+ addq $64, B /* increment */
+#else /* GEMM_P == GEMM_Q * 2 under linux x86_64 */
+ prefetcht2 (B)
+ prefetcht2 (B, K, 8)
+ prefetcht2 64(B)
+ prefetcht2 64(B, K, 8)
+ addq $128, B /* increment */
+#endif
+ sarq $3, K
+
 decq I # i --
 jne .L13_11
 ALIGN_4
@@ -2133,6 +2523,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 /**************************************************************************
 * Rest of M
 ***************************************************************************/
+ /* recover the original value of pointer B */
+ movq M, I
+ sarq $2, I
+#ifdef WINDOWS_ABI /* GEMM_P == GEMM_Q * 4 */
+ salq $6, I
+#else /* GEMM_P == GEMM_Q * 2 under linux x86_64 */
+ salq $7, I
+#endif
+ subq I, B
+
 .L13_20: // Test rest of M
 
diff --git a/kernel/x86_64/dgemm_kernel_4x8_skylakex_2.c b/kernel/x86_64/dgemm_kernel_4x8_skylakex_2.c
new file mode 100644
index 000000000..90a4c2b1d
--- /dev/null
+++ b/kernel/x86_64/dgemm_kernel_4x8_skylakex_2.c
@@ -0,0 +1,670 @@
+#include "common.h"
+#include <stdint.h>
+#include <immintrin.h>
+
+//register usage: zmm3 for alpha, zmm0-zmm2 and zmm4-zmm7 for temporary use, zmm8-zmm31 for accumulators.
+
+/* row-major c_block */
+#define INNER_KERNEL_k1m1n8 \
+ "prefetcht0 384(%1);"\
+ "vmovupd (%1),%%zmm5; addq $64,%1;"\
+ "vbroadcastsd (%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm8;"
+
+#define INNER_KERNEL_k1m2n8 \
+ INNER_KERNEL_k1m1n8\
+ "vbroadcastsd 8(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm9;"
+
+#define INNER_KERNEL_k1m1n16 \
+ "prefetcht0 128(%1); prefetcht0 128(%1,%%r12,2);"\
+ "vmovupd (%1),%%zmm5; vmovupd (%1,%%r12,2),%%zmm6; addq $64,%1;"\
+ "vbroadcastsd (%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm8; vfmadd231pd %%zmm6,%%zmm4,%%zmm9;"
+
+#define INNER_KERNEL_k1m2n16 \
+ INNER_KERNEL_k1m1n16\
+ "vbroadcastsd 8(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm10;vfmadd231pd %%zmm6,%%zmm4,%%zmm11;"
+
+#define INNER_KERNEL_k1m1n24 \
+ "prefetcht0 128(%1); prefetcht0 128(%1,%%r12,2); prefetcht0 128(%1,%%r12,4);"\
+ "vmovupd (%1),%%zmm5; vmovupd (%1,%%r12,2),%%zmm6; vmovupd (%1,%%r12,4),%%zmm7; addq $64,%1;"\
+ "vbroadcastsd (%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm8; vfmadd231pd %%zmm6,%%zmm4,%%zmm9; vfmadd231pd %%zmm7,%%zmm4,%%zmm10;"
+
+#define INNER_KERNEL_k1m2n24 \
+ INNER_KERNEL_k1m1n24\
+ "vbroadcastsd 8(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm11;vfmadd231pd %%zmm6,%%zmm4,%%zmm12;vfmadd231pd %%zmm7,%%zmm4,%%zmm13;"
+
+/* row-major z-partition c_block */
+#define INNER_KERNEL_k1m4n8 \
+ "vbroadcastf32x4 (%0),%%zmm4; vbroadcastf32x4 16(%0),%%zmm5; addq $32,%0;"\
+ "vmovddup (%1),%%zmm6; vfmadd231pd %%zmm4,%%zmm6,%%zmm8; vfmadd231pd %%zmm5,%%zmm6,%%zmm10;"\
+ "vmovddup 8(%1),%%zmm7; vfmadd231pd %%zmm4,%%zmm7,%%zmm9; vfmadd231pd %%zmm5,%%zmm7,%%zmm11;"
+
+#define INNER_KERNEL_k1m4n16 \
+ INNER_KERNEL_k1m4n8\
+ "vmovddup (%1,%%r12,2),%%zmm6; vfmadd231pd %%zmm4,%%zmm6,%%zmm12; vfmadd231pd %%zmm5,%%zmm6,%%zmm14;"\
+ "vmovddup 8(%1,%%r12,2),%%zmm7; vfmadd231pd %%zmm4,%%zmm7,%%zmm13; vfmadd231pd %%zmm5,%%zmm7,%%zmm15;"
+
+#define INNER_KERNEL_k1m4n24 \
+ INNER_KERNEL_k1m4n16\
+ "vmovddup (%1,%%r12,4),%%zmm6; vfmadd231pd %%zmm4,%%zmm6,%%zmm16; vfmadd231pd %%zmm5,%%zmm6,%%zmm18;"\
+ "vmovddup 8(%1,%%r12,4),%%zmm7; vfmadd231pd %%zmm4,%%zmm7,%%zmm17; vfmadd231pd %%zmm5,%%zmm7,%%zmm19;"
+
+#define INNER_KERNEL_k1m8n8 \
+ "vbroadcastf32x4
(%0),%%zmm4; vbroadcastf32x4 16(%0),%%zmm5;"\ + "vbroadcastf32x4 (%0,%%r12,1),%%zmm6; vbroadcastf32x4 16(%0,%%r12,1),%%zmm7; addq $32,%0;"\ + "prefetcht0 128(%1);"\ + "vmovddup (%1),%%zmm2; vfmadd231pd %%zmm4,%%zmm2,%%zmm8; vfmadd231pd %%zmm5,%%zmm2,%%zmm10;"\ + "vfmadd231pd %%zmm6,%%zmm2,%%zmm12; vfmadd231pd %%zmm7,%%zmm2,%%zmm14;"\ + "vmovddup 8(%1),%%zmm1; vfmadd231pd %%zmm4,%%zmm1,%%zmm9; vfmadd231pd %%zmm5,%%zmm1,%%zmm11;"\ + "vfmadd231pd %%zmm6,%%zmm1,%%zmm13; vfmadd231pd %%zmm7,%%zmm1,%%zmm15;" + +#define INNER_KERNEL_k1m8n16 \ + INNER_KERNEL_k1m8n8\ + "prefetcht0 128(%1,%%r12,2);"\ + "vmovddup (%1,%%r12,2),%%zmm2; vfmadd231pd %%zmm4,%%zmm2,%%zmm16; vfmadd231pd %%zmm5,%%zmm2,%%zmm18;"\ + "vfmadd231pd %%zmm6,%%zmm2,%%zmm20; vfmadd231pd %%zmm7,%%zmm2,%%zmm22;"\ + "vmovddup 8(%1,%%r12,2),%%zmm1; vfmadd231pd %%zmm4,%%zmm1,%%zmm17; vfmadd231pd %%zmm5,%%zmm1,%%zmm19;"\ + "vfmadd231pd %%zmm6,%%zmm1,%%zmm21; vfmadd231pd %%zmm7,%%zmm1,%%zmm23;" + +#define INNER_KERNEL_k1m8n24 \ + INNER_KERNEL_k1m8n16\ + "prefetcht0 128(%1,%%r12,4);"\ + "vmovddup (%1,%%r12,4),%%zmm2; vfmadd231pd %%zmm4,%%zmm2,%%zmm24; vfmadd231pd %%zmm5,%%zmm2,%%zmm26;"\ + "vfmadd231pd %%zmm6,%%zmm2,%%zmm28; vfmadd231pd %%zmm7,%%zmm2,%%zmm30;"\ + "vmovddup 8(%1,%%r12,4),%%zmm1; vfmadd231pd %%zmm4,%%zmm1,%%zmm25; vfmadd231pd %%zmm5,%%zmm1,%%zmm27;"\ + "vfmadd231pd %%zmm6,%%zmm1,%%zmm29; vfmadd231pd %%zmm7,%%zmm1,%%zmm31;" + +/* micro kernels */ +#define INNER_KERNELm1(nn) \ + "cmpq $1,%2;jb "#nn"3f;"\ + #nn"4:\n\t"\ + INNER_KERNEL_k1m1n##nn "addq $8,%0;"\ + "decq %2;cmpq $1,%2;jnb "#nn"4b;"\ + #nn"3:\n\t" + +#define INNER_KERNELm2(nn) \ + "cmpq $1,%2;jb "#nn"0f;"\ + #nn"1:\n\t"\ + INNER_KERNEL_k1m2n##nn "addq $16,%0;"\ + "decq %2;cmpq $1,%2;jnb "#nn"1b;"\ + #nn"0:\n\t" + +#define INNER_KERNELm4(nn) \ + "cmpq $1,%2;jb "#nn"00f;"\ + #nn"01:\n\t"\ + INNER_KERNEL_k1m4n##nn "addq $64,%1;"\ + "decq %2;cmpq $1,%2;jnb "#nn"01b;"\ + #nn"00:\n\t" + +/* %10 for prefetch of C elements before storage; %4 = ldc(in bytes),%11 for prefetch of next B block */ +#define INNER_KERNELm8(nn) \ + "movq %3,%10;cmpq $18,%2;jb "#nn"001f;"\ + #nn"008:\n\t"\ + INNER_KERNEL_k1m8n##nn "addq $64,%1;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%1;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%1;"\ + "prefetcht1 (%10); prefetcht1 63(%10); addq %4,%10;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%1;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%1;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%1;"\ + "prefetcht1 (%11); addq $32,%11;"\ + "subq $6,%2;cmpq $18,%2;jnb "#nn"008b;"\ + "movq %3,%10;"\ + #nn"001:\n\t"\ + "cmpq $1,%2;jb "#nn"000f;"\ + "prefetcht0 (%10); prefetcht0 63(%10); prefetcht0 (%10,%4,1); prefetcht0 63(%10,%4,1); leaq (%10,%4,2),%10;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%1;"\ + "decq %2;jmp "#nn"001b;"\ + ""#nn"000:\n\t" + +#define INNER_INIT_m1n8 \ + "vpxorq %%zmm8, %%zmm8, %%zmm8;" + +#define INNER_INIT_m2n8 \ + "vpxorq %%zmm8, %%zmm8, %%zmm8; vpxorq %%zmm9, %%zmm9, %%zmm9;" + +#define INNER_INIT_m4n8 \ + "vpxorq %%zmm8, %%zmm8, %%zmm8; vpxorq %%zmm9, %%zmm9, %%zmm9; vpxorq %%zmm10,%%zmm10,%%zmm10;vpxorq %%zmm11,%%zmm11,%%zmm11;" + +#define INNER_INIT_m8n8 \ + INNER_INIT_m4n8\ + "vpxorq %%zmm12,%%zmm12,%%zmm12;vpxorq %%zmm13,%%zmm13,%%zmm13;vpxorq %%zmm14,%%zmm14,%%zmm14;vpxorq %%zmm15,%%zmm15,%%zmm15;" + +#define INNER_INIT_m1n16 INNER_INIT_m2n8 + +#define INNER_INIT_m2n16 INNER_INIT_m4n8 + +#define INNER_INIT_m4n16 INNER_INIT_m8n8 + +#define INNER_INIT_m8n16 \ + INNER_INIT_m8n8\ + "vpxorq %%zmm16,%%zmm16,%%zmm16;vpxorq %%zmm17,%%zmm17,%%zmm17;vpxorq %%zmm18,%%zmm18,%%zmm18;vpxorq 
%%zmm19,%%zmm19,%%zmm19;"\ + "vpxorq %%zmm20,%%zmm20,%%zmm20;vpxorq %%zmm21,%%zmm21,%%zmm21;vpxorq %%zmm22,%%zmm22,%%zmm22;vpxorq %%zmm23,%%zmm23,%%zmm23;" + +#define INNER_INIT_m1n24 \ + "vpxorq %%zmm8, %%zmm8, %%zmm8; vpxorq %%zmm9, %%zmm9, %%zmm9; vpxorq %%zmm10,%%zmm10,%%zmm10;" + +#define INNER_INIT_m2n24 \ + INNER_INIT_m1n24\ + "vpxorq %%zmm11,%%zmm11,%%zmm11; vpxorq %%zmm12,%%zmm12,%%zmm12; vpxorq %%zmm13,%%zmm13,%%zmm13;" + +#define INNER_INIT_m4n24 \ + INNER_INIT_m4n16\ + "vpxorq %%zmm16,%%zmm16,%%zmm16;vpxorq %%zmm17,%%zmm17,%%zmm17;vpxorq %%zmm18,%%zmm18,%%zmm18;vpxorq %%zmm19,%%zmm19,%%zmm19;" + +#define INNER_INIT_m8n24 \ + INNER_INIT_m8n16\ + "vpxorq %%zmm24,%%zmm24,%%zmm24;vpxorq %%zmm25,%%zmm25,%%zmm25;vpxorq %%zmm26,%%zmm26,%%zmm26;vpxorq %%zmm27,%%zmm27,%%zmm27;"\ + "vpxorq %%zmm28,%%zmm28,%%zmm28;vpxorq %%zmm29,%%zmm29,%%zmm29;vpxorq %%zmm30,%%zmm30,%%zmm30;vpxorq %%zmm31,%%zmm31,%%zmm31;" + +#define INNER_SETINDEX \ + "vpinsrq $0,%4,%%xmm4,%%xmm4; vbroadcastsd %%xmm4,%%zmm4;"\ + "kxnorw %%k1,%%k1,%%k1; kshiftlw $1,%%k1,%%k1; vpxorq %%zmm6,%%zmm6,%%zmm6; vmovapd %%zmm4,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};" + +#define INNER_STORE_m1n8(c1,disp) \ + "kxnorw %%k1,%%k1,%%k1;"\ + "vgatherqpd "#disp"(%10,%%zmm6,1), %%zmm7 %{%%k1%};"\ + "vfmadd132pd %%zmm3,%%zmm7,"#c1";"\ + "kxnorw %%k1,%%k1,%%k1;"\ + "vscatterqpd "#c1", "#disp"(%10,%%zmm6,1) %{%%k1%};" + +#define INNER_SAVE_m1n8 \ + "movq %3,%10;"\ + INNER_SETINDEX\ + INNER_STORE_m1n8(%%zmm8,0) + +#define INNER_SAVE_m1n16 \ + INNER_SAVE_m1n8\ + "leaq (%10,%4,8),%10;"\ + INNER_STORE_m1n8(%%zmm9,0) + +#define INNER_SAVE_m1n24 \ + INNER_SAVE_m1n16\ + "leaq (%10,%4,8),%10;"\ + INNER_STORE_m1n8(%%zmm10,0) + +#define INNER_SAVE_m2n8 \ + "movq %3,%10;"\ + INNER_SETINDEX\ + INNER_STORE_m1n8(%%zmm8,0)\ + INNER_STORE_m1n8(%%zmm9,8) + +#define INNER_SAVE_m2n16 \ + "movq %3,%10;"\ + INNER_SETINDEX\ + INNER_STORE_m1n8(%%zmm8,0)\ + INNER_STORE_m1n8(%%zmm10,8)\ + "leaq (%10,%4,8),%10;"\ + INNER_STORE_m1n8(%%zmm9,0)\ + INNER_STORE_m1n8(%%zmm11,8) + +#define INNER_SAVE_m2n24 \ + "movq %3,%10;"\ + INNER_SETINDEX\ + INNER_STORE_m1n8(%%zmm8,0)\ + INNER_STORE_m1n8(%%zmm11,8)\ + "leaq (%10,%4,8),%10;"\ + INNER_STORE_m1n8(%%zmm9,0)\ + INNER_STORE_m1n8(%%zmm12,8)\ + "leaq (%10,%4,8),%10;"\ + INNER_STORE_m1n8(%%zmm10,0)\ + INNER_STORE_m1n8(%%zmm13,8) + +#define INNER_TRANS_4x8(c1,c2,c3,c4) \ + "vblendmpd "#c3","#c1",%%zmm4%{%6%}; vblendmpd "#c4","#c2",%%zmm6%{%6%};"\ + "vshuff64x2 $177,%%zmm4,%%zmm4,%%zmm4; vshuff64x2 $177,%%zmm6,%%zmm6,%%zmm6;"\ + "vblendmpd "#c1",%%zmm4,"#c1"%{%6%}; vblendmpd "#c2",%%zmm6,"#c2"%{%6%};"\ + "vblendmpd %%zmm4,"#c3","#c3"%{%6%}; vblendmpd %%zmm6,"#c4","#c4"%{%6%};"\ + +#define INNER_TRANS_f128_4x4(c1,c2,c3,c4) \ + "vshuff64x2 $68,"#c3","#c1",%%zmm4; vshuff64x2 $17,"#c4","#c2",%%zmm5;"\ + "vshuff64x2 $238,"#c3","#c1",%%zmm6; vshuff64x2 $187,"#c4","#c2",%%zmm7;"\ + "vblendmpd %%zmm5,%%zmm4,"#c2"%{%6%}; vshuff64x2 $177,"#c2","#c2","#c2"; vblendmpd %%zmm4,%%zmm5,"#c1"%{%6%};"\ + "vblendmpd %%zmm7,%%zmm6,"#c4"%{%6%}; vshuff64x2 $177,"#c4","#c4","#c4"; vblendmpd %%zmm6,%%zmm7,"#c3"%{%6%};" + +#define INNER_TRANS_8x8(c1,c2,c3,c4,c5,c6,c7,c8) \ + 
INNER_TRANS_f128_4x4(c1,c3,c5,c7) INNER_TRANS_f128_4x4(c2,c4,c6,c8) + +//%7 for k01(input) only when m=4 +#define INNER_STORE_4x8(c1,c2,c3,c4) \ + "vmovupd (%10),%%zmm4%{%5%};vmovupd -32(%10,%4,4),%%zmm4%{%7%};vfmadd132pd %%zmm3,%%zmm4,"#c1";"\ + "vmovupd "#c1",(%10)%{%5%}; vmovupd "#c1",-32(%10,%4,4)%{%7%}; leaq (%10,%4,1),%10;"\ + "vmovupd (%10),%%zmm5%{%5%};vmovupd -32(%10,%4,4),%%zmm5%{%7%};vfmadd132pd %%zmm3,%%zmm5,"#c2";"\ + "vmovupd "#c2",(%10)%{%5%}; vmovupd "#c2",-32(%10,%4,4)%{%7%}; leaq (%10,%4,1),%10;"\ + "vmovupd (%10),%%zmm6%{%5%};vmovupd -32(%10,%4,4),%%zmm6%{%7%};vfmadd132pd %%zmm3,%%zmm6,"#c3";"\ + "vmovupd "#c3",(%10)%{%5%}; vmovupd "#c3",-32(%10,%4,4)%{%7%}; leaq (%10,%4,1),%10;"\ + "vmovupd (%10),%%zmm7%{%5%};vmovupd -32(%10,%4,4),%%zmm7%{%7%};vfmadd132pd %%zmm3,%%zmm7,"#c4";"\ + "vmovupd "#c4",(%10)%{%5%}; vmovupd "#c4",-32(%10,%4,4)%{%7%}; leaq (%10,%4,1),%10;"\ + "leaq (%10,%4,4),%10;" + +#define INNER_STORE_8x8(c1,c2,c3,c4,c5,c6,c7,c8) \ + "vfmadd213pd (%10),%%zmm3,"#c1"; vmovupd "#c1",(%10); vfmadd213pd (%10,%4,1),%%zmm3,"#c2"; vmovupd "#c2",(%10,%4,1); leaq (%10,%4,2),%10;"\ + "vfmadd213pd (%10),%%zmm3,"#c3"; vmovupd "#c3",(%10); vfmadd213pd (%10,%4,1),%%zmm3,"#c4"; vmovupd "#c4",(%10,%4,1); leaq (%10,%4,2),%10;"\ + "vfmadd213pd (%10),%%zmm3,"#c5"; vmovupd "#c5",(%10); vfmadd213pd (%10,%4,1),%%zmm3,"#c6"; vmovupd "#c6",(%10,%4,1); leaq (%10,%4,2),%10;"\ + "vfmadd213pd (%10),%%zmm3,"#c7"; vmovupd "#c7",(%10); vfmadd213pd (%10,%4,1),%%zmm3,"#c8"; vmovupd "#c8",(%10,%4,1); leaq (%10,%4,2),%10;" + +#define INNER_SAVE_m4n8 \ + "movq %3,%10;"\ + INNER_TRANS_4x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11)\ + INNER_STORE_4x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11) + +#define INNER_SAVE_m4n16 \ + INNER_SAVE_m4n8\ + INNER_TRANS_4x8(%%zmm12,%%zmm13,%%zmm14,%%zmm15)\ + INNER_STORE_4x8(%%zmm12,%%zmm13,%%zmm14,%%zmm15) + +#define INNER_SAVE_m4n24 \ + INNER_SAVE_m4n16\ + INNER_TRANS_4x8(%%zmm16,%%zmm17,%%zmm18,%%zmm19)\ + INNER_STORE_4x8(%%zmm16,%%zmm17,%%zmm18,%%zmm19) + +#define INNER_SAVE_m8n8 \ + "movq %3,%10;"\ + INNER_TRANS_8x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11,%%zmm12,%%zmm13,%%zmm14,%%zmm15)\ + INNER_STORE_8x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11,%%zmm12,%%zmm13,%%zmm14,%%zmm15) + +#define INNER_SAVE_m8n16 \ + INNER_SAVE_m8n8\ + INNER_TRANS_8x8(%%zmm16,%%zmm17,%%zmm18,%%zmm19,%%zmm20,%%zmm21,%%zmm22,%%zmm23)\ + INNER_STORE_8x8(%%zmm16,%%zmm17,%%zmm18,%%zmm19,%%zmm20,%%zmm21,%%zmm22,%%zmm23) + +#define INNER_SAVE_m8n24 \ + INNER_SAVE_m8n16\ + INNER_TRANS_8x8(%%zmm24,%%zmm25,%%zmm26,%%zmm27,%%zmm28,%%zmm29,%%zmm30,%%zmm31)\ + INNER_STORE_8x8(%%zmm24,%%zmm25,%%zmm26,%%zmm27,%%zmm28,%%zmm29,%%zmm30,%%zmm31) + +#define COMPUTE_n8 {\ + b_pref = packed_b_pointer + 8 * K;\ + __asm__ __volatile__(\ + "vbroadcastsd (%9),%%zmm3;"\ + "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $5,%%r12;"\ + "cmpq $8,%8; jb 42222f;"\ + "42221:\n\t"\ + INNER_INIT_m8n8\ + INNER_KERNELm8(8)\ + INNER_SAVE_m8n8\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1; addq %%r12,%0;"\ + "addq $64,%3;"\ + "subq $8,%8; cmpq $8,%8; jnb 42221b;"\ + "42222:\n\t"\ + "cmpq $4,%8; jb 42223f;"\ + INNER_INIT_m4n8\ + INNER_KERNELm4(8)\ + INNER_SAVE_m4n8\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\ + "addq $32,%3;"\ + "subq $4,%8;"\ + "42223:\n\t"\ + "cmpq $2,%8; jb 42224f;"\ + INNER_INIT_m2n8\ + INNER_KERNELm2(8)\ + INNER_SAVE_m2n8\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\ + "addq $16,%3;"\ + "subq $2,%8;"\ + "42224:\n\t"\ + "cmpq $1,%8; jb 42225f;"\ + INNER_INIT_m1n8\ + INNER_KERNELm1(8)\ + INNER_SAVE_m1n8\ + "movq %%r13,%2; subq 
%%r12,%1; subq %%r12,%1;"\ + "addq $8,%3;"\ + "42225:\n\t"\ + "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\ + "shlq $3,%4;addq %4,%3;shrq $3,%4;"\ + :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),\ + "+r"(M),"+r"(alpha),"+r"(c_store),"+r"(b_pref)\ + ::"zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","cc","memory","k1","r12","r13","r14");\ + a_block_pointer -= M * K;\ +} +#define COMPUTE_n16 {\ + b_pref = packed_b_pointer + 16 * K;\ + __asm__ __volatile__(\ + "vbroadcastsd (%9),%%zmm3;"\ + "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $5,%%r12;"\ + "cmpq $8,%8; jb 32222f;"\ + "32221:\n\t"\ + INNER_INIT_m8n16\ + INNER_KERNELm8(16)\ + INNER_SAVE_m8n16\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1; addq %%r12,%0;"\ + "addq $64,%3;"\ + "subq $8,%8; cmpq $8,%8; jnb 32221b;"\ + "32222:\n\t"\ + "cmpq $4,%8; jb 32223f;"\ + INNER_INIT_m4n16\ + INNER_KERNELm4(16)\ + INNER_SAVE_m4n16\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\ + "addq $32,%3;"\ + "subq $4,%8;"\ + "32223:\n\t"\ + "cmpq $2,%8; jb 32224f;"\ + INNER_INIT_m2n16\ + INNER_KERNELm2(16)\ + INNER_SAVE_m2n16\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\ + "addq $16,%3;"\ + "subq $2,%8;"\ + "32224:\n\t"\ + "cmpq $1,%8; jb 32225f;"\ + INNER_INIT_m1n16\ + INNER_KERNELm1(16)\ + INNER_SAVE_m1n16\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\ + "addq $8,%3;"\ + "32225:\n\t"\ + "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\ + "shlq $4,%4;addq %4,%3;shrq $4,%4;"\ + "leaq (%1,%%r12,4),%1;"\ + :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),\ + "+r"(M),"+r"(alpha),"+r"(c_store),"+r"(b_pref)\ + ::"zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17",\ + "zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","cc","memory","k1","r12","r13","r14");\ + a_block_pointer -= M * K;\ +} +#define COMPUTE_n24 {\ + b_pref = packed_b_pointer + 24 * K;\ + __asm__ __volatile__(\ + "vbroadcastsd (%9),%%zmm3;"\ + "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $5,%%r12;"\ + "cmpq $8,%8; jb 22222f;"\ + "22221:\n\t"\ + INNER_INIT_m8n24\ + INNER_KERNELm8(24)\ + INNER_SAVE_m8n24\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1; addq %%r12,%0;"\ + "addq $64,%3;"\ + "subq $8,%8; cmpq $8,%8; jnb 22221b;"\ + "22222:\n\t"\ + "cmpq $4,%8; jb 22223f;"\ + INNER_INIT_m4n24\ + INNER_KERNELm4(24)\ + INNER_SAVE_m4n24\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\ + "addq $32,%3;"\ + "subq $4,%8;"\ + "22223:\n\t"\ + "cmpq $2,%8; jb 22224f;"\ + INNER_INIT_m2n24\ + INNER_KERNELm2(24)\ + INNER_SAVE_m2n24\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\ + "addq $16,%3;"\ + "subq $2,%8;"\ + "22224:\n\t"\ + "cmpq $1,%8; jb 22225f;"\ + INNER_INIT_m1n24\ + INNER_KERNELm1(24)\ + INNER_SAVE_m1n24\ + "movq %%r13,%2; subq %%r12,%1; subq %%r12,%1;"\ + "addq $8,%3;"\ + "22225:\n\t"\ + "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\ + "shlq $3,%4;addq %4,%3;shlq $1,%4;addq %4,%3;shrq $4,%4;"\ + "leaq (%1,%%r12,4),%1; leaq (%1,%%r12,2),%1;"\ + :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),\ + "+r"(M),"+r"(alpha),"+r"(c_store),"+r"(b_pref)::\ + "zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18",\ + 
"zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31","cc","memory","k1","r12","r13","r14");\ + a_block_pointer -= M * K;\ +} +static void KERNEL_MAIN(double *packed_a, double *packed_b, BLASLONG m, BLASLONG ndiv8, BLASLONG k, BLASLONG LDC, double *c,double *alpha){//icopy=4,ocopy=8 +//perform C += A B + if(k==0 || m==0 || ndiv8==0) return; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(double); + int64_t K = (int64_t)k; int64_t M = (int64_t)m; + double *a_block_pointer,*b_pref; + double *c_pointer = c,*c_store = c; + __mmask16 k01 = 0x00f0,k02 = 0x000f,k03 = 0x0033; + BLASLONG ndiv8_count; + double *packed_b_pointer = packed_b; + a_block_pointer = packed_a; + for(ndiv8_count=ndiv8;ndiv8_count>2;ndiv8_count-=3){ + COMPUTE_n24 + } + for(;ndiv8_count>1;ndiv8_count-=2){ + COMPUTE_n16 + } + if(ndiv8_count>0){ + COMPUTE_n8 + } +} + +/* __m256d accumulators: yc1-yc4; temporary variables: ya1,yb1-yb2 */ +/* __m128d accumulators: xc1-xc2; temporary variables: xa1,xb1-xb2 */ +/* double accumulator: sc1; temporary variables: sa1,sb1 */ +/* column-major c_block */ +#define KERNEL_m4n4k1 {\ + ya1 = _mm256_loadu_pd(a_block_pointer);a_block_pointer+=4;\ + yb1 = _mm256_broadcast_sd(b_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + yb2 = _mm256_broadcast_sd(b_block_pointer+1); yc2 = _mm256_fmadd_pd(ya1,yb2,yc2);\ + yb1 = _mm256_broadcast_sd(b_block_pointer+2); yc3 = _mm256_fmadd_pd(ya1,yb1,yc3);\ + yb2 = _mm256_broadcast_sd(b_block_pointer+3); yc4 = _mm256_fmadd_pd(ya1,yb2,yc4);\ + b_block_pointer+=4;\ +} +#define KERNEL_m4n2k1 {\ + ya1 = _mm256_loadu_pd(a_block_pointer);a_block_pointer+=4;\ + yb1 = _mm256_broadcast_sd(b_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + yb2 = _mm256_broadcast_sd(b_block_pointer+1); yc2 = _mm256_fmadd_pd(ya1,yb2,yc2);\ + b_block_pointer+=2;\ +} +#define KERNEL_m4n1k1 {\ + ya1 = _mm256_loadu_pd(a_block_pointer);a_block_pointer+=4;\ + yb1 = _mm256_broadcast_sd(b_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + b_block_pointer++;\ +} +#define INIT_m4n1 yc1=_mm256_setzero_pd(); +#define INIT_m4n2 yc2=INIT_m4n1 +#define INIT_m4n4 yc4=yc3=INIT_m4n2 +#define SAVE_m4n1 {\ + yb1 = _mm256_broadcast_sd(alpha);\ + ya1 = _mm256_loadu_pd(c_pointer);\ + yc1 = _mm256_fmadd_pd(yc1,yb1,ya1);\ + _mm256_storeu_pd(c_pointer,yc1);\ + c_pointer += 4;\ +} +#define SAVE_m4n2 {\ + ya1 = _mm256_broadcast_sd(alpha);\ + yb1 = _mm256_loadu_pd(c_pointer); yb2 = _mm256_loadu_pd(c_pointer+LDC);\ + yc1 = _mm256_fmadd_pd(yc1,ya1,yb1); yc2 = _mm256_fmadd_pd(yc2,ya1,yb2);\ + _mm256_storeu_pd(c_pointer,yc1); _mm256_storeu_pd(c_pointer+LDC,yc2);\ + c_pointer += 4;\ +} +#define SAVE_m4n4 {\ + ya1 = _mm256_broadcast_sd(alpha);\ + yb1 = _mm256_loadu_pd(c_pointer); yb2 = _mm256_loadu_pd(c_pointer+LDC);\ + yc1 = _mm256_fmadd_pd(yc1,ya1,yb1); yc2 = _mm256_fmadd_pd(yc2,ya1,yb2);\ + _mm256_storeu_pd(c_pointer,yc1); _mm256_storeu_pd(c_pointer+LDC,yc2);\ + c_pointer += LDC*2;\ + yb1 = _mm256_loadu_pd(c_pointer); yb2 = _mm256_loadu_pd(c_pointer+LDC);\ + yc3 = _mm256_fmadd_pd(yc3,ya1,yb1); yc4 = _mm256_fmadd_pd(yc4,ya1,yb2);\ + _mm256_storeu_pd(c_pointer,yc3); _mm256_storeu_pd(c_pointer+LDC,yc4);\ + c_pointer += 4-LDC*2;\ +} +#define KERNEL_m2n2k1 {\ + xa1 = _mm_loadu_pd(a_block_pointer); a_block_pointer+=2;\ + xb1 = _mm_loaddup_pd(b_block_pointer); xc1 = _mm_fmadd_pd(xa1,xb1,xc1);\ + xb2 = _mm_loaddup_pd(b_block_pointer+1); xc2 = _mm_fmadd_pd(xa1,xb2,xc2);\ + b_block_pointer += 2;\ +} +#define KERNEL_m2n1k1 {\ + xa1 = _mm_loadu_pd(a_block_pointer); 
a_block_pointer+=2;\ + xb1 = _mm_loaddup_pd(b_block_pointer); xc1 = _mm_fmadd_pd(xa1,xb1,xc1);\ + b_block_pointer ++;\ +} +#define INIT_m2n1 xc1=_mm_setzero_pd(); +#define INIT_m2n2 xc2=INIT_m2n1 +#define SAVE_m2n1 {\ + xb1 = _mm_loaddup_pd(alpha);\ + xa1 = _mm_loadu_pd(c_pointer);\ + xc1 = _mm_fmadd_pd(xc1,xb1,xa1);\ + _mm_storeu_pd(c_pointer,xc1);\ + c_pointer += 2;\ +} +#define SAVE_m2n2 {\ + xa1 = _mm_loaddup_pd(alpha);\ + xb1 = _mm_loadu_pd(c_pointer); xb2 = _mm_loadu_pd(c_pointer+LDC);\ + xc1 = _mm_fmadd_pd(xc1,xa1,xb1); xc2 = _mm_fmadd_pd(xc2,xa1,xb2);\ + _mm_storeu_pd(c_pointer,xc1); _mm_storeu_pd(c_pointer+LDC,xc2);\ + c_pointer += 2;\ +} +#define KERNEL_m1n1k1 {\ + sa1 = *a_block_pointer; a_block_pointer++;\ + sb1 = *b_block_pointer; sc1 += sa1 * sb1;\ + b_block_pointer ++;\ +} +#define INIT_m1n1 sc1=0.0; +#define SAVE_m1n1 {\ + *c_pointer += sc1 * (*alpha);\ + c_pointer++;\ +} +/* row-major c_block */ +#define KERNEL_m2n4k1 {\ + yb1 = _mm256_loadu_pd(b_block_pointer);b_block_pointer+=4;\ + ya1 = _mm256_broadcast_sd(a_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + ya1 = _mm256_broadcast_sd(a_block_pointer+1);yc2 = _mm256_fmadd_pd(ya1,yb1,yc2);\ + a_block_pointer += 2;\ +} +#define KERNEL_m1n4k1 {\ + yb1 = _mm256_loadu_pd(b_block_pointer);b_block_pointer+=4;\ + ya1 = _mm256_broadcast_sd(a_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + a_block_pointer ++;\ +} +#define KERNEL_m1n2k1 {\ + xb1 = _mm_loadu_pd(b_block_pointer);b_block_pointer+=2;\ + xa1 = _mm_loaddup_pd(a_block_pointer); xc1 = _mm_fmadd_pd(xa1,xb1,xc1);\ + a_block_pointer ++;\ +} +#define INIT_m1n2 INIT_m2n1 +#define INIT_m1n4 INIT_m4n1 +#define INIT_m2n4 INIT_m4n2 +#define SAVE_m2n4 {\ + ya1 = _mm256_broadcast_sd(alpha);\ + yc1 = _mm256_mul_pd(yc1,ya1);\ + yc2 = _mm256_mul_pd(yc2,ya1);\ + yb1 = _mm256_unpacklo_pd(yc1,yc2);\ + yb2 = _mm256_unpackhi_pd(yc1,yc2);\ + xb1 = _mm_add_pd(_mm_loadu_pd(c_pointer),_mm256_extractf128_pd(yb1,0));\ + xb2 = _mm_add_pd(_mm_loadu_pd(c_pointer+LDC),_mm256_extractf128_pd(yb2,0));\ + _mm_storeu_pd(c_pointer,xb1);\ + _mm_storeu_pd(c_pointer+LDC,xb2);\ + xb1 = _mm_add_pd(_mm_loadu_pd(c_pointer+2*LDC),_mm256_extractf128_pd(yb1,1));\ + xb2 = _mm_add_pd(_mm_loadu_pd(c_pointer+3*LDC),_mm256_extractf128_pd(yb2,1));\ + _mm_storeu_pd(c_pointer+2*LDC,xb1);\ + _mm_storeu_pd(c_pointer+3*LDC,xb2);\ + c_pointer += 2;\ +} +#define SAVE_m1n2 {\ + xb1 = _mm_loaddup_pd(alpha);\ + xc1 = _mm_mul_pd(xc1,xb1);\ + *c_pointer += _mm_cvtsd_f64(xc1);\ + xa1 = _mm_unpackhi_pd(xc1,xc1);\ + c_pointer[LDC]+= _mm_cvtsd_f64(xa1);\ + c_pointer ++;\ +} +#define SAVE_m1n4 {\ + ya1 = _mm256_broadcast_sd(alpha);\ + yc1 = _mm256_mul_pd(yc1,ya1);\ + xb1 = _mm256_extractf128_pd(yc1,0);\ + *c_pointer += _mm_cvtsd_f64(xb1);\ + xb2 = _mm_unpackhi_pd(xb1,xb1);\ + c_pointer[LDC] += _mm_cvtsd_f64(xb2);\ + xb1 = _mm256_extractf128_pd(yc1,1);\ + c_pointer[LDC*2] += _mm_cvtsd_f64(xb1);\ + xb2 = _mm_unpackhi_pd(xb1,xb1);\ + c_pointer[LDC*3] += _mm_cvtsd_f64(xb2);\ + c_pointer ++;\ +} +static void KERNEL_EDGE(double *packed_a, double *packed_b, BLASLONG m, BLASLONG edge_n, BLASLONG k, BLASLONG LDC, double *c,double *alpha){//icopy=8,ocopy=8 +//perform C += A B , edge_n<8 must be satisfied. 
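+/* Traversal is n-major: column blocks of 4/2/1, then row blocks of 4/2/1.
+   For every tile the pattern is, e.g. for the 4x4 case (illustrative):
+   INIT_m4n4                                         // yc1..yc4 = 0
+   for(k_count=0;k_count<k;k_count++) KERNEL_m4n4k1  // k rank-1 updates
+   SAVE_m4n4                                         // C(:,j) += alpha*ycj
+*/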
+ if(k==0 || m==0 || edge_n==0 || (*alpha)==0.0) return;
+ double *a_block_pointer,*b_block_pointer,*b_base_pointer;
+ double *c_pointer = c;
+ __m256d yc1,yc2,yc3,yc4,ya1,yb1,yb2;
+ __m128d xc1,xc2,xa1,xb1,xb2;
+ double sc1,sa1,sb1;
+ BLASLONG m_count,n_count,k_count;
+ b_base_pointer = packed_b;
+//now start calculation of the edge part
+ for(n_count=edge_n;n_count>3;n_count-=4){
+ a_block_pointer = packed_a;
+ for(m_count=m;m_count>3;m_count-=4){
+ b_block_pointer = b_base_pointer;
+ INIT_m4n4
+ for(k_count=0;k_count<k;k_count++) KERNEL_m4n4k1
+ SAVE_m4n4
+ }
+ for(;m_count>1;m_count-=2){
+ b_block_pointer = b_base_pointer;
+ INIT_m2n4
+ for(k_count=0;k_count<k;k_count++) KERNEL_m2n4k1
+ SAVE_m2n4
+ }
+ if(m_count>0){
+ b_block_pointer = b_base_pointer;
+ INIT_m1n4
+ for(k_count=0;k_count<k;k_count++) KERNEL_m1n4k1
+ SAVE_m1n4
+ }
+ b_base_pointer += 4*k;
+ c_pointer += 4*LDC-m;
+ }
+ for(;n_count>1;n_count-=2){
+ a_block_pointer = packed_a;
+ for(m_count=m;m_count>3;m_count-=4){
+ b_block_pointer = b_base_pointer;
+ INIT_m4n2
+ for(k_count=0;k_count<k;k_count++) KERNEL_m4n2k1
+ SAVE_m4n2
+ }
+ for(;m_count>1;m_count-=2){
+ b_block_pointer = b_base_pointer;
+ INIT_m2n2
+ for(k_count=0;k_count<k;k_count++) KERNEL_m2n2k1
+ SAVE_m2n2
+ }
+ if(m_count>0){
+ b_block_pointer = b_base_pointer;
+ INIT_m1n2
+ for(k_count=0;k_count<k;k_count++) KERNEL_m1n2k1
+ SAVE_m1n2
+ }
+ b_base_pointer += 2*k;
+ c_pointer += 2*LDC-m;
+ }
+ if(n_count>0){
+ a_block_pointer = packed_a;
+ for(m_count=m;m_count>3;m_count-=4){
+ b_block_pointer = b_base_pointer;
+ INIT_m4n1
+ for(k_count=0;k_count<k;k_count++) KERNEL_m4n1k1
+ SAVE_m4n1
+ }
+ for(;m_count>1;m_count-=2){
+ b_block_pointer = b_base_pointer;
+ INIT_m2n1
+ for(k_count=0;k_count<k;k_count++) KERNEL_m2n1k1
+ SAVE_m2n1
+ }
+ if(m_count>0){
+ b_block_pointer = b_base_pointer;
+ INIT_m1n1
+ for(k_count=0;k_count<k;k_count++) KERNEL_m1n1k1
+ SAVE_m1n1
+ }
+ }
+}
+
+int __attribute__ ((noinline))
+CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG ldc)
+{
+ if(m==0||n==0) return 0;
+ BLASLONG ndiv8 = n/8; double ALPHA = alpha;
+ double *packed_a = A;
+ if(ndiv8>0) KERNEL_MAIN(packed_a,B,m,ndiv8,k,ldc,C,&ALPHA);
+ if(n>ndiv8*8) KERNEL_EDGE(packed_a,B+(int64_t)k*(int64_t)ndiv8*8,m,n-ndiv8*8,k,ldc,C+(int64_t)ldc*(int64_t)ndiv8*8,&ALPHA);
+ return 0;
+}
diff --git a/kernel/x86_64/dgemm_kernel_8x8_skylakex.c b/kernel/x86_64/dgemm_kernel_8x8_skylakex.c
new file mode 100644
index 000000000..1139090e2
--- /dev/null
+++ b/kernel/x86_64/dgemm_kernel_8x8_skylakex.c
@@ -0,0 +1,782 @@
+#include "common.h"
+#include <stdint.h>
+#include <immintrin.h>
+
+#define ICOPY_4
+//register usage: zmm3 for alpha, zmm4-zmm7 for temporary use, zmm8-zmm31 for accumulators.
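+/* One k-step of the m1n8 row-major update below is, in intrinsic form
+   (a sketch with illustrative names, not the implementation):
+   __m512d b = _mm512_loadu_pd(b_ptr);   // vmovupd (%1),%%zmm5
+   __m512d a = _mm512_set1_pd(*a_ptr);   // vbroadcastsd (%0),%%zmm4
+   acc = _mm512_fmadd_pd(b, a, acc);     // vfmadd231pd %%zmm5,%%zmm4,%%zmm8
+   Wider tiles simply replay more broadcasts of A against the same B rows. */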
+/* row-major c_block */ +#define INNER_KERNEL_k1m1n8 \ + "prefetcht0 384(%1);"\ + "prefetcht0 768(%0); vmovupd (%1),%%zmm5; addq $64,%1;"\ + "vbroadcastsd (%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm8;" + +#define INNER_KERNEL_k1m2n8 \ + INNER_KERNEL_k1m1n8\ + "vbroadcastsd 8(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm9;" + +#define INNER_KERNEL_k1m4n8 \ + INNER_KERNEL_k1m2n8\ + "vbroadcastsd 16(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm10;"\ + "vbroadcastsd 24(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm11;" + +#define INNER_KERNEL_k1m8n8 \ + INNER_KERNEL_k1m4n8\ + "vbroadcastsd 32(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm12;"\ + "vbroadcastsd 40(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm13;"\ + "vbroadcastsd 48(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm14;"\ + "vbroadcastsd 56(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm15;" + +#define INNER_KERNEL_k1m1n16 \ + "prefetcht0 128(%1); prefetcht0 128(%1,%%r12,1);"\ + "prefetcht0 768(%0); vmovupd (%1),%%zmm5; vmovupd (%1,%%r12,1),%%zmm6; addq $64,%1;"\ + "vbroadcastsd (%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm8; vfmadd231pd %%zmm6,%%zmm4,%%zmm9;" + +#define INNER_KERNEL_k1m2n16 \ + INNER_KERNEL_k1m1n16\ + "vbroadcastsd 8(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm10;vfmadd231pd %%zmm6,%%zmm4,%%zmm11;" + +#define INNER_KERNEL_k1m4n16 \ + INNER_KERNEL_k1m2n16\ + "vbroadcastsd 16(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm12;vfmadd231pd %%zmm6,%%zmm4,%%zmm13;"\ + "vbroadcastsd 24(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm14;vfmadd231pd %%zmm6,%%zmm4,%%zmm15;" + +#define INNER_KERNEL_k1m8n16 \ + INNER_KERNEL_k1m4n16\ + "vbroadcastsd 32(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm16;vfmadd231pd %%zmm6,%%zmm4,%%zmm17;"\ + "vbroadcastsd 40(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm18;vfmadd231pd %%zmm6,%%zmm4,%%zmm19;"\ + "vbroadcastsd 48(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm20;vfmadd231pd %%zmm6,%%zmm4,%%zmm21;"\ + "vbroadcastsd 56(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm22;vfmadd231pd %%zmm6,%%zmm4,%%zmm23;" + +#define INNER_KERNEL_k1m1n24 \ + "prefetcht0 128(%1); prefetcht0 128(%1,%%r12,1); prefetcht0 128(%1,%%r12,2);"\ + "prefetcht0 768(%0); vmovupd (%1),%%zmm5; vmovupd (%1,%%r12,1),%%zmm6; vmovupd (%1,%%r12,2),%%zmm7; addq $64,%1;"\ + "vbroadcastsd (%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm8; vfmadd231pd %%zmm6,%%zmm4,%%zmm9; vfmadd231pd %%zmm7,%%zmm4,%%zmm10;" + +#define INNER_KERNEL_k1m2n24 \ + INNER_KERNEL_k1m1n24\ + "vbroadcastsd 8(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm11;vfmadd231pd %%zmm6,%%zmm4,%%zmm12;vfmadd231pd %%zmm7,%%zmm4,%%zmm13;" + +#define INNER_KERNEL_k1m4n24 \ + INNER_KERNEL_k1m2n24\ + "vbroadcastsd 16(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm14;vfmadd231pd %%zmm6,%%zmm4,%%zmm15;vfmadd231pd %%zmm7,%%zmm4,%%zmm16;"\ + "vbroadcastsd 24(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm17;vfmadd231pd %%zmm6,%%zmm4,%%zmm18;vfmadd231pd %%zmm7,%%zmm4,%%zmm19;" + +#define INNER_KERNEL_k1m8n24 \ + INNER_KERNEL_k1m4n24\ + "vbroadcastsd 32(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm20;vfmadd231pd %%zmm6,%%zmm4,%%zmm21;vfmadd231pd %%zmm7,%%zmm4,%%zmm22;"\ + "vbroadcastsd 40(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm23;vfmadd231pd %%zmm6,%%zmm4,%%zmm24;vfmadd231pd %%zmm7,%%zmm4,%%zmm25;"\ + "vbroadcastsd 48(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm26;vfmadd231pd %%zmm6,%%zmm4,%%zmm27;vfmadd231pd %%zmm7,%%zmm4,%%zmm28;"\ + "vbroadcastsd 56(%0),%%zmm4;vfmadd231pd %%zmm5,%%zmm4,%%zmm29;vfmadd231pd %%zmm6,%%zmm4,%%zmm30;vfmadd231pd %%zmm7,%%zmm4,%%zmm31;" + +#define INNER_KERNELm1(nn) \ + "cmpq $1,%2;jb "#nn"3f;"\ + #nn"4:\n\t"\ + 
INNER_KERNEL_k1m1n##nn "addq $8,%0;"\ + "decq %2;cmpq $1,%2;jnb "#nn"4b;"\ + #nn"3:\n\t" + +#define INNER_KERNELm2(nn) \ + "cmpq $1,%2;jb "#nn"0f;"\ + #nn"1:\n\t"\ + INNER_KERNEL_k1m2n##nn "addq $16,%0;"\ + "decq %2;cmpq $1,%2;jnb "#nn"1b;"\ + #nn"0:\n\t" + +#define INNER_KERNELm4(nn) \ + "cmpq $1,%2;jb "#nn"00f;"\ + #nn"01:\n\t"\ + INNER_KERNEL_k1m4n##nn "addq $32,%0;"\ + "decq %2;cmpq $1,%2;jnb "#nn"01b;"\ + #nn"00:\n\t" + +#define INNER_KERNELm8(nn) \ + "cmpq $8,%2;jb "#nn"001f;"\ + #nn"008:\n\t"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + "subq $8,%2;cmpq $8,%2;jnb "#nn"008b;"\ + #nn"001:\n\t"\ + "cmpq $1,%2;jb "#nn"000f;"\ + INNER_KERNEL_k1m8n##nn "addq $64,%0;"\ + "decq %2;cmpq $1,%2;jnb "#nn"001b;"\ + ""#nn"000:\n\t" + +#define INNER_INIT_m1n8 \ + "vpxorq %%zmm8, %%zmm8, %%zmm8;" + +#define INNER_INIT_m2n8 \ + "vpxorq %%zmm8, %%zmm8, %%zmm8; vpxorq %%zmm9, %%zmm9, %%zmm9;" + +#define INNER_INIT_m4n8 \ + "vpxorq %%zmm8, %%zmm8, %%zmm8; vpxorq %%zmm9, %%zmm9, %%zmm9; vpxorq %%zmm10,%%zmm10,%%zmm10;vpxorq %%zmm11,%%zmm11,%%zmm11;" + +#define INNER_INIT_m8n8 \ + INNER_INIT_m4n8\ + "vpxorq %%zmm12,%%zmm12,%%zmm12;vpxorq %%zmm13,%%zmm13,%%zmm13;vpxorq %%zmm14,%%zmm14,%%zmm14;vpxorq %%zmm15,%%zmm15,%%zmm15;" + +#define INNER_INIT_m1n16 INNER_INIT_m2n8 + +#define INNER_INIT_m2n16 INNER_INIT_m4n8 + +#define INNER_INIT_m4n16 INNER_INIT_m8n8 + +#define INNER_INIT_m8n16 \ + INNER_INIT_m8n8\ + "vpxorq %%zmm16,%%zmm16,%%zmm16;vpxorq %%zmm17,%%zmm17,%%zmm17;vpxorq %%zmm18,%%zmm18,%%zmm18;vpxorq %%zmm19,%%zmm19,%%zmm19;"\ + "vpxorq %%zmm20,%%zmm20,%%zmm20;vpxorq %%zmm21,%%zmm21,%%zmm21;vpxorq %%zmm22,%%zmm22,%%zmm22;vpxorq %%zmm23,%%zmm23,%%zmm23;" + +#define INNER_INIT_m1n24 \ + "vpxorq %%zmm8, %%zmm8, %%zmm8; vpxorq %%zmm9, %%zmm9, %%zmm9; vpxorq %%zmm10,%%zmm10,%%zmm10;" + +#define INNER_INIT_m2n24 \ + INNER_INIT_m1n24\ + "vpxorq %%zmm11,%%zmm11,%%zmm11; vpxorq %%zmm12,%%zmm12,%%zmm12; vpxorq %%zmm13,%%zmm13,%%zmm13;" + +#define INNER_INIT_m4n24 \ + INNER_INIT_m4n16\ + "vpxorq %%zmm16,%%zmm16,%%zmm16;vpxorq %%zmm17,%%zmm17,%%zmm17;vpxorq %%zmm18,%%zmm18,%%zmm18;vpxorq %%zmm19,%%zmm19,%%zmm19;" + +#define INNER_INIT_m8n24 \ + INNER_INIT_m8n16\ + "vpxorq %%zmm24,%%zmm24,%%zmm24;vpxorq %%zmm25,%%zmm25,%%zmm25;vpxorq %%zmm26,%%zmm26,%%zmm26;vpxorq %%zmm27,%%zmm27,%%zmm27;"\ + "vpxorq %%zmm28,%%zmm28,%%zmm28;vpxorq %%zmm29,%%zmm29,%%zmm29;vpxorq %%zmm30,%%zmm30,%%zmm30;vpxorq %%zmm31,%%zmm31,%%zmm31;" + +#define INNER_SETINDEX \ + "vpinsrq $0,%4,%%xmm4,%%xmm4; vbroadcastsd %%xmm4,%%zmm4;"\ + "kxnorw %%k1,%%k1,%%k1; kshiftlw $1,%%k1,%%k1; vpxorq %%zmm6,%%zmm6,%%zmm6; vmovapd %%zmm4,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};"\ + "kshiftlw $1,%%k1,%%k1; vpaddq %%zmm4,%%zmm6,%%zmm6%{%%k1%};" + +#define INNER_STORE_m1n8(c1,disp) \ + "kxnorw %%k1,%%k1,%%k1;"\ + "vgatherqpd "#disp"(%3,%%zmm6,1), %%zmm7 %{%%k1%};"\ + "vfmadd132pd %%zmm3,%%zmm7,"#c1";"\ + "kxnorw %%k1,%%k1,%%k1;"\ + "vscatterqpd "#c1", "#disp"(%3,%%zmm6,1) %{%%k1%};" + 
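Editor's note: INNER_SETINDEX above builds zmm6 = {0,1,...,7} * ldc_in_bytes (the byte offsets of eight consecutive columns of C), so the masked vgatherqpd / vfmadd132pd / vscatterqpd sequence in INNER_STORE_m1n8 updates a single row of C across eight columns. A scalar sketch of that store (not part of the patch; the helper name is illustrative, and ldc here is back in elements rather than bytes):

    /* C[0][j] = alpha * acc[j] + C[0][j] for eight columns at stride ldc */
    static void store_m1n8_ref(double *c, long ldc, double alpha,
                               const double acc[8])
    {
        for (int j = 0; j < 8; j++)
            c[j * ldc] = alpha * acc[j] + c[j * ldc];
    }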
+#define INNER_SAVE_m1n8 \ + INNER_SETINDEX\ + INNER_STORE_m1n8(%%zmm8,0) + +#define INNER_SAVE_m1n16 \ + INNER_SAVE_m1n8\ + "leaq (%3,%4,8),%3;"\ + INNER_STORE_m1n8(%%zmm9,0) + +#define INNER_SAVE_m1n24 \ + INNER_SAVE_m1n16\ + "leaq (%3,%4,8),%3;"\ + INNER_STORE_m1n8(%%zmm10,0) + +#define INNER_SAVE_m2n8 \ + INNER_SETINDEX\ + INNER_STORE_m1n8(%%zmm8,0)\ + INNER_STORE_m1n8(%%zmm9,8) + +#define INNER_SAVE_m2n16 \ + INNER_SETINDEX\ + INNER_STORE_m1n8(%%zmm8,0)\ + INNER_STORE_m1n8(%%zmm10,8)\ + "leaq (%3,%4,8),%3;"\ + INNER_STORE_m1n8(%%zmm9,0)\ + INNER_STORE_m1n8(%%zmm11,8) + +#define INNER_SAVE_m2n24 \ + INNER_SETINDEX\ + INNER_STORE_m1n8(%%zmm8,0)\ + INNER_STORE_m1n8(%%zmm11,8)\ + "leaq (%3,%4,8),%3;"\ + INNER_STORE_m1n8(%%zmm9,0)\ + INNER_STORE_m1n8(%%zmm12,8)\ + "leaq (%3,%4,8),%3;"\ + INNER_STORE_m1n8(%%zmm10,0)\ + INNER_STORE_m1n8(%%zmm13,8) + +#define INNER_PREF_8x8 \ + "prefetcht0 (%3); prefetcht0 56(%3); prefetcht0 (%3,%4,1); prefetcht0 56(%3,%4,1); prefetcht0 (%3,%4,2); prefetcht0 56(%3,%4,2);"\ + "prefetcht0 (%3,%4,4); prefetcht0 56(%3,%4,4); leaq (%3,%4,2),%3;"\ + "prefetcht0 (%3,%4,1); prefetcht0 56(%3,%4,1); prefetcht0 (%3,%4,4); prefetcht0 56(%3,%4,4); leaq (%3,%4,1),%3;"\ + "prefetcht0 (%3,%4,2); prefetcht0 56(%3,%4,2); prefetcht0 (%3,%4,4); prefetcht0 56(%3,%4,4);"\ + "subq %4,%3; subq %4,%3; subq %4,%3;" + +#define INNER_TRANS_4x8(c1,c2,c3,c4) \ + "vunpcklpd "#c2","#c1",%%zmm4;vunpckhpd "#c2","#c1",%%zmm5;vunpcklpd "#c4","#c3",%%zmm6;vunpckhpd "#c4","#c3",%%zmm7;"\ + "vblendmpd %%zmm6,%%zmm4,"#c1"%{%6%};vblendmpd %%zmm7,%%zmm5,"#c3"%{%6%};"\ + "vshuff64x2 $0xb1,"#c1","#c1","#c1";vshuff64x2 $0xb1,"#c3","#c3","#c3";"\ + "vblendmpd %%zmm4,"#c1",%%zmm4%{%6%};vblendmpd %%zmm5,"#c3","#c2"%{%6%};"\ + "vblendmpd "#c1",%%zmm6,%%zmm6%{%6%};vblendmpd "#c3",%%zmm7,"#c4"%{%6%};"\ + "vmovapd %%zmm4,"#c1"; vmovapd %%zmm6,"#c3";" + +#define INNER_TRANS_8x8(c1,c2,c3,c4,c5,c6,c7,c8) \ + INNER_TRANS_4x8(c1,c2,c3,c4)\ + INNER_TRANS_4x8(c5,c6,c7,c8)\ + "vblendmpd "#c5","#c1",%%zmm4%{%5%};vshuff64x2 $0x4e,%%zmm4,%%zmm4,%%zmm4;"\ + "vblendmpd "#c1",%%zmm4,"#c1"%{%5%};vblendmpd %%zmm4,"#c5","#c5"%{%5%};"\ + "vblendmpd "#c6","#c2",%%zmm5%{%5%};vshuff64x2 $0x4e,%%zmm5,%%zmm5,%%zmm5;"\ + "vblendmpd "#c2",%%zmm5,"#c2"%{%5%};vblendmpd %%zmm5,"#c6","#c6"%{%5%};"\ + "vblendmpd "#c7","#c3",%%zmm6%{%5%};vshuff64x2 $0x4e,%%zmm6,%%zmm6,%%zmm6;"\ + "vblendmpd "#c3",%%zmm6,"#c3"%{%5%};vblendmpd %%zmm6,"#c7","#c7"%{%5%};"\ + "vblendmpd "#c8","#c4",%%zmm7%{%5%};vshuff64x2 $0x4e,%%zmm7,%%zmm7,%%zmm7;"\ + "vblendmpd "#c4",%%zmm7,"#c4"%{%5%};vblendmpd %%zmm7,"#c8","#c8"%{%5%};" + +//%7 for k01(input) only when m=4 +#define INNER_STORE_4x8(c1,c2,c3,c4) \ + "vmovupd (%3),%%zmm4%{%5%};vmovupd -32(%3,%4,4),%%zmm4%{%7%};vfmadd132pd %%zmm3,%%zmm4,"#c1";"\ + "vmovupd "#c1",(%3)%{%5%}; vmovupd "#c1",-32(%3,%4,4)%{%7%}; leaq (%3,%4,1),%3;"\ + "vmovupd (%3),%%zmm5%{%5%};vmovupd -32(%3,%4,4),%%zmm5%{%7%};vfmadd132pd %%zmm3,%%zmm5,"#c2";"\ + "vmovupd "#c2",(%3)%{%5%}; vmovupd "#c2",-32(%3,%4,4)%{%7%}; leaq (%3,%4,1),%3;"\ + "vmovupd (%3),%%zmm6%{%5%};vmovupd -32(%3,%4,4),%%zmm6%{%7%};vfmadd132pd %%zmm3,%%zmm6,"#c3";"\ + "vmovupd "#c3",(%3)%{%5%}; vmovupd "#c3",-32(%3,%4,4)%{%7%}; leaq (%3,%4,1),%3;"\ + "vmovupd (%3),%%zmm7%{%5%};vmovupd -32(%3,%4,4),%%zmm7%{%7%};vfmadd132pd %%zmm3,%%zmm7,"#c4";"\ + "vmovupd "#c4",(%3)%{%5%}; vmovupd "#c4",-32(%3,%4,4)%{%7%}; leaq (%3,%4,1),%3;"\ + "leaq (%3,%4,4),%3;" + +#define INNER_STORE_8x8(c1,c2,c3,c4,c5,c6,c7,c8) \ + "prefetcht1 120(%3); prefetcht1 120(%3,%4,1);"\ + "vfmadd213pd 
(%3),%%zmm3,"#c1"; vmovupd "#c1",(%3); vfmadd213pd (%3,%4,1),%%zmm3,"#c2"; vmovupd "#c2",(%3,%4,1); leaq (%3,%4,2),%3;"\ + "prefetcht1 120(%3); prefetcht1 120(%3,%4,1);"\ + "vfmadd213pd (%3),%%zmm3,"#c3"; vmovupd "#c3",(%3); vfmadd213pd (%3,%4,1),%%zmm3,"#c4"; vmovupd "#c4",(%3,%4,1); leaq (%3,%4,2),%3;"\ + "prefetcht1 120(%3); prefetcht1 120(%3,%4,1);"\ + "vfmadd213pd (%3),%%zmm3,"#c5"; vmovupd "#c5",(%3); vfmadd213pd (%3,%4,1),%%zmm3,"#c6"; vmovupd "#c6",(%3,%4,1); leaq (%3,%4,2),%3;"\ + "prefetcht1 120(%3); prefetcht1 120(%3,%4,1);"\ + "vfmadd213pd (%3),%%zmm3,"#c7"; vmovupd "#c7",(%3); vfmadd213pd (%3,%4,1),%%zmm3,"#c8"; vmovupd "#c8",(%3,%4,1); leaq (%3,%4,2),%3;" + +#define INNER_SAVE_m4n8 \ + INNER_TRANS_4x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11)\ + INNER_STORE_4x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11) + +#define INNER_SAVE_m4n16 \ + INNER_TRANS_4x8(%%zmm8,%%zmm10,%%zmm12,%%zmm14)\ + INNER_STORE_4x8(%%zmm8,%%zmm10,%%zmm12,%%zmm14)\ + INNER_TRANS_4x8(%%zmm9,%%zmm11,%%zmm13,%%zmm15)\ + INNER_STORE_4x8(%%zmm9,%%zmm11,%%zmm13,%%zmm15) + +#define INNER_SAVE_m4n24 \ + INNER_TRANS_4x8(%%zmm8,%%zmm11,%%zmm14,%%zmm17)\ + INNER_STORE_4x8(%%zmm8,%%zmm11,%%zmm14,%%zmm17)\ + INNER_TRANS_4x8(%%zmm9,%%zmm12,%%zmm15,%%zmm18)\ + INNER_STORE_4x8(%%zmm9,%%zmm12,%%zmm15,%%zmm18)\ + INNER_TRANS_4x8(%%zmm10,%%zmm13,%%zmm16,%%zmm19)\ + INNER_STORE_4x8(%%zmm10,%%zmm13,%%zmm16,%%zmm19) + +#define INNER_SAVE_m8n8 \ + INNER_PREF_8x8\ + INNER_TRANS_8x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11,%%zmm12,%%zmm13,%%zmm14,%%zmm15)\ + INNER_STORE_8x8(%%zmm8,%%zmm9,%%zmm10,%%zmm11,%%zmm12,%%zmm13,%%zmm14,%%zmm15) + +#define INNER_SAVE_m8n16 \ + INNER_PREF_8x8\ + INNER_TRANS_8x8(%%zmm8,%%zmm10,%%zmm12,%%zmm14,%%zmm16,%%zmm18,%%zmm20,%%zmm22)\ + INNER_STORE_8x8(%%zmm8,%%zmm10,%%zmm12,%%zmm14,%%zmm16,%%zmm18,%%zmm20,%%zmm22)\ + INNER_PREF_8x8\ + INNER_TRANS_8x8(%%zmm9,%%zmm11,%%zmm13,%%zmm15,%%zmm17,%%zmm19,%%zmm21,%%zmm23)\ + INNER_STORE_8x8(%%zmm9,%%zmm11,%%zmm13,%%zmm15,%%zmm17,%%zmm19,%%zmm21,%%zmm23) + +#define INNER_SAVE_m8n24 \ + INNER_PREF_8x8\ + INNER_TRANS_8x8(%%zmm8,%%zmm11,%%zmm14,%%zmm17,%%zmm20,%%zmm23,%%zmm26,%%zmm29)\ + INNER_STORE_8x8(%%zmm8,%%zmm11,%%zmm14,%%zmm17,%%zmm20,%%zmm23,%%zmm26,%%zmm29)\ + INNER_PREF_8x8\ + INNER_TRANS_8x8(%%zmm9,%%zmm12,%%zmm15,%%zmm18,%%zmm21,%%zmm24,%%zmm27,%%zmm30)\ + INNER_STORE_8x8(%%zmm9,%%zmm12,%%zmm15,%%zmm18,%%zmm21,%%zmm24,%%zmm27,%%zmm30)\ + INNER_PREF_8x8\ + INNER_TRANS_8x8(%%zmm10,%%zmm13,%%zmm16,%%zmm19,%%zmm22,%%zmm25,%%zmm28,%%zmm31)\ + INNER_STORE_8x8(%%zmm10,%%zmm13,%%zmm16,%%zmm19,%%zmm22,%%zmm25,%%zmm28,%%zmm31) + +#define COMPUTE_n8 {\ + __asm__ __volatile__(\ + "vbroadcastsd (%9),%%zmm3;"\ + "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $6,%%r12;"\ + "cmpq $8,%8; jb 42222f;"\ + "42221:\n\t"\ + INNER_INIT_m8n8\ + INNER_KERNELm8(8)\ + INNER_SAVE_m8n8\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $3,%4;subq %4,%3;shrq $3,%4;addq $64,%3;"\ + "subq $8,%8; cmpq $8,%8; jnb 42221b;"\ + "42222:\n\t"\ + "cmpq $4,%8; jb 42223f;"\ + INNER_INIT_m4n8\ + INNER_KERNELm4(8)\ + INNER_SAVE_m4n8\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $3,%4;subq %4,%3;shrq $3,%4;addq $32,%3;"\ + "subq $4,%8;"\ + "42223:\n\t"\ + "cmpq $2,%8; jb 42224f;"\ + INNER_INIT_m2n8\ + INNER_KERNELm2(8)\ + INNER_SAVE_m2n8\ + "movq %%r13,%2; subq %%r12,%1;"\ + "addq $16,%3;"\ + "subq $2,%8;"\ + "42224:\n\t"\ + "cmpq $1,%8; jb 42225f;"\ + INNER_INIT_m1n8\ + INNER_KERNELm1(8)\ + INNER_SAVE_m1n8\ + "movq %%r13,%2; subq %%r12,%1;"\ + "addq $8,%3;"\ + "42225:\n\t"\ + "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\ + "shlq 
$3,%4;addq %4,%3;shrq $3,%4;"\ + :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),"+r"(M),"+r"(alpha)\ + ::"zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","cc","memory","k1","r12","r13","r14");\ + a_block_pointer -= M * K;\ +} +#define COMPUTE_n16 {\ + __asm__ __volatile__(\ + "vbroadcastsd (%9),%%zmm3;"\ + "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $6,%%r12;"\ + "cmpq $8,%8; jb 32222f;"\ + "32221:\n\t"\ + INNER_INIT_m8n16\ + INNER_KERNELm8(16)\ + INNER_SAVE_m8n16\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $4,%4;subq %4,%3;shrq $4,%4;addq $64,%3;"\ + "subq $8,%8; cmpq $8,%8; jnb 32221b;"\ + "32222:\n\t"\ + "cmpq $4,%8; jb 32223f;"\ + INNER_INIT_m4n16\ + INNER_KERNELm4(16)\ + INNER_SAVE_m4n16\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $4,%4;subq %4,%3;shrq $4,%4;addq $32,%3;"\ + "subq $4,%8;"\ + "32223:\n\t"\ + "cmpq $2,%8; jb 32224f;"\ + INNER_INIT_m2n16\ + INNER_KERNELm2(16)\ + INNER_SAVE_m2n16\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $3,%4;subq %4,%3;shrq $3,%4;addq $16,%3;"\ + "subq $2,%8;"\ + "32224:\n\t"\ + "cmpq $1,%8; jb 32225f;"\ + INNER_INIT_m1n16\ + INNER_KERNELm1(16)\ + INNER_SAVE_m1n16\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $3,%4;subq %4,%3;shrq $3,%4;addq $8,%3;"\ + "32225:\n\t"\ + "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\ + "shlq $4,%4;addq %4,%3;shrq $4,%4;"\ + "leaq (%1,%%r12,2),%1;"\ + :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),"+r"(M),"+r"(alpha)\ + ::"zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17",\ + "zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","cc","memory","k1","r12","r13","r14");\ + a_block_pointer -= M * K;\ +} +#define COMPUTE_n24 {\ + __asm__ __volatile__(\ + "vbroadcastsd (%9),%%zmm3;"\ + "movq %8,%%r14;movq %2,%%r13;movq %2,%%r12;shlq $6,%%r12;"\ + "cmpq $8,%8; jb 22222f;"\ + "22221:\n\t"\ + INNER_INIT_m8n24\ + INNER_KERNELm8(24)\ + INNER_SAVE_m8n24\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $3,%4;subq %4,%3;shlq $1,%4;subq %4,%3;shrq $4,%4;addq $64,%3;"\ + "subq $8,%8; cmpq $8,%8; jnb 22221b;"\ + "22222:\n\t"\ + "cmpq $4,%8; jb 22223f;"\ + INNER_INIT_m4n24\ + INNER_KERNELm4(24)\ + INNER_SAVE_m4n24\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $3,%4;subq %4,%3;shlq $1,%4;subq %4,%3;shrq $4,%4;addq $32,%3;"\ + "subq $4,%8;"\ + "22223:\n\t"\ + "cmpq $2,%8; jb 22224f;"\ + INNER_INIT_m2n24\ + INNER_KERNELm2(24)\ + INNER_SAVE_m2n24\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $4,%4;subq %4,%3;shrq $4,%4;addq $16,%3;"\ + "subq $2,%8;"\ + "22224:\n\t"\ + "cmpq $1,%8; jb 22225f;"\ + INNER_INIT_m1n24\ + INNER_KERNELm1(24)\ + INNER_SAVE_m1n24\ + "movq %%r13,%2; subq %%r12,%1;"\ + "shlq $4,%4;subq %4,%3;shrq $4,%4;addq $8,%3;"\ + "22225:\n\t"\ + "movq %%r14,%8;shlq $3,%8;subq %8,%3;shrq $3,%8;"\ + "shlq $3,%4;addq %4,%3;shlq $1,%4;addq %4,%3;shrq $4,%4;"\ + "leaq (%1,%%r12,2),%1; addq %%r12,%1;"\ + :"+r"(a_block_pointer),"+r"(packed_b_pointer),"+r"(K),"+r"(c_pointer),"+r"(ldc_in_bytes),"+Yk"(k02),"+Yk"(k03),"+Yk"(k01),"+r"(M),"+r"(alpha)\ + ::"zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19",\ + "zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31","cc","memory","k1","r12","r13","r14");\ + a_block_pointer -= M * K;\ +} + +static void KERNEL_MAIN(double *packed_a, 
double *packed_b, BLASLONG m, BLASLONG ndiv8, BLASLONG k, BLASLONG LDC, double *c,double *alpha){//icopy=8,ocopy=8 +//perform C += A B + if(k==0 || m==0 || ndiv8==0) return; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(double); + int64_t K = (int64_t)k; int64_t M = (int64_t)m; + double *a_block_pointer; + double *c_pointer = c; + __mmask16 k01 = 0x00f0,k02 = 0x000f,k03 = 0x0033; + BLASLONG m_count,ndiv8_count,k_count; + double *packed_b_pointer = packed_b; + a_block_pointer = packed_a; + for(ndiv8_count=ndiv8;ndiv8_count>2;ndiv8_count-=3){ + COMPUTE_n24 + } + for(;ndiv8_count>1;ndiv8_count-=2){ + COMPUTE_n16 + } + if(ndiv8_count>0){ + COMPUTE_n8 + } +} + +/* __m512d accumulators: zc1-zc4; temporary variables: za1,zb1-zb2 */ +/* __m256d accumulators: yc1-yc4; temporary variables: ya1,yb1-yb2 */ +/* __m128d accumulators: xc1-xc4; temporary variables: xa1,xb1-xb2 */ +/* double accumulator: sc1; temporary variables: sa1,sb1 */ +/* column-major c_block */ +#define KERNEL_m8n4k1 {\ + __asm__ __volatile__(\ + "vmovupd (%0),%2; addq $64,%0;"\ + "vbroadcastsd (%1),%3; vfmadd231pd %2,%3,%5; "\ + "vbroadcastsd 8(%1),%4; vfmadd231pd %2,%4,%6; "\ + "vbroadcastsd 16(%1),%3; vfmadd231pd %2,%3,%7; "\ + "vbroadcastsd 24(%1),%4; vfmadd231pd %2,%4,%8; "\ + "addq $32,%1;"\ + :"+r"(a_block_pointer),"+r"(b_block_pointer),"+v"(za1),"+v"(zb1),"+v"(zb2),"+v"(zc1),"+v"(zc2),"+v"(zc3),"+v"(zc4)::"cc","memory");\ +} +#define KERNEL_m8n2k1 {\ + __asm__ __volatile__(\ + "vmovupd (%0),%2; addq $64,%0;"\ + "vbroadcastsd (%1),%3; vfmadd231pd %2,%3,%5; "\ + "vbroadcastsd 8(%1),%4; vfmadd231pd %2,%4,%6; "\ + "addq $16,%1;"\ + :"+r"(a_block_pointer),"+r"(b_block_pointer),"+v"(za1),"+v"(zb1),"+v"(zb2),"+v"(zc1),"+v"(zc2)::"cc","memory");\ +} +#define KERNEL_m8n1k1 {\ + __asm__ __volatile__(\ + "vmovupd (%0),%2; addq $64,%0;"\ + "vbroadcastsd (%1),%3; vfmadd231pd %2,%3,%4; "\ + "addq $8,%1;"\ + :"+r"(a_block_pointer),"+r"(b_block_pointer),"+v"(za1),"+v"(zb1),"+v"(zc1)::"cc","memory");\ +} +#define INIT_m8n1 zc1=_mm512_setzero_pd(); +#define INIT_m8n2 zc2=INIT_m8n1 +#define INIT_m8n4 zc4=zc3=INIT_m8n2 +#define SAVE_m8n1 {\ + __asm__ __volatile__("vbroadcastsd (%0),%1;":"+r"(alpha),"+v"(za1)::"memory");\ + zb1 = _mm512_loadu_pd(c_pointer);\ + zc1 = _mm512_fmadd_pd(zc1,za1,zb1);\ + _mm512_storeu_pd(c_pointer,zc1);\ + c_pointer += 8;\ +} +#define SAVE_m8n2 {\ + __asm__ __volatile__("vbroadcastsd (%0),%1;":"+r"(alpha),"+v"(za1)::"memory");\ + zb1 = _mm512_loadu_pd(c_pointer); zb2 = _mm512_loadu_pd(c_pointer+LDC);\ + zc1 = _mm512_fmadd_pd(zc1,za1,zb1); zc2 = _mm512_fmadd_pd(zc2,za1,zb2);\ + _mm512_storeu_pd(c_pointer,zc1); _mm512_storeu_pd(c_pointer+LDC,zc2);\ + c_pointer += 8;\ +} +#define SAVE_m8n4 {\ + __asm__ __volatile__("vbroadcastsd (%0),%1;":"+r"(alpha),"+v"(za1)::"memory");\ + zb1 = _mm512_loadu_pd(c_pointer); zb2 = _mm512_loadu_pd(c_pointer+LDC);\ + zc1 = _mm512_fmadd_pd(zc1,za1,zb1); zc2 = _mm512_fmadd_pd(zc2,za1,zb2);\ + _mm512_storeu_pd(c_pointer,zc1); _mm512_storeu_pd(c_pointer+LDC,zc2);\ + c_pointer += LDC*2;\ + zb1 = _mm512_loadu_pd(c_pointer); zb2 = _mm512_loadu_pd(c_pointer+LDC);\ + zc3 = _mm512_fmadd_pd(zc3,za1,zb1); zc4 = _mm512_fmadd_pd(zc4,za1,zb2);\ + _mm512_storeu_pd(c_pointer,zc3); _mm512_storeu_pd(c_pointer+LDC,zc4);\ + c_pointer += 8-LDC*2;\ +} +#define KERNEL_m4n4k1 {\ + ya1 = _mm256_loadu_pd(a_block_pointer);a_block_pointer+=4;\ + yb1 = _mm256_broadcast_sd(b_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + yb2 = _mm256_broadcast_sd(b_block_pointer+1); yc2 = _mm256_fmadd_pd(ya1,yb2,yc2);\ + yb1 = 
_mm256_broadcast_sd(b_block_pointer+2); yc3 = _mm256_fmadd_pd(ya1,yb1,yc3);\ + yb2 = _mm256_broadcast_sd(b_block_pointer+3); yc4 = _mm256_fmadd_pd(ya1,yb2,yc4);\ + b_block_pointer+=4;\ +} +#define KERNEL_m4n2k1 {\ + ya1 = _mm256_loadu_pd(a_block_pointer);a_block_pointer+=4;\ + yb1 = _mm256_broadcast_sd(b_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + yb2 = _mm256_broadcast_sd(b_block_pointer+1); yc2 = _mm256_fmadd_pd(ya1,yb2,yc2);\ + b_block_pointer+=2;\ +} +#define KERNEL_m4n1k1 {\ + ya1 = _mm256_loadu_pd(a_block_pointer);a_block_pointer+=4;\ + yb1 = _mm256_broadcast_sd(b_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + b_block_pointer++;\ +} +#define INIT_m4n1 yc1=_mm256_setzero_pd(); +#define INIT_m4n2 yc2=INIT_m4n1 +#define INIT_m4n4 yc4=yc3=INIT_m4n2 +#define SAVE_m4n1 {\ + yb1 = _mm256_broadcast_sd(alpha);\ + ya1 = _mm256_loadu_pd(c_pointer);\ + yc1 = _mm256_fmadd_pd(yc1,yb1,ya1);\ + _mm256_storeu_pd(c_pointer,yc1);\ + c_pointer += 4;\ +} +#define SAVE_m4n2 {\ + ya1 = _mm256_broadcast_sd(alpha);\ + yb1 = _mm256_loadu_pd(c_pointer); yb2 = _mm256_loadu_pd(c_pointer+LDC);\ + yc1 = _mm256_fmadd_pd(yc1,ya1,yb1); yc2 = _mm256_fmadd_pd(yc2,ya1,yb2);\ + _mm256_storeu_pd(c_pointer,yc1); _mm256_storeu_pd(c_pointer+LDC,yc2);\ + c_pointer += 4;\ +} +#define SAVE_m4n4 {\ + ya1 = _mm256_broadcast_sd(alpha);\ + yb1 = _mm256_loadu_pd(c_pointer); yb2 = _mm256_loadu_pd(c_pointer+LDC);\ + yc1 = _mm256_fmadd_pd(yc1,ya1,yb1); yc2 = _mm256_fmadd_pd(yc2,ya1,yb2);\ + _mm256_storeu_pd(c_pointer,yc1); _mm256_storeu_pd(c_pointer+LDC,yc2);\ + c_pointer += LDC*2;\ + yb1 = _mm256_loadu_pd(c_pointer); yb2 = _mm256_loadu_pd(c_pointer+LDC);\ + yc3 = _mm256_fmadd_pd(yc3,ya1,yb1); yc4 = _mm256_fmadd_pd(yc4,ya1,yb2);\ + _mm256_storeu_pd(c_pointer,yc3); _mm256_storeu_pd(c_pointer+LDC,yc4);\ + c_pointer += 4-LDC*2;\ +} +#define KERNEL_m2n2k1 {\ + xa1 = _mm_loadu_pd(a_block_pointer); a_block_pointer+=2;\ + xb1 = _mm_loaddup_pd(b_block_pointer); xc1 = _mm_fmadd_pd(xa1,xb1,xc1);\ + xb2 = _mm_loaddup_pd(b_block_pointer+1); xc2 = _mm_fmadd_pd(xa1,xb2,xc2);\ + b_block_pointer += 2;\ +} +#define KERNEL_m2n1k1 {\ + xa1 = _mm_loadu_pd(a_block_pointer); a_block_pointer+=2;\ + xb1 = _mm_loaddup_pd(b_block_pointer); xc1 = _mm_fmadd_pd(xa1,xb1,xc1);\ + b_block_pointer ++;\ +} +#define INIT_m2n1 xc1=_mm_setzero_pd(); +#define INIT_m2n2 xc2=INIT_m2n1 +#define SAVE_m2n1 {\ + xb1 = _mm_loaddup_pd(alpha);\ + xa1 = _mm_loadu_pd(c_pointer);\ + xc1 = _mm_fmadd_pd(xc1,xb1,xa1);\ + _mm_storeu_pd(c_pointer,xc1);\ + c_pointer += 2;\ +} +#define SAVE_m2n2 {\ + xa1 = _mm_loaddup_pd(alpha);\ + xb1 = _mm_loadu_pd(c_pointer); xb2 = _mm_loadu_pd(c_pointer+LDC);\ + xc1 = _mm_fmadd_pd(xc1,xa1,xb1); xc2 = _mm_fmadd_pd(xc2,xa1,xb2);\ + _mm_storeu_pd(c_pointer,xc1); _mm_storeu_pd(c_pointer+LDC,xc2);\ + c_pointer += 2;\ +} +#define KERNEL_m1n1k1 {\ + sa1 = *a_block_pointer; a_block_pointer++;\ + sb1 = *b_block_pointer; sc1 += sa1 * sb1;\ + b_block_pointer ++;\ +} +#define INIT_m1n1 sc1=0.0; +#define SAVE_m1n1 {\ + *c_pointer += sc1 * (*alpha);\ + c_pointer++;\ +} + +/* row-major c_block */ +#define KERNEL_m2n4k1 {\ + yb1 = _mm256_loadu_pd(b_block_pointer);b_block_pointer+=4;\ + ya1 = _mm256_broadcast_sd(a_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + ya1 = _mm256_broadcast_sd(a_block_pointer+1);yc2 = _mm256_fmadd_pd(ya1,yb1,yc2);\ + a_block_pointer += 2;\ +} +#define KERNEL_m1n4k1 {\ + yb1 = _mm256_loadu_pd(b_block_pointer);b_block_pointer+=4;\ + ya1 = _mm256_broadcast_sd(a_block_pointer); yc1 = _mm256_fmadd_pd(ya1,yb1,yc1);\ + 
a_block_pointer ++;\ +} +#define KERNEL_m1n2k1 {\ + xb1 = _mm_loadu_pd(b_block_pointer);b_block_pointer+=2;\ + xa1 = _mm_loaddup_pd(a_block_pointer); xc1 = _mm_fmadd_pd(xa1,xb1,xc1);\ + a_block_pointer ++;\ +} +#define INIT_m1n2 INIT_m2n1 +#define INIT_m1n4 INIT_m4n1 +#define INIT_m2n4 INIT_m4n2 +#define SAVE_m2n4 {\ + ya1 = _mm256_broadcast_sd(alpha);\ + yc1 = _mm256_mul_pd(yc1,ya1);\ + yc2 = _mm256_mul_pd(yc2,ya1);\ + yb1 = _mm256_unpacklo_pd(yc1,yc2);\ + yb2 = _mm256_unpackhi_pd(yc1,yc2);\ + xb1 = _mm_add_pd(_mm_loadu_pd(c_pointer),_mm256_extractf128_pd(yb1,0));\ + xb2 = _mm_add_pd(_mm_loadu_pd(c_pointer+LDC),_mm256_extractf128_pd(yb2,0));\ + _mm_storeu_pd(c_pointer,xb1);\ + _mm_storeu_pd(c_pointer+LDC,xb2);\ + xb1 = _mm_add_pd(_mm_loadu_pd(c_pointer+2*LDC),_mm256_extractf128_pd(yb1,1));\ + xb2 = _mm_add_pd(_mm_loadu_pd(c_pointer+3*LDC),_mm256_extractf128_pd(yb2,1));\ + _mm_storeu_pd(c_pointer+2*LDC,xb1);\ + _mm_storeu_pd(c_pointer+3*LDC,xb2);\ + c_pointer += 2;\ +} +#define SAVE_m1n2 {\ + xb1 = _mm_loaddup_pd(alpha);\ + xc1 = _mm_mul_pd(xc1,xb1);\ + *c_pointer += _mm_cvtsd_f64(xc1);\ + xa1 = _mm_unpackhi_pd(xc1,xc1);\ + c_pointer[LDC]+= _mm_cvtsd_f64(xa1);\ + c_pointer ++;\ +} +#define SAVE_m1n4 {\ + ya1 = _mm256_broadcast_sd(alpha);\ + yc1 = _mm256_mul_pd(yc1,ya1);\ + xb1 = _mm256_extractf128_pd(yc1,0);\ + *c_pointer += _mm_cvtsd_f64(xb1);\ + xb2 = _mm_unpackhi_pd(xb1,xb1);\ + c_pointer[LDC] += _mm_cvtsd_f64(xb2);\ + xb1 = _mm256_extractf128_pd(yc1,1);\ + c_pointer[LDC*2] += _mm_cvtsd_f64(xb1);\ + xb2 = _mm_unpackhi_pd(xb1,xb1);\ + c_pointer[LDC*3] += _mm_cvtsd_f64(xb2);\ + c_pointer ++;\ +} + +static void KERNEL_EDGE(double *packed_a, double *packed_b, BLASLONG m, BLASLONG edge_n, BLASLONG k, BLASLONG LDC, double *c,double *alpha){//icopy=8,ocopy=8 +//perform C += A B , edge_n<8 must be satisfied. 
+ if(k==0 || m==0 || edge_n==0) return;
+ double *a_block_pointer,*b_block_pointer,*b_base_pointer;
+ double *c_pointer = c;
+ __m512d zb1,zb2,za1,zc1,zc2,zc3,zc4;
+ __m256d yc1,yc2,yc3,yc4,ya1,yb1,yb2;
+ __m128d xc1,xc2,xa1,xb1,xb2;
+ double sc1,sa1,sb1;
+ BLASLONG m_count,n_count,k_count;
+ b_base_pointer = packed_b;
+//now start calculation of the edge part
+ for(n_count=edge_n;n_count>3;n_count-=4){
+ a_block_pointer = packed_a;
+ for(m_count=m;m_count>7;m_count-=8){
+ b_block_pointer = b_base_pointer;
+ INIT_m8n4
+ for(k_count=0;k_count<k;k_count++) KERNEL_m8n4k1
+ SAVE_m8n4
+ }
+ for(;m_count>3;m_count-=4){
+ b_block_pointer = b_base_pointer;
+ INIT_m4n4
+ for(k_count=0;k_count<k;k_count++) KERNEL_m4n4k1
+ SAVE_m4n4
+ }
+ for(;m_count>1;m_count-=2){
+ b_block_pointer = b_base_pointer;
+ INIT_m2n4
+ for(k_count=0;k_count<k;k_count++) KERNEL_m2n4k1
+ SAVE_m2n4
+ }
+ if(m_count>0){
+ b_block_pointer = b_base_pointer;
+ INIT_m1n4
+ for(k_count=0;k_count<k;k_count++) KERNEL_m1n4k1
+ SAVE_m1n4
+ }
+ b_base_pointer += 4*k;
+ c_pointer += LDC*4-m;
+ }
+ for(;n_count>1;n_count-=2){
+ a_block_pointer = packed_a;
+ for(m_count=m;m_count>7;m_count-=8){
+ b_block_pointer = b_base_pointer;
+ INIT_m8n2
+ for(k_count=0;k_count<k;k_count++) KERNEL_m8n2k1
+ SAVE_m8n2
+ }
+ for(;m_count>3;m_count-=4){
+ b_block_pointer = b_base_pointer;
+ INIT_m4n2
+ for(k_count=0;k_count<k;k_count++) KERNEL_m4n2k1
+ SAVE_m4n2
+ }
+ for(;m_count>1;m_count-=2){
+ b_block_pointer = b_base_pointer;
+ INIT_m2n2
+ for(k_count=0;k_count<k;k_count++) KERNEL_m2n2k1
+ SAVE_m2n2
+ }
+ if(m_count>0){
+ b_block_pointer = b_base_pointer;
+ INIT_m1n2
+ for(k_count=0;k_count<k;k_count++) KERNEL_m1n2k1
+ SAVE_m1n2
+ }
+ b_base_pointer += 2*k;
+ c_pointer += LDC*2-m;
+ }
+ if(n_count>0){
+ a_block_pointer = packed_a;
+ for(m_count=m;m_count>7;m_count-=8){
+ b_block_pointer = b_base_pointer;
+ INIT_m8n1
+ for(k_count=0;k_count<k;k_count++) KERNEL_m8n1k1
+ SAVE_m8n1
+ }
+ for(;m_count>3;m_count-=4){
+ b_block_pointer = b_base_pointer;
+ INIT_m4n1
+ for(k_count=0;k_count<k;k_count++) KERNEL_m4n1k1
+ SAVE_m4n1
+ }
+ for(;m_count>1;m_count-=2){
+ b_block_pointer = b_base_pointer;
+ INIT_m2n1
+ for(k_count=0;k_count<k;k_count++) KERNEL_m2n1k1
+ SAVE_m2n1
+ }
+ if(m_count>0){
+ b_block_pointer = b_base_pointer;
+ INIT_m1n1
+ for(k_count=0;k_count<k;k_count++) KERNEL_m1n1k1
+ SAVE_m1n1
+ }
+ }
+}
+#ifdef ICOPY_4
+static void copy_4_to_8(double *src, double *dst, BLASLONG m, BLASLONG k){
+ double *src1 = src, *src2 = src + 4*k, *dst1 = dst;
+ __m256d tmp;
+ BLASLONG m_count,k_count;
+ for(m_count=m;m_count>7;m_count-=8){
+ for(k_count=k;k_count>0;k_count--){
+ tmp = _mm256_loadu_pd(src1);_mm256_storeu_pd(dst1+0,tmp);src1+=4;
+ tmp = _mm256_loadu_pd(src2);_mm256_storeu_pd(dst1+4,tmp);src2+=4;
+ dst1+=8;
+ }
+ src1+=4*k;src2+=4*k;
+ }
+ for(;m_count>0;m_count--){
+ for(k_count=k;k_count>0;k_count--){
+ *dst1 = (*src1); src1++; dst1++;
+ }
+ }
+}
+#endif
+int __attribute__ ((noinline)) CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG ldc){
+ if(m==0 || n==0 || k==0 || alpha == 0.0) return 0;
+ BLASLONG ndiv8 = n/8;double ALPHA = alpha;
+#ifdef ICOPY_4
+ double *packed_a = (double *)malloc(m*k*sizeof(double));
+ copy_4_to_8(A,packed_a,m,k);
+#else //ICOPY_8
+ double *packed_a = A;
+#endif
+ if(ndiv8>0) KERNEL_MAIN(packed_a,B,m,ndiv8,k,ldc,C,&ALPHA);
+ if(n>ndiv8*8) KERNEL_EDGE(packed_a,B+(int64_t)k*(int64_t)ndiv8*8,m,n-ndiv8*8,k,ldc,C+(int64_t)ldc*(int64_t)ndiv8*8,&ALPHA);
+#ifdef ICOPY_4
+ free(packed_a);packed_a=NULL;
+#endif
+ return 0;
+}
diff --git a/kernel/x86_64/dgemm_tcopy_16_skylakex.c b/kernel/x86_64/dgemm_tcopy_16_skylakex.c
new file mode 100644
index 000000000..ff2c48617
--- /dev/null
+++ b/kernel/x86_64/dgemm_tcopy_16_skylakex.c
@@ -0,0 +1,130 @@
+#include <stdio.h>
+#include "common.h"
+#include <immintrin.h>
+
+int CNAME(BLASLONG dim_second, BLASLONG dim_first, double *src, BLASLONG lead_dim, double *dst){
+ double *src1, *src2, *src3, *src4, *dst1;
+ __m512d z1,z2,z3,z4,z5,z6,z7,z8; __m256d y1,y2,y3,y4; __m128d x1,x2,x3,x4; double s1,s2,s3,s4;
+ BLASLONG dim1_count, dim2_count, src_inc;
+ src_inc = 4 * lead_dim - dim_first;
+ src1 = src; src2 = src + lead_dim; src3 = src2 + lead_dim; src4 = src3 + lead_dim;
+ for(dim2_count=dim_second; dim2_count>3; dim2_count-=4){
+ dst1 = dst + 16 * (dim_second - dim2_count);
+ for(dim1_count=dim_first; dim1_count>15; dim1_count-=16){
+ z1 = _mm512_loadu_pd(src1); z2 = _mm512_loadu_pd(src1+8); src1 += 16;
+ z3 =
_mm512_loadu_pd(src2); z4 = _mm512_loadu_pd(src2+8); src2 += 16; + z5 = _mm512_loadu_pd(src3); z6 = _mm512_loadu_pd(src3+8); src3 += 16; + z7 = _mm512_loadu_pd(src4); z8 = _mm512_loadu_pd(src4+8); src4 += 16; + _mm512_storeu_pd(dst1+ 0,z1); _mm512_storeu_pd(dst1+ 8,z2); + _mm512_storeu_pd(dst1+16,z3); _mm512_storeu_pd(dst1+24,z4); + _mm512_storeu_pd(dst1+32,z5); _mm512_storeu_pd(dst1+40,z6); + _mm512_storeu_pd(dst1+48,z7); _mm512_storeu_pd(dst1+56,z8); dst1 += 16 * dim_second; + } + dst1 -= 8 * (dim_second - dim2_count); + if(dim1_count>7){ + z1 = _mm512_loadu_pd(src1); src1 += 8; + z2 = _mm512_loadu_pd(src2); src2 += 8; + z3 = _mm512_loadu_pd(src3); src3 += 8; + z4 = _mm512_loadu_pd(src4); src4 += 8; + _mm512_storeu_pd(dst1+ 0,z1); _mm512_storeu_pd(dst1+ 8,z2); + _mm512_storeu_pd(dst1+16,z3); _mm512_storeu_pd(dst1+24,z4); dst1 += 8 * dim_second; + dim1_count -= 8; + } + dst1 -= 4 * (dim_second - dim2_count); + if(dim1_count>3){ + y1 = _mm256_loadu_pd(src1); src1 += 4; + y2 = _mm256_loadu_pd(src2); src2 += 4; + y3 = _mm256_loadu_pd(src3); src3 += 4; + y4 = _mm256_loadu_pd(src4); src4 += 4; + _mm256_storeu_pd(dst1+ 0,y1); _mm256_storeu_pd(dst1+ 4,y2); + _mm256_storeu_pd(dst1+ 8,y3); _mm256_storeu_pd(dst1+12,y4); dst1 += 4 * dim_second; + dim1_count -= 4; + } + dst1 -= 2 * (dim_second - dim2_count); + if(dim1_count>1){ + x1 = _mm_loadu_pd(src1); src1 += 2; + x2 = _mm_loadu_pd(src2); src2 += 2; + x3 = _mm_loadu_pd(src3); src3 += 2; + x4 = _mm_loadu_pd(src4); src4 += 2; + _mm_storeu_pd(dst1+0,x1); _mm_storeu_pd(dst1+2,x2); + _mm_storeu_pd(dst1+4,x3); _mm_storeu_pd(dst1+6,x4); dst1 += 2 * dim_second; + dim1_count -= 2; + } + dst1 -= dim_second - dim2_count; + if(dim1_count>0){ + s1 = *src1; src1++; s2 = *src2; src2++; s3 = *src3; src3++; s4 = *src4; src4++; + dst1[0] = s1; dst1[1] = s2; dst1[2] = s3; dst1[3] = s4; + } + src1 += src_inc; src2 += src_inc; src3 += src_inc; src4 += src_inc; + } + src_inc -= 2 * lead_dim; + for(; dim2_count>1; dim2_count-=2){ + dst1 = dst + 16 * (dim_second - dim2_count); + for(dim1_count=dim_first; dim1_count>15; dim1_count-=16){ + z1 = _mm512_loadu_pd(src1); z2 = _mm512_loadu_pd(src1+8); src1 += 16; + z3 = _mm512_loadu_pd(src2); z4 = _mm512_loadu_pd(src2+8); src2 += 16; + _mm512_storeu_pd(dst1+ 0,z1); _mm512_storeu_pd(dst1+ 8,z2); + _mm512_storeu_pd(dst1+16,z3); _mm512_storeu_pd(dst1+24,z4); dst1 += 16 * dim_second; + } + dst1 -= 8 * (dim_second - dim2_count); + if(dim1_count>7){ + z1 = _mm512_loadu_pd(src1); src1 += 8; + z2 = _mm512_loadu_pd(src2); src2 += 8; + _mm512_storeu_pd(dst1+ 0,z1); _mm512_storeu_pd(dst1+ 8,z2); dst1 += 8 * dim_second; + dim1_count -= 8; + } + dst1 -= 4 * (dim_second - dim2_count); + if(dim1_count>3){ + y1 = _mm256_loadu_pd(src1); src1 += 4; + y2 = _mm256_loadu_pd(src2); src2 += 4; + _mm256_storeu_pd(dst1+ 0,y1); _mm256_storeu_pd(dst1+ 4,y2); dst1 += 4 * dim_second; + dim1_count -= 4; + } + dst1 -= 2 * (dim_second - dim2_count); + if(dim1_count>1){ + x1 = _mm_loadu_pd(src1); src1 += 2; + x2 = _mm_loadu_pd(src2); src2 += 2; + _mm_storeu_pd(dst1+0,x1); _mm_storeu_pd(dst1+2,x2); dst1 += 2 * dim_second; + dim1_count -= 2; + } + dst1 -= dim_second - dim2_count; + if(dim1_count>0){ + s1 = *src1; src1++; s2 = *src2; src2++; + dst1[0] = s1; dst1[1] = s2; + } + src1 += src_inc; src2 += src_inc; + } + src_inc -= lead_dim; + for(; dim2_count>0; dim2_count--){ + dst1 = dst + 16 * (dim_second - dim2_count); + for(dim1_count=dim_first; dim1_count>15; dim1_count-=16){ + z1 = _mm512_loadu_pd(src1); z2 = _mm512_loadu_pd(src1+8); src1 += 16; + 
_mm512_storeu_pd(dst1+ 0,z1); _mm512_storeu_pd(dst1+ 8,z2); dst1 += 16 * dim_second; + } + dst1 -= 8 * (dim_second - dim2_count); + if(dim1_count>7){ + z1 = _mm512_loadu_pd(src1); src1 += 8; + _mm512_storeu_pd(dst1+ 0,z1); dst1 += 8 * dim_second; + dim1_count -= 8; + } + dst1 -= 4 * (dim_second - dim2_count); + if(dim1_count>3){ + y1 = _mm256_loadu_pd(src1); src1 += 4; + _mm256_storeu_pd(dst1+ 0,y1); dst1 += 4 * dim_second; + dim1_count -= 4; + } + dst1 -= 2 * (dim_second - dim2_count); + if(dim1_count>1){ + x1 = _mm_loadu_pd(src1); src1 += 2; + _mm_storeu_pd(dst1+0,x1); dst1 += 2 * dim_second; + dim1_count -= 2; + } + dst1 -= dim_second - dim2_count; + if(dim1_count>0){ + s1 = *src1; src1++; + dst1[0] = s1; + } + src1 += src_inc; + } + return 0; +} diff --git a/kernel/x86_64/dgemv_n_4.c b/kernel/x86_64/dgemv_n_4.c index 6d33641e9..da68db0cd 100644 --- a/kernel/x86_64/dgemv_n_4.c +++ b/kernel/x86_64/dgemv_n_4.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "dgemv_n_microk_nehalem-4.c" #elif defined(HASWELL) || defined(ZEN) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "dgemv_n_microk_haswell-4.c" -#elif defined (SKYLAKEX) +#elif defined (SKYLAKEX) || defined (COOPERLAKE) #include "dgemv_n_microk_skylakex-4.c" #endif diff --git a/kernel/x86_64/dgemv_n_microk_haswell-4.c b/kernel/x86_64/dgemv_n_microk_haswell-4.c index da0fa2fff..c20c0a030 100644 --- a/kernel/x86_64/dgemv_n_microk_haswell-4.c +++ b/kernel/x86_64/dgemv_n_microk_haswell-4.c @@ -105,9 +105,8 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT "r" (alpha) // 8 : "cc", "%xmm0", "%xmm1", "%xmm2", "%xmm3", - "%xmm4", "%xmm5", - "%xmm6", "%xmm7", - "%xmm8", "%xmm9", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); @@ -182,11 +181,10 @@ static void dgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT "r" (ap[1]), // 5 "r" (alpha) // 6 : "cc", - "%xmm0", "%xmm1", - "%xmm4", "%xmm5", - "%xmm6", - "%xmm8", - "%xmm12", "%xmm13", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); } diff --git a/kernel/x86_64/dgemv_n_microk_piledriver-4.c b/kernel/x86_64/dgemv_n_microk_piledriver-4.c index 466931b82..57fa426ba 100644 --- a/kernel/x86_64/dgemv_n_microk_piledriver-4.c +++ b/kernel/x86_64/dgemv_n_microk_piledriver-4.c @@ -140,7 +140,7 @@ static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", - "%xmm8", "%xmm9", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); @@ -235,9 +235,11 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT "r" (ap[3]), // 7 "r" (alpha) // 8 : "cc", + "%xmm0", "%xmm1", + "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", - "%xmm8", "%xmm9", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/dgemv_t_4.c b/kernel/x86_64/dgemv_t_4.c index ed672a757..a3bf28dc8 100644 --- a/kernel/x86_64/dgemv_t_4.c +++ b/kernel/x86_64/dgemv_t_4.c @@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" -#if defined(HASWELL) || defined(ZEN) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined (SKYLAKEX) +#if defined(HASWELL) || defined(ZEN) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "dgemv_t_microk_haswell-4.c" #endif diff --git a/kernel/x86_64/dgemv_t_microk_haswell-4.c b/kernel/x86_64/dgemv_t_microk_haswell-4.c index 958fd3e0a..b398307d3 100644 --- a/kernel/x86_64/dgemv_t_microk_haswell-4.c +++ b/kernel/x86_64/dgemv_t_microk_haswell-4.c @@ -117,7 +117,9 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) "r" (ap[2]), // 6 "r" (ap[3]) // 7 : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/dot.S b/kernel/x86_64/dot.S index e63d9cd89..a11d25e5d 100644 --- a/kernel/x86_64/dot.S +++ b/kernel/x86_64/dot.S @@ -50,6 +50,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + salq $BASE_SHIFT, INCX salq $BASE_SHIFT, INCY diff --git a/kernel/x86_64/drot.c b/kernel/x86_64/drot.c new file mode 100644 index 000000000..66e9ff907 --- /dev/null +++ b/kernel/x86_64/drot.c @@ -0,0 +1,205 @@ +#include "common.h" + +#if defined(SKYLAKEX) +#include "drot_microk_skylakex-2.c" +#elif defined(HASWELL) +#include "drot_microk_haswell-2.c" +#endif + +#ifndef HAVE_DROT_KERNEL +#include "../simd/intrin.h" + +static void drot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s) +{ + BLASLONG i = 0; +#if V_SIMD_F64 && V_SIMD > 256 + const int vstep = v_nlanes_f64; + const int unrollx4 = n & (-vstep * 4); + const int unrollx = n & -vstep; + + v_f64 __c = v_setall_f64(c); + v_f64 __s = v_setall_f64(s); + v_f64 vx0, vx1, vx2, vx3; + v_f64 vy0, vy1, vy2, vy3; + v_f64 vt0, vt1, vt2, vt3; + + for (; i < unrollx4; i += vstep * 4) { + vx0 = v_loadu_f64(x + i); + vx1 = v_loadu_f64(x + i + vstep); + vx2 = v_loadu_f64(x + i + vstep * 2); + vx3 = v_loadu_f64(x + i + vstep * 3); + vy0 = v_loadu_f64(y + i); + vy1 = v_loadu_f64(y + i + vstep); + vy2 = v_loadu_f64(y + i + vstep * 2); + vy3 = v_loadu_f64(y + i + vstep * 3); + + vt0 = v_mul_f64(__s, vy0); + vt1 = v_mul_f64(__s, vy1); + vt2 = v_mul_f64(__s, vy2); + vt3 = v_mul_f64(__s, vy3); + + vt0 = v_muladd_f64(__c, vx0, vt0); + vt1 = v_muladd_f64(__c, vx1, vt1); + vt2 = v_muladd_f64(__c, vx2, vt2); + vt3 = v_muladd_f64(__c, vx3, vt3); + + v_storeu_f64(x + i, vt0); + v_storeu_f64(x + i + vstep, vt1); + v_storeu_f64(x + i + vstep * 2, vt2); + v_storeu_f64(x + i + vstep * 3, vt3); + + vt0 = v_mul_f64(__s, vx0); + vt1 = v_mul_f64(__s, vx1); + vt2 = v_mul_f64(__s, vx2); + vt3 = v_mul_f64(__s, vx3); + + vt0 = v_mulsub_f64(__c, vy0, vt0); + vt1 = v_mulsub_f64(__c, vy1, vt1); + vt2 = v_mulsub_f64(__c, vy2, vt2); + vt3 = v_mulsub_f64(__c, vy3, vt3); + + v_storeu_f64(y + i, vt0); + v_storeu_f64(y + i + vstep, vt1); + v_storeu_f64(y + i + vstep * 2, vt2); + v_storeu_f64(y + i + vstep * 3, vt3); + } + + for (; i < unrollx; i += vstep) { + vx0 = v_loadu_f64(x + i); + vy0 = v_loadu_f64(y + i); + + vt0 = v_mul_f64(__s, vy0); + vt0 = v_muladd_f64(__c, vx0, vt0); + v_storeu_f64(x + i, vt0); + + vt0 = v_mul_f64(__s, vx0); + vt0 = v_mulsub_f64(__c, vy0, vt0); + v_storeu_f64(y + i, vt0); + } +#else + FLOAT f0, f1, f2, f3; + FLOAT x0, x1, x2, x3; + FLOAT g0, g1, g2, g3; + FLOAT y0, y1, y2, y3; + + FLOAT* xp = x; + FLOAT* yp = y; + + BLASLONG n1 = n & (~7); + + while (i < n1) { + x0 = xp[0]; + y0 = yp[0]; + x1 = xp[1]; + y1 = yp[1]; + x2 = 
xp[2];
+ y2 = yp[2];
+ x3 = xp[3];
+ y3 = yp[3];
+
+ f0 = c*x0 + s*y0;
+ g0 = c*y0 - s*x0;
+ f1 = c*x1 + s*y1;
+ g1 = c*y1 - s*x1;
+ f2 = c*x2 + s*y2;
+ g2 = c*y2 - s*x2;
+ f3 = c*x3 + s*y3;
+ g3 = c*y3 - s*x3;
+
+ xp[0] = f0;
+ yp[0] = g0;
+ xp[1] = f1;
+ yp[1] = g1;
+ xp[2] = f2;
+ yp[2] = g2;
+ xp[3] = f3;
+ yp[3] = g3;
+
+ xp += 4;
+ yp += 4;
+ i += 4;
+ }
+#endif
+ while (i < n) {
+ FLOAT temp = c*x[i] + s*y[i];
+ y[i] = c*y[i] - s*x[i];
+ x[i] = temp;
+
+ i++;
+ }
+}
+
+#endif
+static void rot_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s)
+{
+ BLASLONG i = 0;
+ BLASLONG ix = 0, iy = 0;
+
+ FLOAT temp;
+
+ if (n <= 0)
+ return;
+ if ((inc_x == 1) && (inc_y == 1)) {
+ drot_kernel(n, x, y, c, s);
+ }
+ else {
+ while (i < n) {
+ temp = c * x[ix] + s * y[iy];
+ y[iy] = c * y[iy] - s * x[ix];
+ x[ix] = temp;
+
+ ix += inc_x;
+ iy += inc_y;
+ i++;
+ }
+ }
+ return;
+}
+
+
+#if defined(SMP)
+static int rot_thread_function(blas_arg_t *args)
+{
+
+ rot_compute(args->m,
+ args->a, args->lda,
+ args->b, args->ldb,
+ ((FLOAT *)args->alpha)[0],
+ ((FLOAT *)args->alpha)[1]);
+ return 0;
+}
+
+extern int blas_level1_thread(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, void *c, BLASLONG ldc, int (*function)(), int nthreads);
+#endif
+int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s)
+{
+#if defined(SMP)
+ int nthreads;
+ FLOAT alpha[2]={c, s};
+ FLOAT dummy_c;
+#endif
+
+#if defined(SMP)
+ if (inc_x == 0 || inc_y == 0 || n <= 100000) {
+ nthreads = 1;
+ }
+ else {
+ nthreads = num_cpu_avail(1);
+ }
+
+ if (nthreads == 1) {
+ rot_compute(n, x, inc_x, y, inc_y, c, s);
+ }
+ else {
+#if defined(DOUBLE)
+ int mode = BLAS_DOUBLE | BLAS_REAL | BLAS_PTHREAD;
+#else
+ int mode = BLAS_SINGLE | BLAS_REAL | BLAS_PTHREAD;
+#endif
+ blas_level1_thread(mode, n, 0, 0, alpha, x, inc_x, y, inc_y, &dummy_c, 0, (void *)rot_thread_function, nthreads);
+ }
+#else
+ rot_compute(n, x, inc_x, y, inc_y, c, s);
+#endif
+ return 0;
+}
diff --git a/kernel/x86_64/drot_microk_haswell-2.c b/kernel/x86_64/drot_microk_haswell-2.c
new file mode 100644
index 000000000..72a87696e
--- /dev/null
+++ b/kernel/x86_64/drot_microk_haswell-2.c
@@ -0,0 +1,87 @@
+/* need a new enough GCC for avx512 support */
+#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_DROT_KERNEL 1
+
+#include <immintrin.h>
+#include <stdint.h>
+
+static void drot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
+{
+ BLASLONG i = 0;
+
+ BLASLONG tail_index_4 = n&(~3);
+ BLASLONG tail_index_16 = n&(~15);
+
+ __m256d c_256, s_256;
+ if (n >= 4) {
+ c_256 = _mm256_set1_pd(c);
+ s_256 = _mm256_set1_pd(s);
+ }
+
+ __m256d x0, x1, x2, x3;
+ __m256d y0, y1, y2, y3;
+ __m256d t0, t1, t2, t3;
+
+ for (i = 0; i < tail_index_16; i += 16) {
+ x0 = _mm256_loadu_pd(&x[i + 0]);
+ x1 = _mm256_loadu_pd(&x[i + 4]);
+ x2 = _mm256_loadu_pd(&x[i + 8]);
+ x3 = _mm256_loadu_pd(&x[i +12]);
+ y0 = _mm256_loadu_pd(&y[i + 0]);
+ y1 = _mm256_loadu_pd(&y[i + 4]);
+ y2 = _mm256_loadu_pd(&y[i + 8]);
+ y3 = _mm256_loadu_pd(&y[i +12]);
+
+ t0 = _mm256_mul_pd(s_256, y0);
+ t1 = _mm256_mul_pd(s_256, y1);
+ t2 = _mm256_mul_pd(s_256, y2);
+ t3 = _mm256_mul_pd(s_256, y3);
+
+ t0 = _mm256_fmadd_pd(c_256, x0, t0);
+ t1 = _mm256_fmadd_pd(c_256, x1, t1);
+ t2 = _mm256_fmadd_pd(c_256, x2, t2);
+ t3 = _mm256_fmadd_pd(c_256, x3, t3);
+
+ _mm256_storeu_pd(&x[i + 0], t0);
+ _mm256_storeu_pd(&x[i + 4], t1);
+ _mm256_storeu_pd(&x[i + 8], t2);
+ _mm256_storeu_pd(&x[i +12], t3);
+
+ t0 = _mm256_mul_pd(s_256, x0);
+ t1 = _mm256_mul_pd(s_256, x1);
+ t2 = _mm256_mul_pd(s_256, x2);
+ t3 = _mm256_mul_pd(s_256, x3);
+
+ t0 = _mm256_fmsub_pd(c_256, y0, t0);
+ t1 = _mm256_fmsub_pd(c_256, y1, t1);
+ t2 = _mm256_fmsub_pd(c_256, y2, t2);
+ t3 = _mm256_fmsub_pd(c_256, y3, t3);
+
+ _mm256_storeu_pd(&y[i + 0], t0);
+ _mm256_storeu_pd(&y[i + 4], t1);
+ _mm256_storeu_pd(&y[i + 8], t2);
+ _mm256_storeu_pd(&y[i +12], t3);
+
+ }
+
+ for (i = tail_index_16; i < tail_index_4; i += 4) {
+ x0 = _mm256_loadu_pd(&x[i]);
+ y0 = _mm256_loadu_pd(&y[i]);
+
+ t0 = _mm256_mul_pd(s_256, y0);
+ t0 = _mm256_fmadd_pd(c_256, x0, t0);
+ _mm256_storeu_pd(&x[i], t0);
+
+ t0 = _mm256_mul_pd(s_256, x0);
+ t0 = _mm256_fmsub_pd(c_256, y0, t0);
+ _mm256_storeu_pd(&y[i], t0);
+ }
+
+ for (i = tail_index_4; i < n; ++i) {
+ FLOAT temp = c * x[i] + s * y[i];
+ y[i] = c * y[i] - s * x[i];
+ x[i] = temp;
+ }
+}
+#endif
diff --git a/kernel/x86_64/drot_microk_skylakex-2.c b/kernel/x86_64/drot_microk_skylakex-2.c
new file mode 100644
index 000000000..4e862e663
--- /dev/null
+++ b/kernel/x86_64/drot_microk_skylakex-2.c
@@ -0,0 +1,94 @@
+/* need a new enough GCC for avx512 support */
+#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_DROT_KERNEL 1
+
+#include <immintrin.h>
+#include <stdint.h>
+
+static void drot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
+{
+ BLASLONG i = 0;
+ BLASLONG n1 = n;
+
+ BLASLONG tail_index_8 = 0;
+ BLASLONG tail_index_32 = 0;
+
+ __m512d c_512 = _mm512_set1_pd(c);
+ __m512d s_512 = _mm512_set1_pd(s);
+
+ tail_index_8 = n1 & (~7);
+ tail_index_32 = n1 & (~31);
+
+
+ __m512d x0, x1, x2, x3;
+ __m512d y0, y1, y2, y3;
+ __m512d t0, t1, t2, t3;
+
+ for (i = 0; i < tail_index_32; i += 32) {
+ x0 = _mm512_loadu_pd(&x[i + 0]);
+ x1 = _mm512_loadu_pd(&x[i + 8]);
+ x2 = _mm512_loadu_pd(&x[i +16]);
+ x3 = _mm512_loadu_pd(&x[i +24]);
+ y0 = _mm512_loadu_pd(&y[i + 0]);
+ y1 = _mm512_loadu_pd(&y[i + 8]);
+ y2 = _mm512_loadu_pd(&y[i +16]);
+ y3 = _mm512_loadu_pd(&y[i +24]);
+
+ t0 = _mm512_mul_pd(s_512, y0);
+ t1 = _mm512_mul_pd(s_512, y1);
+ t2 = _mm512_mul_pd(s_512, y2);
+ t3 = _mm512_mul_pd(s_512, y3);
+
+ t0 = _mm512_fmadd_pd(c_512, x0, t0);
+ t1 = _mm512_fmadd_pd(c_512, x1, t1);
+ t2 = _mm512_fmadd_pd(c_512, x2, t2);
+ t3 = _mm512_fmadd_pd(c_512, x3, t3);
+
+ _mm512_storeu_pd(&x[i + 0], t0);
+ _mm512_storeu_pd(&x[i + 8], t1);
+ _mm512_storeu_pd(&x[i +16], t2);
+ _mm512_storeu_pd(&x[i +24], t3);
+
+ t0 = _mm512_mul_pd(s_512, x0);
+ t1 = _mm512_mul_pd(s_512, x1);
+ t2 = _mm512_mul_pd(s_512, x2);
+ t3 = _mm512_mul_pd(s_512, x3);
+
+ t0 = _mm512_fmsub_pd(c_512, y0, t0);
+ t1 = _mm512_fmsub_pd(c_512, y1, t1);
+ t2 = _mm512_fmsub_pd(c_512, y2, t2);
+ t3 = _mm512_fmsub_pd(c_512, y3, t3);
+
+ _mm512_storeu_pd(&y[i + 0], t0);
+ _mm512_storeu_pd(&y[i + 8], t1);
+ _mm512_storeu_pd(&y[i +16], t2);
+ _mm512_storeu_pd(&y[i +24], t3);
+ }
+
+ for (i = tail_index_32; i < tail_index_8; i += 8) {
+ x0 = _mm512_loadu_pd(&x[i]);
+ y0 = _mm512_loadu_pd(&y[i]);
+
+ t0 = _mm512_mul_pd(s_512, y0);
+ t0 = _mm512_fmadd_pd(c_512, x0, t0);
+ _mm512_storeu_pd(&x[i], t0);
+
+ t0 = _mm512_mul_pd(s_512, x0);
+ t0 = _mm512_fmsub_pd(c_512, y0, t0);
+ _mm512_storeu_pd(&y[i], t0);
+ }
+
+ if ((n1&7) > 0) {
+ unsigned char tail_mask8 = (((unsigned char) 0xff) >> (8 -(n1&7)));
+ __m512d tail_x = _mm512_maskz_loadu_pd(*((__mmask8*) &tail_mask8), &x[tail_index_8]);
+ __m512d tail_y =
_mm512_maskz_loadu_pd(*((__mmask8*) &tail_mask8), &y[tail_index_8]); + __m512d temp = _mm512_mul_pd(s_512, tail_y); + temp = _mm512_fmadd_pd(c_512, tail_x, temp); + _mm512_mask_storeu_pd(&x[tail_index_8],*((__mmask8*)&tail_mask8), temp); + temp = _mm512_mul_pd(s_512, tail_x); + temp = _mm512_fmsub_pd(c_512, tail_y, temp); + _mm512_mask_storeu_pd(&y[tail_index_8], *((__mmask8*)&tail_mask8), temp); + } +} +#endif diff --git a/kernel/x86_64/dscal.c b/kernel/x86_64/dscal.c index d0d7801fd..d1270d20b 100644 --- a/kernel/x86_64/dscal.c +++ b/kernel/x86_64/dscal.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "dscal_microk_sandy-2.c" #elif defined(HASWELL) || defined(ZEN) #include "dscal_microk_haswell-2.c" -#elif defined (SKYLAKEX) +#elif defined (SKYLAKEX) || defined (COOPERLAKE) #include "dscal_microk_skylakex-2.c" #endif @@ -136,10 +136,10 @@ static void dscal_kernel_inc_8(BLASLONG n, FLOAT *alpha, FLOAT *x, BLASLONG inc_ "jnz 1b \n\t" : - "+r" (n) // 0 + "+r" (n), // 0 + "+r" (x), // 1 + "+r" (x1) // 2 : - "r" (x), // 1 - "r" (x1), // 2 "r" (alpha), // 3 "r" (inc_x), // 4 "r" (inc_x3) // 5 diff --git a/kernel/x86_64/dsymv_L.c b/kernel/x86_64/dsymv_L.c index a722cc9df..573377ee0 100644 --- a/kernel/x86_64/dsymv_L.c +++ b/kernel/x86_64/dsymv_L.c @@ -32,7 +32,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "dsymv_L_microk_bulldozer-2.c" #elif defined(HASWELL) || defined(ZEN) #include "dsymv_L_microk_haswell-2.c" -#elif defined (SKYLAKEX) +#elif defined (SKYLAKEX) || defined (COOPERLAKE) #include "dsymv_L_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "dsymv_L_microk_sandy-2.c" diff --git a/kernel/x86_64/dsymv_L_microk_skylakex-2.c b/kernel/x86_64/dsymv_L_microk_skylakex-2.c index 8244dffa1..f0df5aaa8 100644 --- a/kernel/x86_64/dsymv_L_microk_skylakex-2.c +++ b/kernel/x86_64/dsymv_L_microk_skylakex-2.c @@ -33,6 +33,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define HAVE_KERNEL_4x4 1 +#if defined(__clang_patchlevel__) && __clang_major__ == 9 && __clang_minor__ == 0 && __clang_patchlevel__ == 0 +#pragma clang optimize off +#endif +#if defined(__apple_build_version__) && __clang_major__ == 11 && __clang_minor__ == 0 && __clang_patchlevel__ == 3 +#pragma clang optimize off +#endif static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FLOAT *y, FLOAT *temp1, FLOAT *temp2) { @@ -155,7 +161,15 @@ static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL temp2[1] += half_accum1[0]; temp2[2] += half_accum2[0]; temp2[3] += half_accum3[0]; -} +} + +#if defined(__clang_patchlevel__) && __clang_major__ == 9 && __clang_minor__ == 0 && __clang_patchlevel__ == 0 +#pragma clang optimize on +#endif +#if defined(__apple_build_version__) && __clang_major__ == 11 && __clang_minor__ == 0 && __clang_patchlevel__ == 3 +#pragma clang optimize on +#endif + #else #include "dsymv_L_microk_haswell-2.c" -#endif \ No newline at end of file +#endif diff --git a/kernel/x86_64/dsymv_U.c b/kernel/x86_64/dsymv_U.c index 431e4bb3f..530ac8b1d 100644 --- a/kernel/x86_64/dsymv_U.c +++ b/kernel/x86_64/dsymv_U.c @@ -31,7 +31,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "dsymv_U_microk_bulldozer-2.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "dsymv_U_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "dsymv_U_microk_sandy-2.c" diff --git a/kernel/x86_64/dtobf16_microk_cooperlake.c b/kernel/x86_64/dtobf16_microk_cooperlake.c new file mode 100644 index 000000000..9b8ac4714 --- /dev/null +++ b/kernel/x86_64/dtobf16_microk_cooperlake.c @@ -0,0 +1,104 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+
+/* need a new enough GCC for avx512 support */
+#if (( defined(__GNUC__) && __GNUC__ >= 10 && defined(__AVX512BF16__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_TOBF16_ACCL_KERNEL 1
+#include "common.h"
+#include <immintrin.h>
+
+static void tobf16_accl_kernel(BLASLONG n, const double * in, bfloat16 * out)
+{
+ /* Get the number of unaligned header elements ahead of the first 64-byte
+ * boundary for avx512 processing (assume the input double array is
+ * naturally aligned) */
+ int align_header = ((64 - ((uintptr_t)in & (uintptr_t)0x3f)) >> 3) & 0x7;
+
+ if (n < align_header) {align_header = n;}
+
+ if (align_header != 0) {
+ unsigned char align_mask8 = (((unsigned char)0xff) >> (8-align_header));
+ __m512d a = _mm512_maskz_loadu_pd(*((__mmask8*) &align_mask8), &in[0]);
+ _mm_mask_storeu_epi16(&out[0], *((__mmask8*) &align_mask8), (__m128i) _mm256_cvtneps_pbh(_mm512_cvtpd_ps(a)));
+ }
+
+ if (n == align_header) {
+ return;
+ } else {
+ n -= align_header;
+ in += align_header;
+ out += align_header;
+ }
+
+ int tail_index_8 = n&(~7);
+ int tail_index_32 = n&(~31);
+ int tail_index_128 = n&(~127);
+ unsigned char tail_mask8 = (((unsigned char) 0xff) >> (8 -(n&7)));
+
+ /* Processing the main chunk with 128-elements per round */
+ for (int i = 0; i < tail_index_128; i += 128) {
+ // Fold 1
+ __m512 data1_512_low = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[i+ 0]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[i+ 8])), 1);
+ __m512 data1_512_high = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[i+16]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[i+24])), 1);
+ _mm512_storeu_si512(&out[i+ 0], (__m512i) _mm512_cvtne2ps_pbh(data1_512_high, data1_512_low));
+
+ // Fold 2
+ __m512 data2_512_low = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[i+32]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[i+40])), 1);
+ __m512 data2_512_high = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[i+48]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[i+56])), 1);
+ _mm512_storeu_si512(&out[i+32], (__m512i) _mm512_cvtne2ps_pbh(data2_512_high, data2_512_low));
+
+ // Fold 3
+ __m512 data3_512_low = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[i+64]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[i+72])), 1);
+ __m512 data3_512_high = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[i+80]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[i+88])), 1);
+ _mm512_storeu_si512(&out[i+64], (__m512i) _mm512_cvtne2ps_pbh(data3_512_high, data3_512_low));
+
+ // Fold 4
+ __m512 data4_512_low = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[i+96]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[i+104])), 1);
+ __m512 data4_512_high = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[i+112]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[i+120])), 1);
+ _mm512_storeu_si512(&out[i+96], (__m512i) _mm512_cvtne2ps_pbh(data4_512_high, data4_512_low));
+ }
+
+ /* Processing the remaining <128 chunk with 32-elements per round */
+ for (int j = tail_index_128; j < tail_index_32; j += 32) {
+ __m512 data1_512_low = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[j+ 0]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[j+ 8])), 1);
+ __m512 data1_512_high = _mm512_insertf32x8(_mm512_castps256_ps512(_mm512_cvtpd_ps(_mm512_load_pd(&in[j+16]))), _mm512_cvtpd_ps(_mm512_load_pd(&in[j+24])), 1);
+
_mm512_storeu_si512(&out[j], (__m512i) _mm512_cvtne2ps_pbh(data1_512_high, data1_512_low)); + } + + /* Processing the remaining <32 chunk with 8-elements per round */ + for (int j = tail_index_32; j < tail_index_8; j += 8) { + _mm_storeu_si128((__m128i *)&out[j], (__m128i) _mm256_cvtneps_pbh(_mm512_cvtpd_ps(_mm512_load_pd(&in[j])))); + } + + /* Processing the remaining <8 chunk with masked processing */ + if ((n&7) > 0) { + __m512d data_512 = _mm512_maskz_load_pd(*((__mmask8*) &tail_mask8), &in[tail_index_8]); + _mm_mask_storeu_epi16(&out[tail_index_8], *((__mmask8*) &tail_mask8), (__m128i) _mm256_cvtneps_pbh(_mm512_cvtpd_ps(data_512))); + } +} + +#endif diff --git a/kernel/x86_64/dtrmm_kernel_4x8_haswell.c b/kernel/x86_64/dtrmm_kernel_4x8_haswell.c index 651736b89..2acdc4615 100644 --- a/kernel/x86_64/dtrmm_kernel_4x8_haswell.c +++ b/kernel/x86_64/dtrmm_kernel_4x8_haswell.c @@ -33,7 +33,7 @@ static void dtrmm_kernel_4x8( BLASLONG n, FLOAT *alpha ,FLOAT *a, FLOAT *b, FLOA " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm4 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm8 \n\t" - " vpermpd $0xb1 , %%ymm0 , %%ymm0 \n\t" + " vpermilpd $0x05 , %%ymm0 , %%ymm0 \n\t" " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm5 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm9 \n\t" @@ -41,7 +41,7 @@ static void dtrmm_kernel_4x8( BLASLONG n, FLOAT *alpha ,FLOAT *a, FLOAT *b, FLOA " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm6 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm10 \n\t" - " vpermpd $0xb1 , %%ymm0 , %%ymm0 \n\t" + " vpermilpd $0x05 , %%ymm0 , %%ymm0 \n\t" " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm7 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm11 \n\t" @@ -62,18 +62,16 @@ static void dtrmm_kernel_4x8( BLASLONG n, FLOAT *alpha ,FLOAT *a, FLOAT *b, FLOA " vmulpd %%ymm0 , %%ymm10, %%ymm10 \n\t" " vmulpd %%ymm0 , %%ymm11, %%ymm11 \n\t" - " vpermpd $0xb1 , %%ymm5 , %%ymm5 \n\t" - " vpermpd $0xb1 , %%ymm7 , %%ymm7 \n\t" + " vpermilpd $0x05 , %%ymm5 , %%ymm5 \n\t" + " vpermilpd $0x05 , %%ymm7 , %%ymm7 \n\t" " vblendpd $0x0a , %%ymm5 , %%ymm4 , %%ymm0 \n\t" " vblendpd $0x05 , %%ymm5 , %%ymm4 , %%ymm1 \n\t" " vblendpd $0x0a , %%ymm7 , %%ymm6 , %%ymm2 \n\t" " vblendpd $0x05 , %%ymm7 , %%ymm6 , %%ymm3 \n\t" - " vpermpd $0x1b , %%ymm2 , %%ymm2 \n\t" - " vpermpd $0x1b , %%ymm3 , %%ymm3 \n\t" - " vpermpd $0xb1 , %%ymm2 , %%ymm2 \n\t" - " vpermpd $0xb1 , %%ymm3 , %%ymm3 \n\t" + " vperm2f128 $0x01 , %%ymm2 , %%ymm2 , %%ymm2 \n\t" + " vperm2f128 $0x01 , %%ymm3 , %%ymm3 , %%ymm3 \n\t" " vblendpd $0x03 , %%ymm0 , %%ymm2 , %%ymm4 \n\t" " vblendpd $0x03 , %%ymm1 , %%ymm3 , %%ymm5 \n\t" @@ -85,18 +83,16 @@ static void dtrmm_kernel_4x8( BLASLONG n, FLOAT *alpha ,FLOAT *a, FLOAT *b, FLOA " vmovups %%ymm6 , (%7) \n\t" " vmovups %%ymm7 , (%8) \n\t" - " vpermpd $0xb1 , %%ymm9 , %%ymm9 \n\t" - " vpermpd $0xb1 , %%ymm11, %%ymm11 \n\t" + " vpermilpd $0x05 , %%ymm9 , %%ymm9 \n\t" + " vpermilpd $0x05 , %%ymm11, %%ymm11 \n\t" " vblendpd $0x0a , %%ymm9 , %%ymm8 , %%ymm0 \n\t" " vblendpd $0x05 , %%ymm9 , %%ymm8 , %%ymm1 \n\t" " vblendpd $0x0a , %%ymm11, %%ymm10, %%ymm2 \n\t" " vblendpd $0x05 , %%ymm11, %%ymm10, %%ymm3 \n\t" - " vpermpd $0x1b , %%ymm2 , %%ymm2 \n\t" - " vpermpd $0x1b , %%ymm3 , %%ymm3 \n\t" - " vpermpd $0xb1 , %%ymm2 , %%ymm2 \n\t" - " vpermpd $0xb1 , %%ymm3 , %%ymm3 \n\t" + " vperm2f128 $0x01 , %%ymm2 , %%ymm2 , %%ymm2 \n\t" + " vperm2f128 $0x01 , %%ymm3 , %%ymm3 , %%ymm3 \n\t" " vblendpd $0x03 , %%ymm0 , %%ymm2 , %%ymm4 \n\t" " vblendpd $0x03 , %%ymm1 , %%ymm3 , %%ymm5 \n\t" diff --git a/kernel/x86_64/dtrsm_kernel_RN_haswell.c 
b/kernel/x86_64/dtrsm_kernel_RN_haswell.c index 9ab78fc8e..cb939e762 100644 --- a/kernel/x86_64/dtrsm_kernel_RN_haswell.c +++ b/kernel/x86_64/dtrsm_kernel_RN_haswell.c @@ -132,7 +132,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON "1: \n\t" " vmovups (%8,%1,4), %%ymm4 \n\t" // read a - " vpermpd $0xb1 , %%ymm0 , %%ymm3 \n\t" + " vpermilpd $0x05 , %%ymm0 , %%ymm3 \n\t" // was vpermpd 0xb1 " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm8 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm12 \n\t" @@ -143,7 +143,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON " vpermpd $0x1b , %%ymm3 , %%ymm0 \n\t" " vmovups 32(%9,%1,8), %%ymm6 \n\t" // read b1 - " vpermpd $0xb1 , %%ymm0 , %%ymm3 \n\t" + " vpermilpd $0x05 , %%ymm0 , %%ymm3 \n\t" " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm10 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm14 \n\t" @@ -160,7 +160,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm8 \n\t" " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm12 \n\t" - " vpermpd $0xb1 , %%ymm4 , %%ymm4 \n\t" + " vpermilpd $0x05 , %%ymm4 , %%ymm4 \n\t" " vmovups (%9,%1,8), %%ymm1 \n\t" // read b0 " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm9 \n\t" " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm13 \n\t" @@ -170,7 +170,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm10 \n\t" " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm14 \n\t" - " vpermpd $0xb1 , %%ymm4 , %%ymm4 \n\t" + " vpermilpd $0x05 , %%ymm4 , %%ymm4 \n\t" " addq $8, %1 \n\t" " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm11 \n\t" " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm15 \n\t" @@ -185,7 +185,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm8 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm12 \n\t" - " vpermpd $0xb1 , %%ymm0 , %%ymm0 \n\t" + " vpermilpd $0x05 , %%ymm0 , %%ymm0 \n\t" " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm9 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm13 \n\t" @@ -193,7 +193,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm10 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm14 \n\t" - " vpermpd $0xb1 , %%ymm0 , %%ymm0 \n\t" + " vpermilpd $0x05 , %%ymm0 , %%ymm0 \n\t" " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm11 \n\t" " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm15 \n\t" @@ -204,7 +204,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm8 \n\t" " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm12 \n\t" - " vpermpd $0xb1 , %%ymm4 , %%ymm4 \n\t" + " vpermilpd $0x05 , %%ymm4 , %%ymm4 \n\t" " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm9 \n\t" " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm13 \n\t" @@ -212,42 +212,38 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm10 \n\t" " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm14 \n\t" - " vpermpd $0xb1 , %%ymm4 , %%ymm4 \n\t" + " vpermilpd $0x05 , %%ymm4 , %%ymm4 \n\t" " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm11 \n\t" " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm15 \n\t" "3: \n\t" - " vpermpd $0xb1 , %%ymm9 , %%ymm9 \n\t" - " vpermpd $0xb1 , %%ymm11, %%ymm11 \n\t" + " vpermilpd $0x05 , %%ymm9 , %%ymm9 \n\t" + " vpermilpd $0x05 , %%ymm11, %%ymm11 \n\t" " vblendpd $0x0a , %%ymm9 , %%ymm8 , %%ymm0 \n\t" " vblendpd $0x05 , %%ymm9 , %%ymm8 , %%ymm1 \n\t" " vblendpd $0x0a , %%ymm11, %%ymm10, %%ymm2 \n\t" " vblendpd $0x05 , %%ymm11, %%ymm10, %%ymm3 
\n\t" - " vpermpd $0x1b , %%ymm2 , %%ymm2 \n\t" - " vpermpd $0x1b , %%ymm3 , %%ymm3 \n\t" - " vpermpd $0xb1 , %%ymm2 , %%ymm2 \n\t" - " vpermpd $0xb1 , %%ymm3 , %%ymm3 \n\t" + " vperm2f128 $0x01 , %%ymm2 , %%ymm2 , %%ymm2 \n\t" + " vperm2f128 $0x01 , %%ymm3 , %%ymm3 , %%ymm3 \n\t" " vblendpd $0x03 , %%ymm0 , %%ymm2 , %%ymm8 \n\t" " vblendpd $0x03 , %%ymm1 , %%ymm3 , %%ymm9 \n\t" " vblendpd $0x03 , %%ymm2 , %%ymm0 , %%ymm10 \n\t" " vblendpd $0x03 , %%ymm3 , %%ymm1 , %%ymm11 \n\t" - " vpermpd $0xb1 , %%ymm13, %%ymm13 \n\t" - " vpermpd $0xb1 , %%ymm15, %%ymm15 \n\t" + " vpermilpd $0x05 , %%ymm13, %%ymm13 \n\t" + " vpermilpd $0x05 , %%ymm15, %%ymm15 \n\t" " vblendpd $0x0a , %%ymm13, %%ymm12, %%ymm0 \n\t" " vblendpd $0x05 , %%ymm13, %%ymm12, %%ymm1 \n\t" " vblendpd $0x0a , %%ymm15, %%ymm14, %%ymm2 \n\t" " vblendpd $0x05 , %%ymm15, %%ymm14, %%ymm3 \n\t" - " vpermpd $0x1b , %%ymm2 , %%ymm2 \n\t" - " vpermpd $0x1b , %%ymm3 , %%ymm3 \n\t" - " vpermpd $0xb1 , %%ymm2 , %%ymm2 \n\t" - " vpermpd $0xb1 , %%ymm3 , %%ymm3 \n\t" + " vperm2f128 $0x01 , %%ymm2 , %%ymm2 , %%ymm2 \n\t" + " vperm2f128 $0x01 , %%ymm3 , %%ymm3 , %%ymm3 \n\t" " vblendpd $0x03 , %%ymm0 , %%ymm2 , %%ymm12 \n\t" " vblendpd $0x03 , %%ymm1 , %%ymm3 , %%ymm13 \n\t" diff --git a/kernel/x86_64/iamax.S b/kernel/x86_64/iamax.S index 79e1bae1d..00999e25f 100644 --- a/kernel/x86_64/iamax.S +++ b/kernel/x86_64/iamax.S @@ -60,6 +60,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + salq $BASE_SHIFT, INCX fldz diff --git a/kernel/x86_64/iamax_sse.S b/kernel/x86_64/iamax_sse.S index f22e34a1d..14c7f43ec 100644 --- a/kernel/x86_64/iamax_sse.S +++ b/kernel/x86_64/iamax_sse.S @@ -48,11 +48,24 @@ #define XX %r10 #define MM %r11 +#define MAXPS maxps +#define MAXSS maxss #ifdef USE_MIN -#define maxps minps -#define maxss minss +#undef MAXPS +#undef MAXSS +#define MAXPS minps +#define MAXSS minss #endif +.macro LOAD_AND_COMPARE_TO_MXX REG + movss 0 * SIZE(X), \REG + addq INCX, X +#ifdef USE_ABS + andps %xmm15, \REG +#endif + cmpeqss %xmm0, \REG +.endm + #include "l1param.h" PROLOGUE @@ -103,7 +116,7 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxss %xmm4, %xmm0 + MAXSS %xmm4, %xmm0 decq M addq $SIZE, X ALIGN_3 @@ -117,7 +130,7 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxps %xmm4, %xmm1 + MAXPS %xmm4, %xmm1 subq $2, M addq $2 * SIZE, X ALIGN_3 @@ -137,25 +150,25 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxps %xmm4, %xmm0 + MAXPS %xmm4, %xmm0 movaps 4 * SIZE(X), %xmm5 #ifdef USE_ABS andps %xmm15, %xmm5 #endif - maxps %xmm5, %xmm1 + MAXPS %xmm5, %xmm1 movaps 8 * SIZE(X), %xmm6 #ifdef USE_ABS andps %xmm15, %xmm6 #endif - maxps %xmm6, %xmm2 + MAXPS %xmm6, %xmm2 movaps 12 * SIZE(X), %xmm7 #ifdef USE_ABS andps %xmm15, %xmm7 #endif - maxps %xmm7, %xmm3 + MAXPS %xmm7, %xmm3 addq $16 * SIZE, X decq I @@ -173,13 +186,13 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxps %xmm4, %xmm0 + MAXPS %xmm4, %xmm0 movaps 4 * SIZE(X), %xmm5 #ifdef USE_ABS andps %xmm15, %xmm5 #endif - maxps %xmm5, %xmm1 + MAXPS %xmm5, %xmm1 addq $8 * SIZE, X ALIGN_3 @@ -191,7 +204,7 @@ #ifdef USE_ABS andps %xmm15, %xmm6 #endif - maxps %xmm6, %xmm2 + MAXPS %xmm6, %xmm2 addq $4 * SIZE, X ALIGN_3 @@ -204,7 +217,7 @@ #ifdef USE_ABS andps %xmm15, %xmm7 #endif - maxps %xmm7, %xmm3 + MAXPS %xmm7, %xmm3 addq $2 * SIZE, X .L18: @@ -215,22 +228,22 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxss %xmm4, %xmm0 + MAXSS %xmm4, %xmm0 ALIGN_3 .L20: movq XX, X movq MM, M - maxps %xmm1, %xmm0 - maxps %xmm3, %xmm2 - maxps %xmm2, %xmm0 + MAXPS %xmm1, %xmm0 + MAXPS %xmm3, %xmm2 + 
MAXPS %xmm2, %xmm0 movaps %xmm0, %xmm1 movhlps %xmm0, %xmm0 - maxps %xmm1, %xmm0 + MAXPS %xmm1, %xmm0 movaps %xmm0, %xmm1 shufps $1, %xmm0, %xmm0 - maxss %xmm1, %xmm0 + MAXSS %xmm1, %xmm0 shufps $0, %xmm0, %xmm0 testq $4, X @@ -427,28 +440,28 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxps %xmm4, %xmm0 + MAXPS %xmm4, %xmm0 movsd 4 * SIZE(X), %xmm5 movhps 6 * SIZE(X), %xmm5 #ifdef USE_ABS andps %xmm15, %xmm5 #endif - maxps %xmm5, %xmm1 + MAXPS %xmm5, %xmm1 movsd 8 * SIZE(X), %xmm6 movhps 10 * SIZE(X), %xmm6 #ifdef USE_ABS andps %xmm15, %xmm6 #endif - maxps %xmm6, %xmm2 + MAXPS %xmm6, %xmm2 movsd 12 * SIZE(X), %xmm7 movhps 14 * SIZE(X), %xmm7 #ifdef USE_ABS andps %xmm15, %xmm7 #endif - maxps %xmm7, %xmm3 + MAXPS %xmm7, %xmm3 addq $16 * SIZE, X decq I @@ -467,14 +480,14 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxps %xmm4, %xmm0 + MAXPS %xmm4, %xmm0 movsd 4 * SIZE(X), %xmm5 movhps 6 * SIZE(X), %xmm5 #ifdef USE_ABS andps %xmm15, %xmm5 #endif - maxps %xmm5, %xmm1 + MAXPS %xmm5, %xmm1 addq $8 * SIZE, X ALIGN_3 @@ -488,7 +501,7 @@ #ifdef USE_ABS andps %xmm15, %xmm6 #endif - maxps %xmm6, %xmm2 + MAXPS %xmm6, %xmm2 addq $4 * SIZE, X ALIGN_3 @@ -501,7 +514,7 @@ #ifdef USE_ABS andps %xmm15, %xmm7 #endif - maxps %xmm7, %xmm3 + MAXPS %xmm7, %xmm3 addq $2 * SIZE, X .L38: @@ -512,7 +525,7 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxss %xmm4, %xmm0 + MAXSS %xmm4, %xmm0 jmp .L40 ALIGN_4 @@ -520,15 +533,15 @@ movq XX, X movq MM, M - maxps %xmm1, %xmm0 - maxps %xmm3, %xmm2 - maxps %xmm2, %xmm0 + MAXPS %xmm1, %xmm0 + MAXPS %xmm3, %xmm2 + MAXPS %xmm2, %xmm0 movaps %xmm0, %xmm1 movhlps %xmm0, %xmm0 - maxps %xmm1, %xmm0 + MAXPS %xmm1, %xmm0 movaps %xmm0, %xmm1 shufps $1, %xmm0, %xmm0 - maxss %xmm1, %xmm0 + MAXSS %xmm1, %xmm0 shufps $0, %xmm0, %xmm0 movq M, I @@ -687,56 +700,56 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxss %xmm4, %xmm0 + MAXSS %xmm4, %xmm0 movss 0 * SIZE(X), %xmm5 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm5 #endif - maxss %xmm5, %xmm1 + MAXSS %xmm5, %xmm1 movss 0 * SIZE(X), %xmm6 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm6 #endif - maxss %xmm6, %xmm2 + MAXSS %xmm6, %xmm2 movss 0 * SIZE(X), %xmm7 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm7 #endif - maxss %xmm7, %xmm3 + MAXSS %xmm7, %xmm3 movss 0 * SIZE(X), %xmm4 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxss %xmm4, %xmm0 + MAXSS %xmm4, %xmm0 movss 0 * SIZE(X), %xmm5 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm5 #endif - maxss %xmm5, %xmm1 + MAXSS %xmm5, %xmm1 movss 0 * SIZE(X), %xmm6 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm6 #endif - maxss %xmm6, %xmm2 + MAXSS %xmm6, %xmm2 movss 0 * SIZE(X), %xmm7 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm7 #endif - maxss %xmm7, %xmm3 + MAXSS %xmm7, %xmm3 decq I jg .L81 @@ -754,28 +767,28 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxss %xmm4, %xmm0 + MAXSS %xmm4, %xmm0 movss 0 * SIZE(X), %xmm5 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm5 #endif - maxss %xmm5, %xmm1 + MAXSS %xmm5, %xmm1 movss 0 * SIZE(X), %xmm6 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm6 #endif - maxss %xmm6, %xmm2 + MAXSS %xmm6, %xmm2 movss 0 * SIZE(X), %xmm7 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm7 #endif - maxss %xmm7, %xmm3 + MAXSS %xmm7, %xmm3 ALIGN_3 .L86: @@ -787,14 +800,14 @@ #ifdef USE_ABS andps %xmm15, %xmm4 #endif - maxss %xmm4, %xmm0 + MAXSS %xmm4, %xmm0 movss 0 * SIZE(X), %xmm5 addq INCX, X #ifdef USE_ABS andps %xmm15, %xmm5 #endif - maxss %xmm5, %xmm1 + MAXSS %xmm5, %xmm1 ALIGN_3 .L87: @@ -806,16 +819,16 @@ #ifdef USE_ABS andps %xmm15, %xmm6 #endif - 
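/* NOTE: the .L20/.L40 blocks above reduce four partial-max vectors to one
   scalar with a movhlps/shufps ladder. The same ladder with intrinsics,
   for the max flavour (a sketch, not part of this patch): */
#include <immintrin.h>
#include <stdio.h>

static float hmax_ps(__m128 v) {
    __m128 hi = _mm_movehl_ps(v, v);     /* lanes [2,3] onto [0,1] */
    v  = _mm_max_ps(v, hi);              /* pairwise max           */
    hi = _mm_shuffle_ps(v, v, 1);        /* lane 1 down to lane 0  */
    v  = _mm_max_ss(v, hi);              /* final scalar max       */
    return _mm_cvtss_f32(v);
}

int main(void) {
    printf("%g\n", hmax_ps(_mm_set_ps(2.0f, 8.0f, 1.0f, 4.0f)));  /* 8 */
    return 0;
}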
maxss %xmm6, %xmm2 + MAXSS %xmm6, %xmm2 ALIGN_4 .L90: movq XX, X movq MM, M - maxss %xmm1, %xmm0 - maxss %xmm3, %xmm2 - maxss %xmm2, %xmm0 + MAXSS %xmm1, %xmm0 + MAXSS %xmm3, %xmm2 + MAXSS %xmm2, %xmm0 shufps $0, %xmm0, %xmm0 movq M, I @@ -824,61 +837,14 @@ ALIGN_4 .L93: - movss 0 * SIZE(X), %xmm1 - addq INCX, X -#ifdef USE_ABS - andps %xmm15, %xmm1 -#endif - cmpeqss %xmm0, %xmm1 - - movss 0 * SIZE(X), %xmm2 - addq INCX, X -#ifdef USE_ABS - andps %xmm15, %xmm2 -#endif - cmpeqss %xmm0, %xmm2 - - movss 0 * SIZE(X), %xmm3 - addq INCX, X -#ifdef USE_ABS - andps %xmm15, %xmm3 -#endif - cmpeqss %xmm0, %xmm3 - - movss 0 * SIZE(X), %xmm4 - addq INCX, X -#ifdef USE_ABS - andps %xmm15, %xmm4 -#endif - cmpeqss %xmm0, %xmm4 - - movss 0 * SIZE(X), %xmm5 - addq INCX, X -#ifdef USE_ABS - andps %xmm15, %xmm5 -#endif - cmpeqps %xmm0, %xmm5 - - movss 0 * SIZE(X), %xmm6 - addq INCX, X -#ifdef USE_ABS - andps %xmm15, %xmm6 -#endif - cmpeqss %xmm0, %xmm6 - - movss 0 * SIZE(X), %xmm7 - addq INCX, X -#ifdef USE_ABS - andps %xmm15, %xmm7 -#endif - cmpeqss %xmm0, %xmm7 - - movss 0 * SIZE(X), %xmm8 - addq INCX, X -#ifdef USE_ABS - andps %xmm15, %xmm8 -#endif - cmpeqss %xmm0, %xmm8 + LOAD_AND_COMPARE_TO_MXX %xmm1 + LOAD_AND_COMPARE_TO_MXX %xmm2 + LOAD_AND_COMPARE_TO_MXX %xmm3 + LOAD_AND_COMPARE_TO_MXX %xmm4 + LOAD_AND_COMPARE_TO_MXX %xmm5 + LOAD_AND_COMPARE_TO_MXX %xmm6 + LOAD_AND_COMPARE_TO_MXX %xmm7 + LOAD_AND_COMPARE_TO_MXX %xmm8 orps %xmm2, %xmm1 orps %xmm4, %xmm3 diff --git a/kernel/x86_64/izamax.S b/kernel/x86_64/izamax.S index c066acd62..b24b2e692 100644 --- a/kernel/x86_64/izamax.S +++ b/kernel/x86_64/izamax.S @@ -60,6 +60,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + salq $ZBASE_SHIFT, INCX fldz diff --git a/kernel/x86_64/nrm2.S b/kernel/x86_64/nrm2.S index e9be1262a..b79ac2adb 100644 --- a/kernel/x86_64/nrm2.S +++ b/kernel/x86_64/nrm2.S @@ -50,6 +50,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + fldz testq M, M jle .L999 diff --git a/kernel/x86_64/qconjg.S b/kernel/x86_64/qconjg.S index 49ca76649..823a15a84 100644 --- a/kernel/x86_64/qconjg.S +++ b/kernel/x86_64/qconjg.S @@ -42,6 +42,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + fldz FLD 1 * SIZE(ARG1) fsubrp %st, %st(1) diff --git a/kernel/x86_64/qdot.S b/kernel/x86_64/qdot.S index a48a04fdd..2243b6b6d 100644 --- a/kernel/x86_64/qdot.S +++ b/kernel/x86_64/qdot.S @@ -58,6 +58,10 @@ PROLOGUE +#ifdef WINDOWS_ABI + emms +#endif + pushl %edi pushl %esi pushl %ebx diff --git a/kernel/x86_64/qgemm_kernel_2x2.S b/kernel/x86_64/qgemm_kernel_2x2.S index 99db3961f..c11f3a91d 100644 --- a/kernel/x86_64/qgemm_kernel_2x2.S +++ b/kernel/x86_64/qgemm_kernel_2x2.S @@ -74,6 +74,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/qgemv_n.S b/kernel/x86_64/qgemv_n.S index 630d03ffb..c9d345cb1 100644 --- a/kernel/x86_64/qgemv_n.S +++ b/kernel/x86_64/qgemv_n.S @@ -76,6 +76,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/qgemv_t.S b/kernel/x86_64/qgemv_t.S index d7c9cd2a5..32372ff15 100644 --- a/kernel/x86_64/qgemv_t.S +++ b/kernel/x86_64/qgemv_t.S @@ -75,6 +75,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/qtrsm_kernel_LN_2x2.S b/kernel/x86_64/qtrsm_kernel_LN_2x2.S index 536042e65..0a545faf8 100644 --- a/kernel/x86_64/qtrsm_kernel_LN_2x2.S 
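/* NOTE on the WINDOWS_ABI emms additions in these x87 kernels: if a caller
   left MMX state live, the x87 tag word marks the register stack busy and
   the kernels' leading fldz would produce garbage, so the prologue resets it
   with emms. The C-level equivalent, on toolchains that still expose MMX
   intrinsics (a sketch, not part of this patch): */
#include <mmintrin.h>

void mmx_then_x87(void) {
    __m64 a = _mm_set1_pi16(1);     /* MMX registers alias the x87 stack */
    a = _mm_add_pi16(a, a);
    (void)a;
    _mm_empty();                    /* EMMS: mark the x87 stack empty */
    /* x87 code (fldz etc., as in the kernels above) is safe from here */
}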
+++ b/kernel/x86_64/qtrsm_kernel_LN_2x2.S @@ -74,6 +74,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/qtrsm_kernel_LT_2x2.S b/kernel/x86_64/qtrsm_kernel_LT_2x2.S index 6e94976c5..16063fbcd 100644 --- a/kernel/x86_64/qtrsm_kernel_LT_2x2.S +++ b/kernel/x86_64/qtrsm_kernel_LT_2x2.S @@ -74,6 +74,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/qtrsm_kernel_RT_2x2.S b/kernel/x86_64/qtrsm_kernel_RT_2x2.S index caa7de14a..4c94ac02c 100644 --- a/kernel/x86_64/qtrsm_kernel_RT_2x2.S +++ b/kernel/x86_64/qtrsm_kernel_RT_2x2.S @@ -74,6 +74,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/sasum.c b/kernel/x86_64/sasum.c new file mode 100644 index 000000000..d0cea9bee --- /dev/null +++ b/kernel/x86_64/sasum.c @@ -0,0 +1,137 @@ +#include "common.h" + +#if defined(DOUBLE) +#error supports float only +#else +#ifndef ABS_K +#define ABS_K(a) ((a) > 0 ? (a) : (-(a))) +#endif + +#endif + +#if defined(SKYLAKEX) +#include "sasum_microk_skylakex-2.c" +#elif defined(HASWELL) +#include "sasum_microk_haswell-2.c" +#endif + +#ifndef HAVE_SASUM_KERNEL + +static FLOAT sasum_kernel(BLASLONG n, FLOAT *x1) +{ + + BLASLONG i=0; + BLASLONG n_8 = n & -8; + FLOAT *x = x1; + FLOAT temp0, temp1, temp2, temp3; + FLOAT temp4, temp5, temp6, temp7; + FLOAT sum0 = 0.0; + FLOAT sum1 = 0.0; + FLOAT sum2 = 0.0; + FLOAT sum3 = 0.0; + FLOAT sum4 = 0.0; + + while (i < n_8) { + + temp0 = ABS_K(x[0]); + temp1 = ABS_K(x[1]); + temp2 = ABS_K(x[2]); + temp3 = ABS_K(x[3]); + temp4 = ABS_K(x[4]); + temp5 = ABS_K(x[5]); + temp6 = ABS_K(x[6]); + temp7 = ABS_K(x[7]); + + sum0 += temp0; + sum1 += temp1; + sum2 += temp2; + sum3 += temp3; + + sum0 += temp4; + sum1 += temp5; + sum2 += temp6; + sum3 += temp7; + + x+=8; + i+=8; + + } + + while (i < n) { + sum4 += ABS_K(x1[i]); + i++; + } + + return sum0+sum1+sum2+sum3+sum4; +} + +#endif + +static FLOAT asum_compute(BLASLONG n, FLOAT * x, BLASLONG inc_x) +{ + BLASLONG i = 0; + FLOAT sumf = 0.0; + + if (n <= 0 || inc_x <= 0) return (sumf); + + if (inc_x == 1) { + sumf = sasum_kernel(n, x); + } + else { + n *= inc_x; + while(i < n) { + sumf += ABS_K(x[i]); + i += inc_x; + } + } + return (sumf); +} + +#if defined(SMP) +static int asum_thread_function(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *dummy3, BLASLONG dummy4, FLOAT *result, BLASLONG dummy5) +{ + *(FLOAT *)result = asum_compute(n, x, inc_x); + return 0; +} + +extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void * alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, void *c, BLASLONG ldc, int(*function)(), int nthreads); +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; +#endif + FLOAT sumf = 0.0; + +#if defined(SMP) + int num_cpu = num_cpu_avail(1); + if (n <= 100000 || inc_x <= 0) + nthreads = 1; + else + nthreads = num_cpu < n/100000 ? 
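/* NOTE: the scalar sasum_kernel above keeps four running sums so the FP adds
   do not form one serial dependency chain, and "n & -8" clears the low three
   bits of n, i.e. rounds it down to a multiple of 8 for the unrolled loop.
   A quick standalone check of that tail split (sketch): */
#include <stdio.h>

int main(void) {
    for (int n = 5; n <= 25; n += 5) {
        int n_8 = n & -8;
        printf("n=%2d unrolled=%2d tail=%d\n", n, n_8, n - n_8);
    }
    return 0;   /* e.g. n=10 -> unrolled=8, tail=2 */
}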
num_cpu : n/100000; + if (nthreads == 1) { + sumf = asum_compute(n, x, inc_x); + } + else { + int mode, i; + char result[MAX_CPU_NUMBER * sizeof(double) *2]; + FLOAT * ptr; +#if !defined(DOUBLE) + mode = BLAS_SINGLE | BLAS_REAL; +#else + mode = BLAS_DOUBLE | BLAS_REAL; +#endif + blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, x, inc_x, NULL, 0, result, 0, (void *)asum_thread_function, nthreads); + ptr = (FLOAT *)result; + for (i = 0; i < nthreads; i++) { + sumf += (*ptr); + ptr = (FLOAT *)(((char *)ptr) + sizeof(double) * 2); + } + } +#else + sumf = asum_compute(n, x, inc_x); +#endif + return(sumf); +} diff --git a/kernel/x86_64/sasum_microk_haswell-2.c b/kernel/x86_64/sasum_microk_haswell-2.c new file mode 100644 index 000000000..8e6cb9a47 --- /dev/null +++ b/kernel/x86_64/sasum_microk_haswell-2.c @@ -0,0 +1,82 @@ +#if (( defined(__GNUC__) && __GNUC__ > 6 ) || (defined(__clang__) && __clang_major__ >= 6)) && defined(__AVX2__) + +#define HAVE_SASUM_KERNEL 1 + +#include <immintrin.h> +#include <stdint.h> + +#ifndef ABS_K +#define ABS_K(a) ((a) > 0 ? (a) : (-(a))) +#endif + +static FLOAT sasum_kernel(BLASLONG n, FLOAT *x1) +{ + BLASLONG i = 0; + FLOAT sumf = 0.0; + + if (n >= 256) { + BLASLONG align_256 = ((32 - ((uintptr_t)x1 & (uintptr_t)0x1f)) >> 2) & 0x7; + + for (i = 0; i < align_256; i++) { + sumf += ABS_K(x1[i]); + } + + n -= align_256; + x1 += align_256; + } + + BLASLONG tail_index_SSE = n&(~7); + BLASLONG tail_index_AVX2 = n&(~255); + + if (n >= 256) { + __m256 accum_0, accum_1, accum_2, accum_3; + + accum_0 = _mm256_setzero_ps(); + accum_1 = _mm256_setzero_ps(); + accum_2 = _mm256_setzero_ps(); + accum_3 = _mm256_setzero_ps(); + + __m256i abs_mask = _mm256_set1_epi32(0x7fffffff); + for (i = 0; i < tail_index_AVX2; i += 32) { + accum_0 += (__m256)_mm256_and_si256(_mm256_load_si256((__m256i*)&x1[i+ 0]), abs_mask); + accum_1 += (__m256)_mm256_and_si256(_mm256_load_si256((__m256i*)&x1[i+ 8]), abs_mask); + accum_2 += (__m256)_mm256_and_si256(_mm256_load_si256((__m256i*)&x1[i+16]), abs_mask); + accum_3 += (__m256)_mm256_and_si256(_mm256_load_si256((__m256i*)&x1[i+24]), abs_mask); + } + + accum_0 = accum_0 + accum_1 + accum_2 + accum_3; + __m128 half_accum0; + half_accum0 = _mm_add_ps(_mm256_extractf128_ps(accum_0, 0), _mm256_extractf128_ps(accum_0, 1)); + + half_accum0 = _mm_hadd_ps(half_accum0, half_accum0); + half_accum0 = _mm_hadd_ps(half_accum0, half_accum0); + + sumf += half_accum0[0]; + + } + + if (n >= 8) { + __m128 accum_20, accum_21; + accum_20 = _mm_setzero_ps(); + accum_21 = _mm_setzero_ps(); + + __m128i abs_mask2 = _mm_set1_epi32(0x7fffffff); + for (i = tail_index_AVX2; i < tail_index_SSE; i += 8) { + accum_20 += (__m128)_mm_and_si128(_mm_loadu_si128((__m128i*)&x1[i + 0]), abs_mask2); + accum_21 += (__m128)_mm_and_si128(_mm_loadu_si128((__m128i*)&x1[i + 4]), abs_mask2); + } + + accum_20 += accum_21; + accum_20 = _mm_hadd_ps(accum_20, accum_20); + accum_20 = _mm_hadd_ps(accum_20, accum_20); + + sumf += accum_20[0]; + } + + for (i = tail_index_SSE; i < n; ++i) { + sumf += ABS_K(x1[i]); + } + + return sumf; +} +#endif diff --git a/kernel/x86_64/sasum_microk_skylakex-2.c b/kernel/x86_64/sasum_microk_skylakex-2.c new file mode 100644 index 000000000..c8c69d1e0 --- /dev/null +++ b/kernel/x86_64/sasum_microk_skylakex-2.c @@ -0,0 +1,73 @@ +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9)) + +#define HAVE_SASUM_KERNEL 1 + +#ifndef ABS_K +#define ABS_K(a) ((a) > 0 ?
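/* NOTE: in the AVX2 kernel above, align_256 counts the floats needed to
   reach the next 32-byte boundary ((32 - (addr mod 32)) / 4, wrapped to
   0..7) so that the main loop can use aligned loads. A standalone check of
   the computation (sketch): */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    float buf[64];
    float *x1 = buf + 1;   /* deliberately offset pointer */
    uintptr_t align_256 = ((32 - ((uintptr_t)x1 & (uintptr_t)0x1f)) >> 2) & 0x7;
    printf("peel %d floats, then aligned: %d\n", (int)align_256,
           ((uintptr_t)(x1 + align_256) & 0x1f) == 0);
    return 0;
}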
(a) : (-(a))) +#endif + +#include <immintrin.h> +#include <stdint.h> + +static FLOAT sasum_kernel(BLASLONG n, FLOAT *x1) +{ + BLASLONG i = 0; + FLOAT sumf = 0.0; + + if (n >= 256) { + BLASLONG align_512 = ((64 - ((uintptr_t)x1 & (uintptr_t)0x3f)) >> 2) & 0xf; + + for (i = 0; i < align_512; i++) { + sumf += ABS_K(x1[i]); + } + n -= align_512; + x1 += align_512; + } + + BLASLONG tail_index_SSE = n&(~7); + BLASLONG tail_index_AVX512 = n&(~255); + + if (n >= 256) { + __m512 accum_0, accum_1, accum_2, accum_3; + accum_0 = _mm512_setzero_ps(); + accum_1 = _mm512_setzero_ps(); + accum_2 = _mm512_setzero_ps(); + accum_3 = _mm512_setzero_ps(); + + for (i = 0; i < tail_index_AVX512; i += 64) { + accum_0 += _mm512_abs_ps(_mm512_load_ps(&x1[i + 0])); + accum_1 += _mm512_abs_ps(_mm512_load_ps(&x1[i +16])); + accum_2 += _mm512_abs_ps(_mm512_load_ps(&x1[i +32])); + accum_3 += _mm512_abs_ps(_mm512_load_ps(&x1[i +48])); + } + + accum_0 = accum_0 + accum_1 + accum_2 + accum_3; + sumf += _mm512_reduce_add_ps(accum_0); + } + + if (n >= 8) { + __m128 accum_20, accum_21; + accum_20 = _mm_setzero_ps(); + accum_21 = _mm_setzero_ps(); + + __m128i abs_mask2 = _mm_set1_epi32(0x7fffffff); + for (i = tail_index_AVX512; i < tail_index_SSE; i += 8) { + accum_20 += (__m128)_mm_and_si128(_mm_loadu_si128((__m128i*)&x1[i + 0]), abs_mask2); + accum_21 += (__m128)_mm_and_si128(_mm_loadu_si128((__m128i*)&x1[i + 4]), abs_mask2); + } + + accum_20 += accum_21; + accum_20 = _mm_hadd_ps(accum_20, accum_20); + accum_20 = _mm_hadd_ps(accum_20, accum_20); + + sumf += accum_20[0]; + } + + for (i = tail_index_SSE; i < n; i++) { + sumf += ABS_K(x1[i]); + } + + return sumf; +} +#endif diff --git a/kernel/x86_64/saxpy.c b/kernel/x86_64/saxpy.c index e1349da58..7b2845636 100644 --- a/kernel/x86_64/saxpy.c +++ b/kernel/x86_64/saxpy.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "saxpy_microk_nehalem-2.c" #elif defined(HASWELL) || defined(ZEN) #include "saxpy_microk_haswell-2.c" -#elif defined (SKYLAKEX) +#elif defined (SKYLAKEX) || defined (COOPERLAKE) #include "saxpy_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "saxpy_microk_sandy-2.c" diff --git a/kernel/x86_64/saxpy_microk_haswell-2.c b/kernel/x86_64/saxpy_microk_haswell-2.c index 7099ba4c6..8cc697f05 100644 --- a/kernel/x86_64/saxpy_microk_haswell-2.c +++ b/kernel/x86_64/saxpy_microk_haswell-2.c @@ -67,7 +67,8 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (y), // 3 "r" (alpha) // 4 : "cc", - "%xmm0", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" diff --git a/kernel/x86_64/saxpy_microk_piledriver-2.c b/kernel/x86_64/saxpy_microk_piledriver-2.c index 5feea7f24..ebbcc0045 100644 --- a/kernel/x86_64/saxpy_microk_piledriver-2.c +++ b/kernel/x86_64/saxpy_microk_piledriver-2.c @@ -86,7 +86,8 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (y), // 3 "r" (alpha) // 4 : "cc", - "%xmm0", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" @@ -147,7 +148,8 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (y), // 3 "r" (alpha) // 4 : "cc", - "%xmm0", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" diff --git a/kernel/x86_64/sbdot.c b/kernel/x86_64/sbdot.c new file mode 100644 index 000000000..ef14fd618 --- /dev/null +++ b/kernel/x86_64/sbdot.c @@ -0,0 +1,115 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "common.h" + +#if defined(COOPERLAKE) +#include "sbdot_microk_cooperlake.c" +#endif + +static float sbdot_compute(BLASLONG n, bfloat16 *x, BLASLONG inc_x, bfloat16 *y, BLASLONG inc_y) +{ + float d = 0.0; + +#ifdef HAVE_SBDOT_ACCL_KERNEL + if ((inc_x == 1) && (inc_y == 1)) { + return sbdot_accl_kernel(n, x, y); + } +#endif + + float * x_fp32 = malloc(sizeof(float)*n); + float * y_fp32 = malloc(sizeof(float)*n); + + SBF16TOS_K(n, x, inc_x, x_fp32, 1); + SBF16TOS_K(n, y, inc_y, y_fp32, 1); + + d = SDOTU_K(n, x_fp32, 1, y_fp32, 1); + + free(x_fp32); + free(y_fp32); + + return d; +} + +#if defined(SMP) +static int sbdot_thread_func(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, bfloat16 dummy2, + bfloat16 *x, BLASLONG inc_x, bfloat16 *y, BLASLONG inc_y, + float *result, BLASLONG dummy3) +{ + *(float *)result = sbdot_compute(n, x, inc_x, y, inc_y); + return 0; +} + +extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void *alpha, + void *a, BLASLONG lda, void *b, BLASLONG ldb, void *c, BLASLONG ldc, + int (*function)(), int nthreads); +#endif + +float CNAME(BLASLONG n, bfloat16 *x, BLASLONG inc_x, bfloat16 *y, BLASLONG inc_y) +{ + float dot_result = 0.0; + + if (n <= 0) return 0.0; + +#if defined(SMP) + int nthreads; + int thread_thres = 40960; + bfloat16 dummy_alpha; +#endif + +#if defined(SMP) + if (inc_x == 0 || inc_y == 0 || n <= thread_thres) + nthreads = 1; + else + nthreads = num_cpu_avail(1); + + int best_threads = (int) (n/(float)thread_thres + 0.5); + + if (best_threads < nthreads) { + nthreads = best_threads; + } + + if (nthreads <= 1) { + dot_result = sbdot_compute(n, x, inc_x, y, inc_y); + } else { + char thread_result[MAX_CPU_NUMBER * sizeof(double) * 2]; + int mode = BLAS_BFLOAT16 | BLAS_REAL; + blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, + x, inc_x, y, inc_y, thread_result, 0, + (void *)sbdot_thread_func, nthreads); + float * ptr = (float *)thread_result; + for (int i = 0; i < nthreads; i++) { + dot_result += (*ptr); + ptr = (float *)(((char *)ptr) + sizeof(double) * 2); + } + } +#else + dot_result = sbdot_compute(n, x, inc_x, y, inc_y); +#endif + + return dot_result; +} diff --git a/kernel/x86_64/sbdot_microk_cooperlake.c b/kernel/x86_64/sbdot_microk_cooperlake.c new file mode 100644 index 000000000..067726cb1 --- /dev/null +++ b/kernel/x86_64/sbdot_microk_cooperlake.c @@ -0,0 +1,159 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
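/* NOTE: as in sasum.c, the SMP path in sbdot.c assumes each worker writes
   its partial result at the start of a 2*sizeof(double) slot inside the
   shared buffer, and the reduction walks the buffer with that stride.
   A standalone model of the walk (sketch; the worker writes are simulated): */
#include <stdio.h>
#define MAX_CPU_NUMBER 8

int main(void) {
    char result[MAX_CPU_NUMBER * sizeof(double) * 2];
    int nthreads = 4;
    for (int i = 0; i < nthreads; i++)        /* stand-in for the workers */
        *(float *)(result + i * sizeof(double) * 2) = 1.0f + i;

    float total = 0.0f;
    float *ptr = (float *)result;
    for (int i = 0; i < nthreads; i++) {
        total += *ptr;
        ptr = (float *)((char *)ptr + sizeof(double) * 2);
    }
    printf("%g\n", total);                    /* 1+2+3+4 = 10 */
    return 0;
}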
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ >= 10 && defined(__AVX512BF16__)) || (defined(__clang__) && __clang_major__ >= 9)) + +#define HAVE_SBDOT_ACCL_KERNEL 1 +#include "common.h" +#include <immintrin.h> + +static float sbdot_accl_kernel(BLASLONG n, bfloat16 *x, bfloat16 *y) +{ + __m128 accum128 = _mm_setzero_ps(); + if (n> 127) { /* n range from 128 to inf. */ + long tail_index_32 = n&(~31); + long tail_index_128 = n&(~127); + unsigned int tail_mask_uint = (((unsigned int)0xffffffff) >> (32-(n&31))); + __mmask32 tail_mask = *((__mmask32*) &tail_mask_uint); + + __m512 accum512_0 = _mm512_setzero_ps(); + __m512 accum512_1 = _mm512_setzero_ps(); + __m512 accum512_2 = _mm512_setzero_ps(); + __m512 accum512_3 = _mm512_setzero_ps(); + + /* Processing the main chunk with 128-elements per round */ + for (long i = 0; i < tail_index_128; i += 128) { + accum512_0 = _mm512_dpbf16_ps(accum512_0, (__m512bh) _mm512_loadu_si512(&x[i+ 0]), (__m512bh) _mm512_loadu_si512(&y[i+ 0])); + accum512_1 = _mm512_dpbf16_ps(accum512_1, (__m512bh) _mm512_loadu_si512(&x[i+32]), (__m512bh) _mm512_loadu_si512(&y[i+32])); + accum512_2 = _mm512_dpbf16_ps(accum512_2, (__m512bh) _mm512_loadu_si512(&x[i+64]), (__m512bh) _mm512_loadu_si512(&y[i+64])); + accum512_3 = _mm512_dpbf16_ps(accum512_3, (__m512bh) _mm512_loadu_si512(&x[i+96]), (__m512bh) _mm512_loadu_si512(&y[i+96])); + } + + /* Processing the remaining <128 chunk with 32-elements per round */ + for (long j = tail_index_128; j < tail_index_32; j += 32) { + accum512_0 = _mm512_dpbf16_ps(accum512_0, (__m512bh) _mm512_loadu_si512(&x[j]), (__m512bh) _mm512_loadu_si512(&y[j])); + } + + /* Processing the remaining <32 chunk with masked 32-elements processing */ + if ((n&31) != 0) { + accum512_2 = _mm512_dpbf16_ps(accum512_2, + (__m512bh) _mm512_maskz_loadu_epi16(tail_mask, &x[tail_index_32]), + (__m512bh) _mm512_maskz_loadu_epi16(tail_mask, &y[tail_index_32])); + } + + /* Accumulate the 4 registers into 1 register */ + accum512_0 = _mm512_add_ps(accum512_0, accum512_1); + accum512_2 = _mm512_add_ps(accum512_2, accum512_3); + accum512_0 = _mm512_add_ps(accum512_0, accum512_2); + + __m256 accum256 = _mm256_add_ps(_mm512_castps512_ps256(accum512_0), _mm512_extractf32x8_ps(accum512_0, 1)); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf128_ps(accum256, 1)); + } else if (n > 31) { /* n range from 32 to 127 */ + /* Processing <128 chunk with 32-elements per round */ + __m256 accum256 = _mm256_setzero_ps(); + __m256 accum256_1 = _mm256_setzero_ps(); + int tail_index_32 = n&(~31); + for (int j = 0; j < tail_index_32; j += 32) { + accum256 = _mm256_dpbf16_ps(accum256, (__m256bh)
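/* NOTE: the masked tails above build an all-ones mask of (n mod 32) bits via
   0xffffffff >> (32 - (n & 31)). The shift is only well-defined when
   n & 31 is nonzero; the kernel only consults the mask in that case, though
   the computation itself runs unconditionally. A quick check of the pattern
   (sketch): */
#include <stdio.h>

int main(void) {
    for (unsigned n = 33; n <= 36; n++) {
        unsigned tail = n & 31;
        unsigned mask = 0xffffffffu >> (32 - tail);
        printf("n=%u tail=%u mask=0x%08x\n", n, tail, mask); /* 0x1,0x3,0x7,0xf */
    }
    return 0;
}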
_mm256_loadu_si256(&x[j+ 0]), (__m256bh) _mm256_loadu_si256(&y[j+ 0])); + accum256_1 = _mm256_dpbf16_ps(accum256_1, (__m256bh) _mm256_loadu_si256(&x[j+16]), (__m256bh) _mm256_loadu_si256(&y[j+16])); + } + accum256 = _mm256_add_ps(accum256, accum256_1); + + /* Processing the remaining <32 chunk with 16-elements processing */ + if ((n&16) != 0) { + accum256 = _mm256_dpbf16_ps(accum256, (__m256bh) _mm256_loadu_si256(&x[tail_index_32]), (__m256bh) _mm256_loadu_si256(&y[tail_index_32])); + } + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf128_ps(accum256, 1)); + + /* Processing the remaining <16 chunk with 8-elements processing */ + if ((n&8) != 0) { + int tail_index_16 = n&(~15); + accum128 = _mm_dpbf16_ps(accum128, (__m128bh) _mm_loadu_si128(&x[tail_index_16]), (__m128bh) _mm_loadu_si128(&y[tail_index_16])); + } + + /* Processing the remaining <8 chunk with masked 8-elements processing */ + if ((n&7) != 0) { + unsigned char tail_mask_uint = (((unsigned char)0xff) >> (8-(n&7))); + __mmask8 tail_mask = *((__mmask8*) &tail_mask_uint); + int tail_index_8 = n&(~7); + accum128 = _mm_dpbf16_ps(accum128, + (__m128bh) _mm_maskz_loadu_epi16(tail_mask, &x[tail_index_8]), + (__m128bh) _mm_maskz_loadu_epi16(tail_mask, &y[tail_index_8])); + } + } else if (n > 15) { /* n range from 16 to 31 */ + /* Processing <32 chunk with 16-elements processing */ + __m256 accum256 = _mm256_setzero_ps(); + accum256 = _mm256_dpbf16_ps(accum256, (__m256bh) _mm256_loadu_si256(&x[0]), (__m256bh) _mm256_loadu_si256(&y[0])); + accum128 += _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf128_ps(accum256, 1)); + + /* Processing the remaining <16 chunk with 8-elements processing */ + if ((n&8) != 0) { + int tail_index_16 = n&(~15); + accum128 = _mm_dpbf16_ps(accum128, (__m128bh) _mm_loadu_si128(&x[tail_index_16]), (__m128bh) _mm_loadu_si128(&y[tail_index_16])); + } + + /* Processing the remaining <8 chunk with masked 8-elements processing */ + if ((n&7) != 0) { + unsigned char tail_mask_uint = (((unsigned char)0xff) >> (8-(n&7))); + __mmask8 tail_mask = *((__mmask8*) &tail_mask_uint); + int tail_index_8 = n&(~7); + accum128 = _mm_dpbf16_ps(accum128, + (__m128bh) _mm_maskz_loadu_epi16(tail_mask, &x[tail_index_8]), + (__m128bh) _mm_maskz_loadu_epi16(tail_mask, &y[tail_index_8])); + } + } else if (n > 7) { /* n range from 8 to 15 */ + /* Processing <16 chunk with 8-elements processing */ + accum128 = _mm_dpbf16_ps(accum128, (__m128bh) _mm_loadu_si128(&x[0]), (__m128bh) _mm_loadu_si128(&y[0])); + + /* Processing the remaining <8 chunk with masked 8-elements processing */ + if ((n&7) != 0) { + unsigned char tail_mask_uint = (((unsigned char)0xff) >> (8-(n&7))); + __mmask8 tail_mask = *((__mmask8*) &tail_mask_uint); + int tail_index_8 = n&(~7); + accum128 = _mm_dpbf16_ps(accum128, + (__m128bh) _mm_maskz_loadu_epi16(tail_mask, &x[tail_index_8]), + (__m128bh) _mm_maskz_loadu_epi16(tail_mask, &y[tail_index_8])); + } + } else { /* n range from 1 to 7 */ + unsigned char tail_mask_uint = (((unsigned char)0xff) >> (8-(n&7))); + __mmask8 tail_mask = *((__mmask8*) &tail_mask_uint); + accum128 = _mm_dpbf16_ps(accum128, + (__m128bh) _mm_maskz_loadu_epi16(tail_mask, &x[0]), + (__m128bh) _mm_maskz_loadu_epi16(tail_mask, &y[0])); + } + + /* Add up the 4 elements into lowest entry */ + __m128 accum128_1 = _mm_shuffle_ps(accum128, accum128, 14); + accum128 = _mm_add_ps(accum128, accum128_1); + accum128_1 = _mm_shuffle_ps(accum128, accum128, 1); + accum128 = _mm_add_ps(accum128, accum128_1); + + return accum128[0]; +} 
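/* NOTE: the final reduction above folds the four lanes of accum128 with two
   shuffles: imm 14 selects [2,3,0,0] (high pair onto low pair) and imm 1
   selects [1,0,0,0]. The same helper with intrinsics (a sketch, not part of
   this patch): */
#include <immintrin.h>
#include <stdio.h>

static float hsum_ps(__m128 v) {
    __m128 t = _mm_shuffle_ps(v, v, 14);  /* [2,3,0,0] */
    v = _mm_add_ps(v, t);                 /* lanes 0,1 = pairwise sums */
    t = _mm_shuffle_ps(v, v, 1);          /* [1,0,0,0] */
    v = _mm_add_ps(v, t);                 /* lane 0 = full sum */
    return _mm_cvtss_f32(v);
}

int main(void) {
    printf("%g\n", hsum_ps(_mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f)));  /* 10 */
    return 0;
}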
+ +#endif diff --git a/kernel/x86_64/sbgemm_block_microk_cooperlake.c b/kernel/x86_64/sbgemm_block_microk_cooperlake.c new file mode 100644 index 000000000..2376fed02 --- /dev/null +++ b/kernel/x86_64/sbgemm_block_microk_cooperlake.c @@ -0,0 +1,426 @@ +#include "sbgemm.h" + +#include <immintrin.h> +// Work around intrinsics missing from some compilers +#define MM256_LOADU_EPI16(addr) \ + _mm256_maskz_loadu_epi16(~0, (addr)) +#define MM256_STOREU_EPI16(addr, reg) \ + _mm256_mask_storeu_epi16((addr), ~0, (reg)) + +#include <stdio.h> +void print_block(BLASLONG m, BLASLONG n, bfloat16 * mat) +{ + printf("---- BLOCK %ld x %ld ----\n", m, n); + for (BLASLONG i=0; i<m; i++) { + for (BLASLONG j=0; j<n; j++) { + printf("%04x ", mat[i*n+j]); + } + printf("\n"); + } +} + +void COL_MAJOR_INCOPY_KERNEL_Kx32(BLASLONG k, BLASLONG m, bfloat16 * A, BLASLONG lda, bfloat16 * block_A) +{ + BLASLONG tag_k_2x = k & (~1); + unsigned int tail_mask_value = (((unsigned int)0xffffffff) >> (32-m)); + __mmask32 tail_mask = *((__mmask32*) &tail_mask_value); + + __m512i array512_0, array512_1, array512_2, array512_3; + + BLASLONG idx_src_base0, idx_src_base1; + BLASLONG idx_target_base0, idx_target_base1; + + BLASLONG LDA_2x = 2*lda; + BLASLONG BF16_BLOCK_T_M_2x = 2*32; + idx_src_base0 = 0; + idx_src_base1 = lda; + idx_target_base0 = 0; + idx_target_base1 = 32; + for (BLASLONG idx_k = 0; idx_k < tag_k_2x; idx_k += 2) { + array512_0 = _mm512_maskz_loadu_epi16(tail_mask, &A[idx_src_base0]); + array512_1 = _mm512_maskz_loadu_epi16(tail_mask, &A[idx_src_base1]); + array512_2 = _mm512_unpacklo_epi16(array512_0, array512_1); + array512_3 = _mm512_unpackhi_epi16(array512_0, array512_1); + _mm512_storeu_si512(&block_A[idx_target_base0], array512_2); + _mm512_storeu_si512(&block_A[idx_target_base1], array512_3); + + idx_src_base0 += LDA_2x; + idx_src_base1 += LDA_2x; + idx_target_base0 += BF16_BLOCK_T_M_2x; + idx_target_base1 += BF16_BLOCK_T_M_2x; + } + + if (tag_k_2x != k) { + __m512i ZERO512 = _mm512_setzero_si512(); + array512_0 = _mm512_maskz_loadu_epi16(tail_mask, &A[idx_src_base0]); + array512_2 = _mm512_unpacklo_epi16(array512_0, ZERO512); + array512_3 = _mm512_unpackhi_epi16(array512_0, ZERO512); + _mm512_storeu_si512(&block_A[idx_target_base0], array512_2); + _mm512_storeu_si512(&block_A[idx_target_base1], array512_3); + } + +#ifdef DEBUG_PROFILE + print_block(BF16_BLOCK_THRES_K, BF16_BLOCK_THRES_M, block_A); +#endif +} + +void COL_MAJOR_INCOPY_KERNEL_Kx16(BLASLONG k, BLASLONG m, bfloat16 * A, BLASLONG lda, bfloat16 * block_A) +{ + BLASLONG tag_k_2x = k & (~1); + + __m256i array256_0, array256_1, array256_2, array256_3; + + BLASLONG idx_src_base0, idx_src_base1; + BLASLONG idx_target_base0; + + BLASLONG LDA_2x = 2*lda; + idx_src_base0 = 0; + idx_src_base1 = lda; + idx_target_base0 = 0; + for (BLASLONG idx_k = 0; idx_k < tag_k_2x; idx_k += 2) { + array256_0 = MM256_LOADU_EPI16(&A[idx_src_base0]); + array256_1 = MM256_LOADU_EPI16(&A[idx_src_base1]); + array256_2 = _mm256_unpacklo_epi16(array256_0, array256_1); + array256_3 = _mm256_unpackhi_epi16(array256_0, array256_1); + // Store in one row of block_B + MM256_STOREU_EPI16(&block_A[idx_target_base0], array256_2); + MM256_STOREU_EPI16(&block_A[idx_target_base0 + 16], array256_3); + + idx_src_base0 += LDA_2x; + idx_src_base1 += LDA_2x; + idx_target_base0 += 32; + } + + if (tag_k_2x != k) { + __m256i ZERO256 = _mm256_setzero_si256(); + array256_0 = MM256_LOADU_EPI16(&A[idx_src_base0]); + array256_2 = _mm256_unpacklo_epi16(array256_0, ZERO256); + array256_3 = _mm256_unpackhi_epi16(array256_0, ZERO256); + // Store in one row of block_B + MM256_STOREU_EPI16(&block_A[idx_target_base0], array256_2); + MM256_STOREU_EPI16(&block_A[idx_target_base0 + 16], array256_3); + } + +#ifdef DEBUG_PROFILE + print_block(BF16_BLOCK_THRES_K, BF16_BLOCK_THRES_M, block_A); +#endif +} + +void
COL_MAJOR_INCOPY_KERNEL_Kx16m(BLASLONG k, BLASLONG m, bfloat16 * A, BLASLONG lda, bfloat16 * block_A) +{ + BLASLONG tag_k_2x = k & (~1); + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-m)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + + __m256i array256_0, array256_1, array256_2, array256_3; + + BLASLONG idx_src_base0, idx_src_base1; + BLASLONG idx_target_base0; + + BLASLONG LDA_2x = 2*lda; + idx_src_base0 = 0; + idx_src_base1 = lda; + idx_target_base0 = 0; + for (BLASLONG idx_k = 0; idx_k < tag_k_2x; idx_k += 2) { + array256_0 = _mm256_maskz_loadu_epi16(tail_mask, &A[idx_src_base0]); + array256_1 = _mm256_maskz_loadu_epi16(tail_mask, &A[idx_src_base1]); + array256_2 = _mm256_unpacklo_epi16(array256_0, array256_1); + array256_3 = _mm256_unpackhi_epi16(array256_0, array256_1); + // Store in one row of block_B + MM256_STOREU_EPI16(&block_A[idx_target_base0], array256_2); + MM256_STOREU_EPI16(&block_A[idx_target_base0 + 16], array256_3); + + idx_src_base0 += LDA_2x; + idx_src_base1 += LDA_2x; + idx_target_base0 += 32; + } + + if (tag_k_2x != k) { + __m256i ZERO256 = _mm256_setzero_si256(); + array256_0 = _mm256_maskz_loadu_epi16(tail_mask, &A[idx_src_base0]); + array256_2 = _mm256_unpacklo_epi16(array256_0, ZERO256); + array256_3 = _mm256_unpackhi_epi16(array256_0, ZERO256); + // Store in one row of block_B + MM256_STOREU_EPI16(&block_A[idx_target_base0], array256_2); + MM256_STOREU_EPI16(&block_A[idx_target_base0 + 16], array256_3); + } + +#ifdef DEBUG_PROFILE + print_block(BF16_BLOCK_THRES_K, BF16_BLOCK_THRES_M, block_A); +#endif +} + +void COL_MAJOR_ONCOPY_KERNEL_8x32(BLASLONG k, bfloat16 * B, BLASLONG ldb, bfloat16 * block_B) +{ + BLASLONG tag_k_32x = k & (~31); + BLASLONG idx_src_base0, idx_src_base1, idx_src_base2, idx_src_base3, idx_src_base4, idx_src_base5, idx_src_base6, idx_src_base7; + BLASLONG idx_target_base0; + + idx_src_base0 = 0; + idx_src_base1 = 1*ldb; + idx_src_base2 = 2*ldb; + idx_src_base3 = 3*ldb; + idx_src_base4 = 4*ldb; + idx_src_base5 = 5*ldb; + idx_src_base6 = 6*ldb; + idx_src_base7 = 7*ldb; + idx_target_base0 = 0; + + for (BLASLONG idx_k = 0; idx_k < tag_k_32x; idx_k += 32) { + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*0], _mm512_loadu_si512(&B[idx_src_base0+idx_k])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*1], _mm512_loadu_si512(&B[idx_src_base1+idx_k])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*2], _mm512_loadu_si512(&B[idx_src_base2+idx_k])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*3], _mm512_loadu_si512(&B[idx_src_base3+idx_k])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*4], _mm512_loadu_si512(&B[idx_src_base4+idx_k])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*5], _mm512_loadu_si512(&B[idx_src_base5+idx_k])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*6], _mm512_loadu_si512(&B[idx_src_base6+idx_k])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*7], _mm512_loadu_si512(&B[idx_src_base7+idx_k])); + idx_target_base0 += 32*8; + } + + if (tag_k_32x != k) { + unsigned int tail_mask_value = (((unsigned int)0xffffffff) >> (32-(k-tag_k_32x))); + __mmask32 tail_mask = *((__mmask32*) &tail_mask_value); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*0], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base0+tag_k_32x])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*1], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base1+tag_k_32x])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*2], _mm512_maskz_loadu_epi16(tail_mask, 
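/* NOTE: the INCOPY kernels interleave two consecutive K rows with
   unpacklo/unpackhi so that every 32-bit lane of block_A holds a (k, k+1)
   bf16 pair, which is exactly the operand layout VDPBF16PS multiplies.
   The lane pattern, shown with plain 16-bit values (sketch): */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    short r0[16], r1[16], out[16];
    for (short i = 0; i < 16; i++) { r0[i] = i; r1[i] = 100 + i; }
    __m256i a = _mm256_loadu_si256((const __m256i *)r0);  /* row k   */
    __m256i b = _mm256_loadu_si256((const __m256i *)r1);  /* row k+1 */
    _mm256_storeu_si256((__m256i *)out, _mm256_unpacklo_epi16(a, b));
    for (int i = 0; i < 16; i += 2)
        printf("(%d,%d) ", out[i], out[i + 1]);
    printf("\n");  /* (0,100) (1,101) (2,102) (3,103) (8,108) ... per lane */
    return 0;
}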
&B[idx_src_base2+tag_k_32x])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*3], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base3+tag_k_32x])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*4], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base4+tag_k_32x])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*5], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base5+tag_k_32x])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*6], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base6+tag_k_32x])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*7], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base7+tag_k_32x])); + } + +#ifdef DEBUG_PROFILE + print_block(BF16_BLOCK_THRES_N, BF16_BLOCK_THRES_K, block_B); +#endif +} + +void COL_MAJOR_ONCOPY_KERNEL_Nx32(BLASLONG n, BLASLONG k, bfloat16 * B, BLASLONG ldb, bfloat16 * block_B) +{ + BLASLONG tag_k_32x = k & (~31); + BLASLONG tag_n_2x = n & (~1); + BLASLONG idx_src_base0; + BLASLONG idx_target_base0; + + BLASLONG LDB_2x = 2*ldb; + + idx_target_base0 = 0; + + for (BLASLONG idx_k = 0; idx_k < tag_k_32x; idx_k += 32) { + idx_src_base0 = 0; + for (BLASLONG idx_n = 0; idx_n < tag_n_2x; idx_n += 2) { + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*0], _mm512_loadu_si512(&B[idx_src_base0 + idx_k])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*1], _mm512_loadu_si512(&B[idx_src_base0 + ldb + idx_k])); + idx_src_base0 += LDB_2x; + idx_target_base0 += 64; + } + + if (tag_n_2x != n) { + _mm512_storeu_si512(&block_B[idx_target_base0], _mm512_loadu_si512(&B[idx_src_base0 + idx_k])); + idx_target_base0 += 32; + } + } + + if (tag_k_32x != k) { + unsigned int tail_mask_value = (((unsigned int)0xffffffff) >> (32-(k-tag_k_32x))); + __mmask32 tail_mask = *((__mmask32*) &tail_mask_value); + idx_src_base0 = 0; + for (BLASLONG idx_n = 0; idx_n < tag_n_2x; idx_n += 2) { + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*0], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base0 + tag_k_32x])); + _mm512_storeu_si512(&block_B[idx_target_base0+ 32*1], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base0 + ldb + tag_k_32x])); + idx_src_base0 += LDB_2x; + idx_target_base0 += 64; + } + + if (tag_n_2x != n) { + _mm512_storeu_si512(&block_B[idx_target_base0], _mm512_maskz_loadu_epi16(tail_mask, &B[idx_src_base0 + tag_k_32x])); + } + } + +#ifdef DEBUG_PROFILE + print_block(BF16_BLOCK_THRES_N, BF16_BLOCK_THRES_K, block_B); +#endif +} + +// Scale matrix C while beta is not ZERO or ONE +void sbgemm_scal_operation(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST float beta, float *C, OPENBLAS_CONST blasint ldc) +{ + BLASLONG tag_n_Nx = N & (~3); + BLASLONG tag_n_Mx = M & (~15); + + BLASLONG LDC4x = ldc*4; + BLASLONG idx_base_0 = 0; + BLASLONG idx_base_1 = ldc; + BLASLONG idx_base_2 = ldc*2; + BLASLONG idx_base_3 = ldc*3; + + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-M+tag_n_Mx)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + + __m512 array_512_0, array_512_1, array_512_2, array_512_3; + + __m512 BETAVECTOR = _mm512_set1_ps(beta); + + if (Order == CblasColMajor) { + for (BLASLONG idx_n = 0; idx_n < tag_n_Nx; idx_n += 4) { + for (BLASLONG idx_m = 0; idx_m < tag_n_Mx; idx_m += 16) { + array_512_0 = _mm512_loadu_ps(&C[idx_base_0+idx_m]); + array_512_1 = _mm512_loadu_ps(&C[idx_base_1+idx_m]); + array_512_2 = _mm512_loadu_ps(&C[idx_base_2+idx_m]); + array_512_3 = _mm512_loadu_ps(&C[idx_base_3+idx_m]); + + array_512_0 = _mm512_mul_ps(BETAVECTOR, array_512_0); + 
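/* NOTE: in sbgemm_scal_operation, tag_n_Mx = M & ~15, so the shift count
   16 - M + tag_n_Mx equals 16 - (M mod 16) and the resulting mask keeps
   exactly the leftover rows of a partial 16-float column. A quick check
   (sketch): */
#include <stdio.h>

int main(void) {
    for (int M = 17; M <= 20; M++) {
        int tag_n_Mx = M & (~15);
        unsigned mask = 0xffffu >> (16 - M + tag_n_Mx);
        printf("M=%d leftover=%d mask=0x%04x\n", M, M - tag_n_Mx, mask);
    }
    return 0;   /* M=17 -> 0x0001 ... M=20 -> 0x000f */
}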
array_512_1 = _mm512_mul_ps(BETAVECTOR, array_512_1); + array_512_2 = _mm512_mul_ps(BETAVECTOR, array_512_2); + array_512_3 = _mm512_mul_ps(BETAVECTOR, array_512_3); + + _mm512_storeu_ps(&C[idx_base_0+idx_m], array_512_0); + _mm512_storeu_ps(&C[idx_base_1+idx_m], array_512_1); + _mm512_storeu_ps(&C[idx_base_2+idx_m], array_512_2); + _mm512_storeu_ps(&C[idx_base_3+idx_m], array_512_3); + } + + if (tag_n_Mx != M) { + array_512_0 = _mm512_maskz_loadu_ps(tail_mask, &C[idx_base_0+tag_n_Mx]); + array_512_1 = _mm512_maskz_loadu_ps(tail_mask, &C[idx_base_1+tag_n_Mx]); + array_512_2 = _mm512_maskz_loadu_ps(tail_mask, &C[idx_base_2+tag_n_Mx]); + array_512_3 = _mm512_maskz_loadu_ps(tail_mask, &C[idx_base_3+tag_n_Mx]); + + array_512_0 = _mm512_mul_ps(BETAVECTOR, array_512_0); + array_512_1 = _mm512_mul_ps(BETAVECTOR, array_512_1); + array_512_2 = _mm512_mul_ps(BETAVECTOR, array_512_2); + array_512_3 = _mm512_mul_ps(BETAVECTOR, array_512_3); + + _mm512_mask_storeu_ps(&C[idx_base_0+tag_n_Mx], tail_mask, array_512_0); + _mm512_mask_storeu_ps(&C[idx_base_1+tag_n_Mx], tail_mask, array_512_1); + _mm512_mask_storeu_ps(&C[idx_base_2+tag_n_Mx], tail_mask, array_512_2); + _mm512_mask_storeu_ps(&C[idx_base_3+tag_n_Mx], tail_mask, array_512_3); + } + + idx_base_0 += LDC4x; + idx_base_1 += LDC4x; + idx_base_2 += LDC4x; + idx_base_3 += LDC4x; + } + + if (tag_n_Nx != N) { + for (BLASLONG idx_n = tag_n_Nx; idx_n < N; idx_n++) { + for (BLASLONG idx_m = 0; idx_m < tag_n_Mx; idx_m += 16) { + array_512_0 = _mm512_loadu_ps(&C[idx_base_0+idx_m]); + array_512_0 = _mm512_mul_ps(BETAVECTOR, array_512_0); + _mm512_storeu_ps(&C[idx_base_0+idx_m], array_512_0); + } + + if (tag_n_Mx != M) { + array_512_0 = _mm512_maskz_loadu_ps(tail_mask, &C[idx_base_0+tag_n_Mx]); + array_512_0 = _mm512_mul_ps(BETAVECTOR, array_512_0); + _mm512_mask_storeu_ps(&C[idx_base_0+tag_n_Mx], tail_mask, array_512_0); + } + idx_base_0 += ldc; + } + } + } else { + + } +} + +// Scale matrix C while beta is not ZERO or ONE +void sbgemm_zero_operation(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, float *C, OPENBLAS_CONST blasint ldc) +{ + BLASLONG tag_n_Nx = N & (~3); + BLASLONG tag_n_Mx = M & (~15); + + BLASLONG LDC4x = ldc*4; + BLASLONG idx_base_0 = 0; + BLASLONG idx_base_1 = ldc; + BLASLONG idx_base_2 = ldc*2; + BLASLONG idx_base_3 = ldc*3; + + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-M+tag_n_Mx)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + + __m512 ZEROVECTOR = _mm512_setzero_ps(); + + if (Order == CblasColMajor) { + for (BLASLONG idx_n = 0; idx_n < tag_n_Nx; idx_n += 4) { + for (BLASLONG idx_m = 0; idx_m < tag_n_Mx; idx_m += 16) { + _mm512_storeu_ps(&C[idx_base_0+idx_m], ZEROVECTOR); + _mm512_storeu_ps(&C[idx_base_1+idx_m], ZEROVECTOR); + _mm512_storeu_ps(&C[idx_base_2+idx_m], ZEROVECTOR); + _mm512_storeu_ps(&C[idx_base_3+idx_m], ZEROVECTOR); + } + + if (tag_n_Mx != M) { + _mm512_mask_storeu_ps(&C[idx_base_0+tag_n_Mx], tail_mask, ZEROVECTOR); + _mm512_mask_storeu_ps(&C[idx_base_1+tag_n_Mx], tail_mask, ZEROVECTOR); + _mm512_mask_storeu_ps(&C[idx_base_2+tag_n_Mx], tail_mask, ZEROVECTOR); + _mm512_mask_storeu_ps(&C[idx_base_3+tag_n_Mx], tail_mask, ZEROVECTOR); + } + + idx_base_0 += LDC4x; + idx_base_1 += LDC4x; + idx_base_2 += LDC4x; + idx_base_3 += LDC4x; + } + + if (tag_n_Nx != N) { + for (BLASLONG idx_n = tag_n_Nx; idx_n < N; idx_n++) { + for (BLASLONG idx_m = 0; idx_m < tag_n_Mx; idx_m += 16) { + _mm512_storeu_ps(&C[idx_base_0+idx_m], ZEROVECTOR); + } + + if (tag_n_Mx 
!= M) { + _mm512_mask_storeu_ps(&C[idx_base_0+tag_n_Mx], tail_mask, ZEROVECTOR); + } + idx_base_0 += ldc; + } + } + } else { + + } +} \ No newline at end of file diff --git a/kernel/x86_64/sbgemm_microk_cooperlake_template.c b/kernel/x86_64/sbgemm_microk_cooperlake_template.c new file mode 100644 index 000000000..dd4cb440b --- /dev/null +++ b/kernel/x86_64/sbgemm_microk_cooperlake_template.c @@ -0,0 +1,625 @@ +#include "sbgemm.h" +#include "bf16_common_macros.h" +#include + +#undef STORE16_COMPLETE_RESULT +#undef STORE16_MASK_COMPLETE_RESULT +#undef SBGEMM_BLOCK_KERNEL_32x8x32 +#undef SBGEMM_BLOCK_KERNEL_16x8x32 +#undef SBGEMM_BLOCK_KERNEL_32xNx32 +#undef SBGEMM_BLOCK_KERNEL_16xNx32 +#undef SBGEMM_BLOCKING_KERNEL_2 + +#ifndef ONE_ALPHA // ALPHA is not ONE + #define STORE16_COMPLETE_RESULT STORE16_COMPLETE_RESULT_ALPHA_ONE + #define STORE16_MASK_COMPLETE_RESULT STORE16_MASK_COMPLETE_RESULT_ALPHA_ONE + #define SBGEMM_BLOCK_KERNEL_32x8x32 sbgemm_block_kernel_32x8x32_alpha + #define SBGEMM_BLOCK_KERNEL_16x8x32 sbgemm_block_kernel_16x8x32_alpha + #define SBGEMM_BLOCK_KERNEL_32xNx32 sbgemm_block_kernel_32xNx32_alpha + #define SBGEMM_BLOCK_KERNEL_16xNx32 sbgemm_block_kernel_16xNx32_alpha + #define SBGEMM_BLOCKING_KERNEL_2 sbgemm_blocking_kernel_2_alpha +#else // ALPHA is ONE + #define STORE16_COMPLETE_RESULT STORE16_COMPLETE_RESULT_ONE_ONE + #define STORE16_MASK_COMPLETE_RESULT STORE16_MASK_COMPLETE_RESULT_ONE_ONE + #define SBGEMM_BLOCK_KERNEL_32x8x32 sbgemm_block_kernel_32x8x32_one + #define SBGEMM_BLOCK_KERNEL_16x8x32 sbgemm_block_kernel_16x8x32_one + #define SBGEMM_BLOCK_KERNEL_32xNx32 sbgemm_block_kernel_32xNx32_one + #define SBGEMM_BLOCK_KERNEL_16xNx32 sbgemm_block_kernel_16xNx32_one + #define SBGEMM_BLOCKING_KERNEL_2 sbgemm_blocking_kernel_2_one +#endif + + +// SBGEMM Kernel for 16> (32-m)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + result_512_tmp_0 = _mm512_permutex2var_ps(result_512_0, shuffle_idx_base0, result_512_8); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512_0, shuffle_idx_base1, result_512_8); + result_512_tmp_2 = _mm512_permutex2var_ps(result_512_1, shuffle_idx_base0, result_512_9); + result_512_tmp_3 = _mm512_permutex2var_ps(result_512_1, shuffle_idx_base1, result_512_9); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*0])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*0+16]), tail_mask) + STORE16_COMPLETE_RESULT(result_512_tmp_2, (&C[ldc*1])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_3, (&C[ldc*1+16]), tail_mask) + result_512_tmp_0 = _mm512_permutex2var_ps(result_512_2, shuffle_idx_base0, result_512_10); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512_2, shuffle_idx_base1, result_512_10); + result_512_tmp_2 = _mm512_permutex2var_ps(result_512_3, shuffle_idx_base0, result_512_11); + result_512_tmp_3 = _mm512_permutex2var_ps(result_512_3, shuffle_idx_base1, result_512_11); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*2])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*2+16]), tail_mask) + STORE16_COMPLETE_RESULT(result_512_tmp_2, (&C[ldc*3])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_3, (&C[ldc*3+16]), tail_mask) + result_512_tmp_0 = _mm512_permutex2var_ps(result_512_4, shuffle_idx_base0, result_512_12); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512_4, shuffle_idx_base1, result_512_12); + result_512_tmp_2 = _mm512_permutex2var_ps(result_512_5, shuffle_idx_base0, result_512_13); + result_512_tmp_3 = _mm512_permutex2var_ps(result_512_5, shuffle_idx_base1, result_512_13); + 
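/* NOTE: sbgemm_microk_cooperlake_template.c is written to be compiled twice,
   with and without ONE_ALPHA, so the alpha multiply is compiled out of the
   alpha==1 kernels instead of being branched at runtime. The pattern in
   miniature (hypothetical names, sketch only): */
#include <stdio.h>

#define DEFINE_SCALE(NAME, EXPR) \
    static float NAME(float alpha, float v) { return (EXPR); }

DEFINE_SCALE(scale_alpha, alpha * v)          /* ALPHA != 1 path */
DEFINE_SCALE(scale_one, ((void)alpha, v))     /* ALPHA == 1: multiply elided */

int main(void) {
    printf("%g %g\n", scale_alpha(2.0f, 3.0f), scale_one(2.0f, 3.0f));
    return 0;   /* prints 6 3 */
}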
STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*4])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*4+16]), tail_mask) + STORE16_COMPLETE_RESULT(result_512_tmp_2, (&C[ldc*5])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_3, (&C[ldc*5+16]), tail_mask) + result_512_tmp_0 = _mm512_permutex2var_ps(result_512_6, shuffle_idx_base0, result_512_14); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512_6, shuffle_idx_base1, result_512_14); + result_512_tmp_2 = _mm512_permutex2var_ps(result_512_7, shuffle_idx_base0, result_512_15); + result_512_tmp_3 = _mm512_permutex2var_ps(result_512_7, shuffle_idx_base1, result_512_15); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*6])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*6+16]), tail_mask) + STORE16_COMPLETE_RESULT(result_512_tmp_2, (&C[ldc*7])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_3, (&C[ldc*7+16]), tail_mask) + } else { + result_512_tmp_0 = _mm512_permutex2var_ps(result_512_0, shuffle_idx_base0, result_512_8); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512_0, shuffle_idx_base1, result_512_8); + result_512_tmp_2 = _mm512_permutex2var_ps(result_512_1, shuffle_idx_base0, result_512_9); + result_512_tmp_3 = _mm512_permutex2var_ps(result_512_1, shuffle_idx_base1, result_512_9); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*0])) + STORE16_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*0+16])) + STORE16_COMPLETE_RESULT(result_512_tmp_2, (&C[ldc*1])) + STORE16_COMPLETE_RESULT(result_512_tmp_3, (&C[ldc*1+16])) + result_512_tmp_0 = _mm512_permutex2var_ps(result_512_2, shuffle_idx_base0, result_512_10); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512_2, shuffle_idx_base1, result_512_10); + result_512_tmp_2 = _mm512_permutex2var_ps(result_512_3, shuffle_idx_base0, result_512_11); + result_512_tmp_3 = _mm512_permutex2var_ps(result_512_3, shuffle_idx_base1, result_512_11); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*2])) + STORE16_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*2+16])) + STORE16_COMPLETE_RESULT(result_512_tmp_2, (&C[ldc*3])) + STORE16_COMPLETE_RESULT(result_512_tmp_3, (&C[ldc*3+16])) + result_512_tmp_0 = _mm512_permutex2var_ps(result_512_4, shuffle_idx_base0, result_512_12); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512_4, shuffle_idx_base1, result_512_12); + result_512_tmp_2 = _mm512_permutex2var_ps(result_512_5, shuffle_idx_base0, result_512_13); + result_512_tmp_3 = _mm512_permutex2var_ps(result_512_5, shuffle_idx_base1, result_512_13); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*4])) + STORE16_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*4+16])) + STORE16_COMPLETE_RESULT(result_512_tmp_2, (&C[ldc*5])) + STORE16_COMPLETE_RESULT(result_512_tmp_3, (&C[ldc*5+16])) + result_512_tmp_0 = _mm512_permutex2var_ps(result_512_6, shuffle_idx_base0, result_512_14); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512_6, shuffle_idx_base1, result_512_14); + result_512_tmp_2 = _mm512_permutex2var_ps(result_512_7, shuffle_idx_base0, result_512_15); + result_512_tmp_3 = _mm512_permutex2var_ps(result_512_7, shuffle_idx_base1, result_512_15); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*6])) + STORE16_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*6+16])) + STORE16_COMPLETE_RESULT(result_512_tmp_2, (&C[ldc*7])) + STORE16_COMPLETE_RESULT(result_512_tmp_3, (&C[ldc*7+16])) + } +} + +// SBGEMM Kernel for M<=16, N=8, K can be any number, but the processing will take 32 as a base +#ifndef ONE_ALPHA // ALPHA is not ONE +void sbgemm_block_kernel_16x8x32_alpha(BLASLONG m, BLASLONG k, float alpha, 
bfloat16 *A, bfloat16 *B, float *C, int ldc) +#else // ALPHA is ONE +void sbgemm_block_kernel_16x8x32_one(BLASLONG m, BLASLONG k, float alpha, bfloat16 *A, bfloat16 *B, float *C, int ldc) +#endif +{ + int SHUFFLE_MAGIC_NO = 0x39; + BLASLONG tag_k_32x = k & (~31); + BLASLONG idxB_base = 0; + BLASLONG width = 32; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif + + __m512i arrayA_512_0; + __m512i arrayB_512_0, arrayB_512_1, arrayB_512_2, arrayB_512_3, arrayB_512_4, arrayB_512_5, arrayB_512_6, arrayB_512_7; + __m512 result_512_0, result_512_1, result_512_2, result_512_3, result_512_4, result_512_5, result_512_6, result_512_7; + + result_512_0 = _mm512_setzero_ps(); + result_512_1 = _mm512_setzero_ps(); + result_512_2 = _mm512_setzero_ps(); + result_512_3 = _mm512_setzero_ps(); + result_512_4 = _mm512_setzero_ps(); + result_512_5 = _mm512_setzero_ps(); + result_512_6 = _mm512_setzero_ps(); + result_512_7 = _mm512_setzero_ps(); + + for (BLASLONG idx_k = 0; idx_k < k; idx_k += 32) { + // Load B with unroll 8 + idxB_base = idx_k << 3; + arrayB_512_0 = _mm512_loadu_si512(&B[idxB_base + 32*0]); + arrayB_512_1 = _mm512_loadu_si512(&B[idxB_base + 32*1]); + arrayB_512_2 = _mm512_loadu_si512(&B[idxB_base + 32*2]); + arrayB_512_3 = _mm512_loadu_si512(&B[idxB_base + 32*3]); + arrayB_512_4 = _mm512_loadu_si512(&B[idxB_base + 32*4]); + arrayB_512_5 = _mm512_loadu_si512(&B[idxB_base + 32*5]); + arrayB_512_6 = _mm512_loadu_si512(&B[idxB_base + 32*6]); + arrayB_512_7 = _mm512_loadu_si512(&B[idxB_base + 32*7]); + + if (idx_k == tag_k_32x) {width = k - tag_k_32x;} + + for (BLASLONG idx = 0; idx < width;) { + // Each two rows are a group for 32-pair bf16 elements + // Load two rows into a 512 register + arrayA_512_0 = _mm512_loadu_si512(&A[idx<<4]); + + result_512_0 = _mm512_dpbf16_ps(result_512_0, (__m512bh) arrayA_512_0, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512_0))); + result_512_1 = _mm512_dpbf16_ps(result_512_1, (__m512bh) arrayA_512_0, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512_1))); + result_512_2 = _mm512_dpbf16_ps(result_512_2, (__m512bh) arrayA_512_0, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512_2))); + result_512_3 = _mm512_dpbf16_ps(result_512_3, (__m512bh) arrayA_512_0, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512_3))); + result_512_4 = _mm512_dpbf16_ps(result_512_4, (__m512bh) arrayA_512_0, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512_4))); + result_512_5 = _mm512_dpbf16_ps(result_512_5, (__m512bh) arrayA_512_0, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512_5))); + result_512_6 = _mm512_dpbf16_ps(result_512_6, (__m512bh) arrayA_512_0, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512_6))); + result_512_7 = _mm512_dpbf16_ps(result_512_7, (__m512bh) arrayA_512_0, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512_7))); + + arrayB_512_0 = _mm512_shuffle_epi32(arrayB_512_0, SHUFFLE_MAGIC_NO); + arrayB_512_1 = _mm512_shuffle_epi32(arrayB_512_1, SHUFFLE_MAGIC_NO); + arrayB_512_2 = _mm512_shuffle_epi32(arrayB_512_2, SHUFFLE_MAGIC_NO); + arrayB_512_3 = _mm512_shuffle_epi32(arrayB_512_3, SHUFFLE_MAGIC_NO); + arrayB_512_4 = _mm512_shuffle_epi32(arrayB_512_4, SHUFFLE_MAGIC_NO); + arrayB_512_5 = _mm512_shuffle_epi32(arrayB_512_5, SHUFFLE_MAGIC_NO); + arrayB_512_6 = _mm512_shuffle_epi32(arrayB_512_6, SHUFFLE_MAGIC_NO); + arrayB_512_7 = _mm512_shuffle_epi32(arrayB_512_7, SHUFFLE_MAGIC_NO); + + idx += 2; + 
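/* NOTE: in the kernel above, _mm512_shuffle_epi32(..., 0x39) rotates the
   four dwords of each 128-bit lane down by one (index pattern [1,2,3,0]),
   so the broadcastd at the front of the register always sees the next bf16
   pair of B; after four such steps, _mm512_shuffle_i32x4 rotates whole
   128-bit lanes the same way. The dword rotate, shown on one lane (sketch): */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_set_epi32(3, 2, 1, 0);     /* memory order 0,1,2,3 */
    int out[4];
    for (int step = 0; step < 4; step++) {
        _mm_storeu_si128((__m128i *)out, v);
        printf("front dword: %d\n", out[0]);   /* 0, 1, 2, 3 */
        v = _mm_shuffle_epi32(v, 0x39);        /* rotate [1,2,3,0] */
    }
    return 0;
}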
// Every 4 loops we need to switch to next 128 bits of arrayB registers + if ((idx & (~7)) == idx) { + arrayB_512_0 = _mm512_shuffle_i32x4(arrayB_512_0, arrayB_512_0, SHUFFLE_MAGIC_NO); + arrayB_512_1 = _mm512_shuffle_i32x4(arrayB_512_1, arrayB_512_1, SHUFFLE_MAGIC_NO); + arrayB_512_2 = _mm512_shuffle_i32x4(arrayB_512_2, arrayB_512_2, SHUFFLE_MAGIC_NO); + arrayB_512_3 = _mm512_shuffle_i32x4(arrayB_512_3, arrayB_512_3, SHUFFLE_MAGIC_NO); + arrayB_512_4 = _mm512_shuffle_i32x4(arrayB_512_4, arrayB_512_4, SHUFFLE_MAGIC_NO); + arrayB_512_5 = _mm512_shuffle_i32x4(arrayB_512_5, arrayB_512_5, SHUFFLE_MAGIC_NO); + arrayB_512_6 = _mm512_shuffle_i32x4(arrayB_512_6, arrayB_512_6, SHUFFLE_MAGIC_NO); + arrayB_512_7 = _mm512_shuffle_i32x4(arrayB_512_7, arrayB_512_7, SHUFFLE_MAGIC_NO); + } + } + } + + if (m != 16) { + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-m)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + + result_512_0 = _mm512_shuffle_f32x4(result_512_0, result_512_0, 0xd8); + result_512_1 = _mm512_shuffle_f32x4(result_512_1, result_512_1, 0xd8); + result_512_2 = _mm512_shuffle_f32x4(result_512_2, result_512_2, 0xd8); + result_512_3 = _mm512_shuffle_f32x4(result_512_3, result_512_3, 0xd8); + STORE16_MASK_COMPLETE_RESULT(result_512_0, (&C[ldc*0]), tail_mask) + STORE16_MASK_COMPLETE_RESULT(result_512_1, (&C[ldc*1]), tail_mask) + STORE16_MASK_COMPLETE_RESULT(result_512_2, (&C[ldc*2]), tail_mask) + STORE16_MASK_COMPLETE_RESULT(result_512_3, (&C[ldc*3]), tail_mask) + result_512_4 = _mm512_shuffle_f32x4(result_512_4, result_512_4, 0xd8); + result_512_5 = _mm512_shuffle_f32x4(result_512_5, result_512_5, 0xd8); + result_512_6 = _mm512_shuffle_f32x4(result_512_6, result_512_6, 0xd8); + result_512_7 = _mm512_shuffle_f32x4(result_512_7, result_512_7, 0xd8); + STORE16_MASK_COMPLETE_RESULT(result_512_4, (&C[ldc*4]), tail_mask) + STORE16_MASK_COMPLETE_RESULT(result_512_5, (&C[ldc*5]), tail_mask) + STORE16_MASK_COMPLETE_RESULT(result_512_6, (&C[ldc*6]), tail_mask) + STORE16_MASK_COMPLETE_RESULT(result_512_7, (&C[ldc*7]), tail_mask) + } else { + result_512_0 = _mm512_shuffle_f32x4(result_512_0, result_512_0, 0xd8); + result_512_1 = _mm512_shuffle_f32x4(result_512_1, result_512_1, 0xd8); + result_512_2 = _mm512_shuffle_f32x4(result_512_2, result_512_2, 0xd8); + result_512_3 = _mm512_shuffle_f32x4(result_512_3, result_512_3, 0xd8); + STORE16_COMPLETE_RESULT(result_512_0, (&C[ldc*0])) + STORE16_COMPLETE_RESULT(result_512_1, (&C[ldc*1])) + STORE16_COMPLETE_RESULT(result_512_2, (&C[ldc*2])) + STORE16_COMPLETE_RESULT(result_512_3, (&C[ldc*3])) + result_512_4 = _mm512_shuffle_f32x4(result_512_4, result_512_4, 0xd8); + result_512_5 = _mm512_shuffle_f32x4(result_512_5, result_512_5, 0xd8); + result_512_6 = _mm512_shuffle_f32x4(result_512_6, result_512_6, 0xd8); + result_512_7 = _mm512_shuffle_f32x4(result_512_7, result_512_7, 0xd8); + STORE16_COMPLETE_RESULT(result_512_4, (&C[ldc*4])) + STORE16_COMPLETE_RESULT(result_512_5, (&C[ldc*5])) + STORE16_COMPLETE_RESULT(result_512_6, (&C[ldc*6])) + STORE16_COMPLETE_RESULT(result_512_7, (&C[ldc*7])) + } +} + +// SBGEMM Kernel for 16> (32-m)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + for (int i = 0; i < n; i++) { + result_512_tmp_0 = _mm512_permutex2var_ps(result_512[i], shuffle_idx_base0, result_512[i+8]); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512[i], shuffle_idx_base1, result_512[i+8]); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*i])) + STORE16_MASK_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*i+16]), 
tail_mask) + } + } else { + for (int i = 0; i < n; i++) { + result_512_tmp_0 = _mm512_permutex2var_ps(result_512[i], shuffle_idx_base0, result_512[i+8]); + result_512_tmp_1 = _mm512_permutex2var_ps(result_512[i], shuffle_idx_base1, result_512[i+8]); + STORE16_COMPLETE_RESULT(result_512_tmp_0, (&C[ldc*i])) + STORE16_COMPLETE_RESULT(result_512_tmp_1, (&C[ldc*i+16])) + } + } +} + +// SBGEMM Kernel for 16<=M, N<8, K can be any number, but the processing will take 32 as a base +#ifndef ONE_ALPHA // ALPHA is not ONE +void sbgemm_block_kernel_16xNx32_alpha(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, bfloat16 *A, bfloat16 *B, float *C, int ldc) +#else // ALPHA is ONE +void sbgemm_block_kernel_16xNx32_one(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, bfloat16 *A, bfloat16 *B, float *C, int ldc) +#endif +{ + int SHUFFLE_MAGIC_NO = 0x39; + BLASLONG tag_k_32x = k & (~31); + BLASLONG idxB_base = 0; + BLASLONG width = 32; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif + + __m512i arrayA_512; + __m512i arrayB_512[8]; + __m512 result_512[8]; + + for (int i = 0; i < 8; i += 2) { + result_512[i] = _mm512_setzero_ps(); + result_512[i+1] = _mm512_setzero_ps(); + } + + for (BLASLONG idx_k = 0; idx_k < k; idx_k += 32) { + // Load B with unroll n + for (int i = 0; i < n; i ++) { + arrayB_512[i] = _mm512_loadu_si512(&B[idxB_base]); + idxB_base += 32; + } + + if (idx_k == tag_k_32x) {width = k - tag_k_32x;} + + for (BLASLONG idx = 0; idx < width;) { + // Each two rows are a group for 32-pair bf16 elements + // Load two rows into a 512 register + arrayA_512 = _mm512_loadu_si512(&A[idx<<4]); + + for (int i = 0; i < n; i ++) { + result_512[i] = _mm512_dpbf16_ps(result_512[i], (__m512bh) arrayA_512, (__m512bh) _mm512_broadcastd_epi32(_mm512_castsi512_si128(arrayB_512[i]))); + arrayB_512[i] = _mm512_shuffle_epi32(arrayB_512[i], SHUFFLE_MAGIC_NO); + } + + idx += 2; + // Every 4 loops we need to switch to next 128 bits of arrayB registers + if ((idx & (~7)) == idx) { + for (int i = 0; i < n; i++) { + arrayB_512[i] = _mm512_shuffle_i32x4(arrayB_512[i], arrayB_512[i], SHUFFLE_MAGIC_NO); + } + } + } + } + + if (m != 16) { + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-m)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + for (int i = 0; i < n; i++) { + result_512[i] = _mm512_shuffle_f32x4(result_512[i], result_512[i], 0xd8); + STORE16_MASK_COMPLETE_RESULT(result_512[i], (&C[ldc*i]), tail_mask) + } + } else { + for (int i = 0; i < n; i++) { + result_512[i] = _mm512_shuffle_f32x4(result_512[i], result_512[i], 0xd8); + STORE16_COMPLETE_RESULT(result_512[i], (&C[ldc*i])) + } + } +} +#ifndef ONE_ALPHA // ALPHA is not ONE +void sbgemm_blocking_kernel_2_alpha(blasint M, blasint N, blasint K, float alpha, bfloat16 *A, blasint lda, bfloat16 *B, blasint ldb, float *C, blasint ldc, bfloat16 * block_A, bfloat16 * block_B) +#else // ALPHA is ONE +void sbgemm_blocking_kernel_2_one(blasint M, blasint N, blasint K, float alpha, bfloat16 *A, blasint lda, bfloat16 *B, blasint ldb, float *C, blasint ldc, bfloat16 * block_A, bfloat16 * block_B) +#endif +{ + BLASLONG m_step, n_step, k_step, k_step_round32; + BLASLONG tag_m_Nx = M & (~(BF16_BLOCK_THRES_M-1)); + + BLASLONG n_from, n_to; + BLASLONG tag_n_Nx; + + n_from = 0; + n_to = (BF16_BLOCK_THRES_N > N) ? N : BF16_BLOCK_THRES_N; + tag_n_Nx = n_to & (~(BF16_BLOCK_STEP_N-1)); + + k_step = (K > BF16_BLOCK_THRES_K) ? BF16_BLOCK_THRES_K : K; + k_step_round32 = k_step & (~31); + k_step_round32 = (k_step > k_step_round32) ? 
(k_step_round32 + 32) : k_step_round32; + + if (M >= BF16_BLOCK_THRES_M) { + while (n_from < N) { + for (BLASLONG idx_k = 0; idx_k < K;) { + // Use Kx32 kernel when BF16_BLOCK_THRES_M==32, Kx16 kernel when BF16_BLOCK_THRES_M==16, ... + COL_MAJOR_INCOPY_KERNEL_Kx32(k_step, &A(idx_k, 0), lda, block_A); + // TODO: MT + for (BLASLONG idx_n = n_from; idx_n < tag_n_Nx; idx_n += BF16_BLOCK_STEP_N) { + // Use 8x32 kernel when BF16_BLOCK_THRES_N==8, 4x32 kernel when BF16_BLOCK_THRES_N==4, ... + COL_MAJOR_ONCOPY_KERNEL_8x32(k_step, &B(idx_n, idx_k), ldb, block_B + (idx_n-n_from)*k_step_round32); + SBGEMM_BLOCK_KERNEL_32x8x32(32, k_step, alpha, block_A, block_B + (idx_n-n_from)*k_step_round32, &C(idx_n, 0), ldc); + } + + if (tag_n_Nx != n_to) { + n_step = n_to - tag_n_Nx; + COL_MAJOR_ONCOPY_KERNEL_Nx32(n_step, k_step, &B(tag_n_Nx, idx_k), ldb, block_B + (tag_n_Nx-n_from)*k_step_round32); + SBGEMM_BLOCK_KERNEL_32xNx32(32, n_step, k_step, alpha, block_A, block_B + (tag_n_Nx-n_from)*k_step_round32, &C(tag_n_Nx, 0), ldc); + } + + for (BLASLONG idx_m = BF16_BLOCK_THRES_M; idx_m < tag_m_Nx; idx_m += BF16_BLOCK_THRES_M) { + COL_MAJOR_INCOPY_KERNEL_Kx32(k_step, &A(idx_k, idx_m), lda, block_A); + for (BLASLONG idx_n = n_from; idx_n < tag_n_Nx; idx_n += BF16_BLOCK_STEP_N) { + SBGEMM_BLOCK_KERNEL_32x8x32(32, k_step, alpha, block_A, block_B + (idx_n-n_from)*k_step_round32, &C(idx_n, idx_m), ldc); + } + + if (tag_n_Nx != n_to) { + n_step = n_to - tag_n_Nx; + SBGEMM_BLOCK_KERNEL_32xNx32(32, n_step, k_step, alpha, block_A, block_B + (tag_n_Nx-n_from)*k_step_round32, &C(tag_n_Nx, idx_m), ldc); + } + } + + if (tag_m_Nx != M) { + m_step = M - tag_m_Nx; + if (m_step > 16) { + COL_MAJOR_INCOPY_KERNEL_Kx32m(k_step, m_step, &A(idx_k, tag_m_Nx), lda, block_A); + for (BLASLONG idx_n = n_from; idx_n < tag_n_Nx; idx_n += BF16_BLOCK_STEP_N) { + SBGEMM_BLOCK_KERNEL_32x8x32(m_step, k_step, alpha, block_A, block_B + (idx_n-n_from)*k_step_round32, &C(idx_n, tag_m_Nx), ldc); + } + + if (tag_n_Nx != n_to) { + n_step = n_to - tag_n_Nx; + SBGEMM_BLOCK_KERNEL_32xNx32(m_step, n_step, k_step, alpha, block_A, block_B + (tag_n_Nx-n_from)*k_step_round32, &C(tag_n_Nx, tag_m_Nx), ldc); + } + } else if (m_step == 16) { + COL_MAJOR_INCOPY_KERNEL_Kx16(k_step, m_step, &A(idx_k, tag_m_Nx), lda, block_A); + for (BLASLONG idx_n = n_from; idx_n < tag_n_Nx; idx_n += BF16_BLOCK_STEP_N) { + SBGEMM_BLOCK_KERNEL_16x8x32(m_step, k_step, alpha, block_A, block_B + (idx_n-n_from)*k_step_round32, &C(idx_n, tag_m_Nx), ldc); + } + + if (tag_n_Nx != n_to) { + n_step = n_to - tag_n_Nx; + SBGEMM_BLOCK_KERNEL_16xNx32(m_step, n_step, k_step, alpha, block_A, block_B + (tag_n_Nx-n_from)*k_step_round32, &C(tag_n_Nx, tag_m_Nx), ldc); + } + } else { + COL_MAJOR_INCOPY_KERNEL_Kx16m(k_step, m_step, &A(idx_k, tag_m_Nx), lda, block_A); + for (BLASLONG idx_n = n_from; idx_n < tag_n_Nx; idx_n += BF16_BLOCK_STEP_N) { + SBGEMM_BLOCK_KERNEL_16x8x32(m_step, k_step, alpha, block_A, block_B + (idx_n-n_from)*k_step_round32, &C(idx_n, tag_m_Nx), ldc); + } + + if (tag_n_Nx != n_to) { + n_step = n_to - tag_n_Nx; + SBGEMM_BLOCK_KERNEL_16xNx32(m_step, n_step, k_step, alpha, block_A, block_B + (tag_n_Nx-n_from)*k_step_round32, &C(tag_n_Nx, tag_m_Nx), ldc); + } + } + } + + idx_k += k_step; + k_step = K - idx_k; + k_step = (k_step > BF16_BLOCK_THRES_K) ? BF16_BLOCK_THRES_K : k_step; + k_step_round32 = k_step & (~31); + k_step_round32 = (k_step > k_step_round32) ? (k_step_round32 + 32) : k_step_round32; + } + + n_from = n_to; + n_to += BF16_BLOCK_THRES_N; + n_to = (n_to > N) ? 
N : n_to;
+            tag_n_Nx = n_to & (~(BF16_BLOCK_STEP_N-1));
+        }
+    } else {
+        m_step = M - tag_m_Nx;
+        while (n_from < N) {
+            for (BLASLONG idx_k = 0; idx_k < K;) {
+                // Use Kx32 kernel when BF16_BLOCK_THRES_M==32, Kx16 kernel when BF16_BLOCK_THRES_M==16, ...
+                COL_MAJOR_INCOPY_KERNEL_Kx32m(k_step, m_step, &A(idx_k, 0), lda, block_A);
+                // TODO: MT
+                for (BLASLONG idx_n = n_from; idx_n < tag_n_Nx; idx_n += BF16_BLOCK_STEP_N) {
+                    // Use 8x32 kernel when BF16_BLOCK_THRES_N==8, 4x32 kernel when BF16_BLOCK_THRES_N==4, ...
+                    COL_MAJOR_ONCOPY_KERNEL_8x32(k_step, &B(idx_n, idx_k), ldb, block_B + (idx_n-n_from)*k_step_round32);
+                    SBGEMM_BLOCK_KERNEL_32x8x32(m_step, k_step, alpha, block_A, block_B + (idx_n-n_from)*k_step_round32, &C(idx_n, 0), ldc);
+                }
+
+                if (tag_n_Nx != n_to) {
+                    n_step = n_to - tag_n_Nx;
+                    COL_MAJOR_ONCOPY_KERNEL_Nx32(n_step, k_step, &B(tag_n_Nx, idx_k), ldb, block_B + (tag_n_Nx-n_from)*k_step_round32);
+                    SBGEMM_BLOCK_KERNEL_32xNx32(m_step, n_step, k_step, alpha, block_A, block_B + (tag_n_Nx-n_from)*k_step_round32, &C(tag_n_Nx, 0), ldc);
+                }
+
+                idx_k += k_step;
+                k_step = K - idx_k;
+                k_step = (k_step > BF16_BLOCK_THRES_K) ? BF16_BLOCK_THRES_K : k_step;
+                k_step_round32 = k_step & (~31);
+                k_step_round32 = (k_step > k_step_round32) ? (k_step_round32 + 32) : k_step_round32;
+            }
+            n_from = n_to;
+            n_to += BF16_BLOCK_THRES_N;
+            n_to = (n_to > N) ? N : n_to;
+            tag_n_Nx = n_to & (~(BF16_BLOCK_STEP_N-1));
+        }
+    }
+}
+
+#ifndef ONE_ALPHA      // ALPHA is not ONE
+void sbgemm_internal_kernel_alpha(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K,
+              OPENBLAS_CONST float alpha, OPENBLAS_CONST bfloat16 *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST bfloat16 *B, OPENBLAS_CONST blasint ldb, float *C, OPENBLAS_CONST blasint ldc)
+#else                  // ALPHA is ONE
+void sbgemm_internal_kernel_one(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K,
+              OPENBLAS_CONST float alpha, OPENBLAS_CONST bfloat16 *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST bfloat16 *B, OPENBLAS_CONST blasint ldb, float *C, OPENBLAS_CONST blasint ldc)
+#endif
+{
+    bfloat16 block_A[BF16_BLOCK_THRES_K * BF16_BLOCK_THRES_M];
+    bfloat16 block_B[BF16_BLOCK_THRES_N * BF16_BLOCK_THRES_K];
+
+    // TODO: A and B are assumed non-transposed for now; the transposed scenarios are to be added later
+    if (Order == CblasColMajor) {
+        SBGEMM_BLOCKING_KERNEL_2(M, N, K, alpha, A, lda, B, ldb, C, ldc, block_A, block_B);
+    } else {
+
+    }
+}
\ No newline at end of file
diff --git a/kernel/x86_64/sbgemv_n.c b/kernel/x86_64/sbgemv_n.c new file mode 100644 index 000000000..18e64dc3f --- /dev/null +++ b/kernel/x86_64/sbgemv_n.c @@ -0,0 +1,137 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+
+#include "common.h"
+
+#if defined (COOPERLAKE)
+#include "sbgemv_n_microk_cooperlake.c"
+#endif
+
+#define ALIGN64_ALLOC(alloc_size, TYPE, ptr_align, ptr) \
+    ptr = (TYPE *) malloc(sizeof(TYPE)*alloc_size + 63); \
+    ptr_align = ((int)(((uintptr_t)ptr & (uintptr_t)0x3F))!=0) ? (TYPE *)((char *)ptr + (64 - (int)((uintptr_t)ptr & (uintptr_t)0x3F))) : ptr
+
+#define ALIGN64_FREE(ptr) \
+    free(ptr)
+
+#ifndef HAVE_SBGEMV_N_ACCL_KERNEL
+static void sbgemv_kernel_n(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y)
+{
+    BLASLONG offset_lda, offset_m;
+    float accum = 0.0;
+    float tmp_x = 0.0;
+
+    bfloat16 * a_bf16 = malloc(sizeof(bfloat16)*m*n);
+    float * a_fp32 = malloc(sizeof(float)*m*n);
+    float * x_fp32 = malloc(sizeof(float)*n);
+
+    for (BLASLONG j=0; j= 10 && defined(__AVX512BF16__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_SBGEMV_N_ACCL_KERNEL 1
+#include "common.h"
+#include <immintrin.h>
+
+// Define micro kernels for ALPHA not ONE && BETA effective && BETA not ONE scenarios
+#undef ZERO_BETA
+#undef ONE_BETA
+#undef ONE_ALPHA
+#include "sbgemv_n_microk_cooperlake_template.c"
+
+// Define micro kernels for ALPHA not ONE && BETA as ONE scenarios
+#undef ZERO_BETA
+#define ONE_BETA 1
+#undef ONE_ALPHA
+#include "sbgemv_n_microk_cooperlake_template.c"
+
+// Define micro kernels for ALPHA not ONE && BETA ineffective (BETA == 0) scenarios
+#define ZERO_BETA 1
+#undef ONE_ALPHA
+#include "sbgemv_n_microk_cooperlake_template.c"
+
+// Define micro kernels for ALPHA as ONE && BETA ineffective (BETA == 0) scenarios
+#define ZERO_BETA 1
+#define ONE_ALPHA 1
+#include "sbgemv_n_microk_cooperlake_template.c"
+
+static int sbgemv_kernel_n(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y)
+{
+    if (beta == ZERO) {              // BETA == 0.0, no need to accumulate the original Y data
+        if (alpha == ONE) {          // ALPHA == 1.0, no need to multiply ALPHA
+            sbgemv_kernel_32xN_lda_direct(m, n, alpha, a, lda, x, y);
+        } else {                     // ALPHA != 1.0, need to multiply ALPHA
+            sbgemv_kernel_32xN_lda_direct_alpha(m, n, alpha, a, lda, x, y);
+        }
+    } else {                         // BETA != 0.0, need to accumulate the original Y data no matter what ALPHA is
+        if (beta == ONE) {
+            sbgemv_kernel_32xN_lda_direct_alpha_one(m, n, alpha, a, lda, x, beta, y);
+        } else {
+            sbgemv_kernel_32xN_lda_direct_alpha_beta(m, n, alpha, a, lda, x, beta, y);
+        }
+    }
+
+    return 0;
+}
+
+#endif
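A note on the construction above (not part of the patch): the four includes of sbgemv_n_microk_cooperlake_template.c compile one template file four times, with the ZERO_BETA/ONE_BETA/ONE_ALPHA macros selecting a differently named, differently specialized kernel on each pass, so the per-element loops never test alpha or beta. A minimal sketch of the same pattern, using a hypothetical saxpy_template.c and dispatcher:

/* file: saxpy_template.c -- deliberately has no include guard; it is meant to be
   included once per specialization, like the template file above */
#ifdef ONE_ALPHA
static void saxpy_one(long n, float alpha, const float *x, float *y)
#else
static void saxpy_alpha(long n, float alpha, const float *x, float *y)
#endif
{
#ifdef ONE_ALPHA
    (void)alpha;              /* ALPHA is known to be 1.0 in this instantiation */
#endif
    for (long i = 0; i < n; i++) {
#ifdef ONE_ALPHA
        y[i] += x[i];         /* the multiply is compiled out */
#else
        y[i] += alpha * x[i];
#endif
    }
}

/* file: saxpy.c -- mirrors the dispatcher just above */
#undef ONE_ALPHA
#include "saxpy_template.c"   /* instantiates saxpy_alpha() */
#define ONE_ALPHA 1
#include "saxpy_template.c"   /* instantiates saxpy_one() */

void saxpy(long n, float alpha, const float *x, float *y)
{
    if (alpha == 1.0f) saxpy_one(n, alpha, x, y);
    else               saxpy_alpha(n, alpha, x, y);
}

The branch is taken once per call, while each instantiated loop body stays free of per-element alpha tests.
diff --git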
a/kernel/x86_64/sbgemv_n_microk_cooperlake_template.c b/kernel/x86_64/sbgemv_n_microk_cooperlake_template.c new file mode 100644 index 000000000..46e6d0ff9 --- /dev/null +++ b/kernel/x86_64/sbgemv_n_microk_cooperlake_template.c @@ -0,0 +1,234 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+#include <immintrin.h>
+#include "common.h"
+
+// Include common macros for BF16 based operations with IA intrinsics
+#include "bf16_common_macros.h"
+
+#ifndef ZERO_BETA  // Beta is non-zero
+
+#ifndef ONE_BETA   // BETA is not ONE
+
+#define STORE16_COMPLETE_RESULT       STORE16_COMPLETE_RESULT_ALPHA_BETA
+#define STORE16_MASK_COMPLETE_RESULT  STORE16_MASK_COMPLETE_RESULT_ALPHA_BETA
+#define STORE8_COMPLETE_RESULT        STORE8_COMPLETE_RESULT_ALPHA_BETA
+#define STORE8_MASK_COMPLETE_RESULT   STORE8_MASK_COMPLETE_RESULT_ALPHA_BETA
+#define STORE4_COMPLETE_RESULT        STORE4_COMPLETE_RESULT_ALPHA_BETA
+#define STORE4_MASK_COMPLETE_RESULT   STORE4_MASK_COMPLETE_RESULT_ALPHA_BETA
+
+#else              // BETA is ONE
+
+#define STORE16_COMPLETE_RESULT       STORE16_COMPLETE_RESULT_ALPHA_ONE
+#define STORE16_MASK_COMPLETE_RESULT  STORE16_MASK_COMPLETE_RESULT_ALPHA_ONE
+#define STORE8_COMPLETE_RESULT        STORE8_COMPLETE_RESULT_ALPHA_ONE
+#define STORE8_MASK_COMPLETE_RESULT   STORE8_MASK_COMPLETE_RESULT_ALPHA_ONE
+#define STORE4_COMPLETE_RESULT        STORE4_COMPLETE_RESULT_ALPHA_ONE
+#define STORE4_MASK_COMPLETE_RESULT   STORE4_MASK_COMPLETE_RESULT_ALPHA_ONE
+
+#endif
+
+#else              // BETA is zero
+
+#ifndef ONE_ALPHA  // ALPHA is not ONE
+
+#define STORE16_COMPLETE_RESULT       STORE16_COMPLETE_RESULT_ALPHA
+#define STORE16_MASK_COMPLETE_RESULT  STORE16_MASK_COMPLETE_RESULT_ALPHA
+#define STORE8_COMPLETE_RESULT        STORE8_COMPLETE_RESULT_ALPHA
+#define STORE8_MASK_COMPLETE_RESULT   STORE8_MASK_COMPLETE_RESULT_ALPHA
+#define STORE4_COMPLETE_RESULT        STORE4_COMPLETE_RESULT_ALPHA
+#define STORE4_MASK_COMPLETE_RESULT   STORE4_MASK_COMPLETE_RESULT_ALPHA
+
+#else              // ALPHA is ONE
+
+#define STORE16_COMPLETE_RESULT       STORE16_COMPLETE_RESULT_DIRECT
+#define STORE16_MASK_COMPLETE_RESULT  STORE16_MASK_COMPLETE_RESULT_DIRECT
+#define STORE8_COMPLETE_RESULT        STORE8_COMPLETE_RESULT_DIRECT
+#define STORE8_MASK_COMPLETE_RESULT   STORE8_MASK_COMPLETE_RESULT_DIRECT
+#define STORE4_COMPLETE_RESULT        STORE4_COMPLETE_RESULT_DIRECT
+#define STORE4_MASK_COMPLETE_RESULT   STORE4_MASK_COMPLETE_RESULT_DIRECT
+
+#endif
+
+#endif
+
+
+
+// 32 rows parallel processing BF16 GEMV kernel for big N && lda effective scenario (process before interleave)
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+static int sbgemv_kernel_32xN_lda_direct_alpha_beta(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y)
+#else
+static int sbgemv_kernel_32xN_lda_direct_alpha_one(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y)
+#endif
+#else
+#ifndef ONE_ALPHA
+static int sbgemv_kernel_32xN_lda_direct_alpha(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y)
+#else
+static int sbgemv_kernel_32xN_lda_direct(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y)
+#endif
+#endif
+{
+    BLASLONG tag_m_32x  = m & (~31);
+    BLASLONG tag_m_128x = m & (~127);
+
+    __m512 accum512_0, accum512_1, accum512_2, accum512_3, accum512_4, accum512_5, accum512_6, accum512_7, \
+           accum512_8, accum512_9, accum512_10, accum512_11, accum512_12, accum512_13, accum512_14, accum512_15;
+
+#ifndef ONE_ALPHA
+    __m512 ALPHAVECTOR = _mm512_set1_ps(alpha);
+#endif
+#ifndef ZERO_BETA
+    __m512 BETAVECTOR = _mm512_set1_ps(beta);
+#endif
+
+    __m512i matrixArray_seed_0, matrixArray_seed_1, matrixArray_seed_2, matrixArray_seed_3;
+    __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6, matrixArray_7;
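+    // Added note (not part of the original patch): _mm512_dpbf16_ps sums a *pair* of bf16
+    // products into each fp32 lane, so the even/odd blends below zero one element of every
+    // pair; each fp32 lane then accumulates exactly one a[row]*x[idx_n] term, and the
+    // _mm512_permutex2var_ps calls at the end re-interleave the even-row/odd-row partial
+    // sums back into memory order before the stores.
+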
__m512i xArray_0; + + __m512i ZERO512 = _mm512_setzero_si512(); + + unsigned int blend_hi_mask_value = ((unsigned int)0xaaaaaaaa); + __mmask32 blend_hi_mask = *((__mmask32*) &blend_hi_mask_value); + unsigned int blend_lo_mask_value = ((unsigned int)0x55555555); + __mmask32 blend_lo_mask = *((__mmask32*) &blend_lo_mask_value); + + __m512i M512_EPI32_8 = _mm512_set1_epi32(8); + __m512i idx_base_0 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4, 19, 3, 18, 2, 17, 1, 16, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_8); + + for (BLASLONG idx_m = 0; idx_m < tag_m_128x; idx_m+=128) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + accum512_2 = _mm512_setzero_ps(); + accum512_3 = _mm512_setzero_ps(); + accum512_4 = _mm512_setzero_ps(); + accum512_5 = _mm512_setzero_ps(); + accum512_6 = _mm512_setzero_ps(); + accum512_7 = _mm512_setzero_ps(); + + for (BLASLONG idx_n = 0; idx_n < n; idx_n++) { + xArray_0 = _mm512_set1_epi16(x[idx_n]); + + BF16_MATRIX_LOAD_1x32(matrixArray_seed_0, a, lda, idx_n, idx_m + 0) + BF16_MATRIX_LOAD_1x32(matrixArray_seed_1, a, lda, idx_n, idx_m + 32) + BF16_MATRIX_LOAD_1x32(matrixArray_seed_2, a, lda, idx_n, idx_m + 64) + BF16_MATRIX_LOAD_1x32(matrixArray_seed_3, a, lda, idx_n, idx_m + 96) + + matrixArray_0 = _mm512_mask_blend_epi16(blend_lo_mask, ZERO512, matrixArray_seed_0); + matrixArray_1 = _mm512_mask_blend_epi16(blend_hi_mask, ZERO512, matrixArray_seed_0); + matrixArray_2 = _mm512_mask_blend_epi16(blend_lo_mask, ZERO512, matrixArray_seed_1); + matrixArray_3 = _mm512_mask_blend_epi16(blend_hi_mask, ZERO512, matrixArray_seed_1); + matrixArray_4 = _mm512_mask_blend_epi16(blend_lo_mask, ZERO512, matrixArray_seed_2); + matrixArray_5 = _mm512_mask_blend_epi16(blend_hi_mask, ZERO512, matrixArray_seed_2); + matrixArray_6 = _mm512_mask_blend_epi16(blend_lo_mask, ZERO512, matrixArray_seed_3); + matrixArray_7 = _mm512_mask_blend_epi16(blend_hi_mask, ZERO512, matrixArray_seed_3); + + BF16_DOT_1x32(accum512_0, matrixArray_0, xArray_0) + BF16_DOT_1x32(accum512_1, matrixArray_1, xArray_0) + BF16_DOT_1x32(accum512_2, matrixArray_2, xArray_0) + BF16_DOT_1x32(accum512_3, matrixArray_3, xArray_0) + BF16_DOT_1x32(accum512_4, matrixArray_4, xArray_0) + BF16_DOT_1x32(accum512_5, matrixArray_5, xArray_0) + BF16_DOT_1x32(accum512_6, matrixArray_6, xArray_0) + BF16_DOT_1x32(accum512_7, matrixArray_7, xArray_0) + } + accum512_8 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_1); + accum512_9 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_1); + accum512_10 = _mm512_permutex2var_ps(accum512_2, idx_base_0, accum512_3); + accum512_11 = _mm512_permutex2var_ps(accum512_2, idx_base_1, accum512_3); + accum512_12 = _mm512_permutex2var_ps(accum512_4, idx_base_0, accum512_5); + accum512_13 = _mm512_permutex2var_ps(accum512_4, idx_base_1, accum512_5); + accum512_14 = _mm512_permutex2var_ps(accum512_6, idx_base_0, accum512_7); + accum512_15 = _mm512_permutex2var_ps(accum512_6, idx_base_1, accum512_7); + + STORE16_COMPLETE_RESULT(accum512_8, y+idx_m+0) + STORE16_COMPLETE_RESULT(accum512_9, y+idx_m+16) + STORE16_COMPLETE_RESULT(accum512_10, y+idx_m+32) + STORE16_COMPLETE_RESULT(accum512_11, y+idx_m+48) + STORE16_COMPLETE_RESULT(accum512_12, y+idx_m+64) + STORE16_COMPLETE_RESULT(accum512_13, y+idx_m+80) + STORE16_COMPLETE_RESULT(accum512_14, y+idx_m+96) + STORE16_COMPLETE_RESULT(accum512_15, y+idx_m+112) + } + + for (BLASLONG idx_m = tag_m_128x; idx_m < tag_m_32x; idx_m+=32) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = 
_mm512_setzero_ps(); + + for (BLASLONG idx_n = 0; idx_n < n; idx_n++) { + xArray_0 = _mm512_set1_epi16(x[idx_n]); + + BF16_MATRIX_LOAD_1x32(matrixArray_seed_0, a, lda, idx_n, idx_m) + + matrixArray_0 = _mm512_mask_blend_epi16(blend_lo_mask, ZERO512, matrixArray_seed_0); + matrixArray_1 = _mm512_mask_blend_epi16(blend_hi_mask, ZERO512, matrixArray_seed_0); + + BF16_DOT_1x32(accum512_0, matrixArray_0, xArray_0) + BF16_DOT_1x32(accum512_1, matrixArray_1, xArray_0) + } + accum512_8 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_1); + accum512_9 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_1); + + STORE16_COMPLETE_RESULT(accum512_8, y+idx_m+0) + STORE16_COMPLETE_RESULT(accum512_9, y+idx_m+16) + } + + if (tag_m_32x != m) { + unsigned int tail_mask_value = (((unsigned int)0xffffffff) >> (32-(m&31))); + __mmask32 tail_mask = *((__mmask32*) &tail_mask_value); + + unsigned short store_tail_mask_value = (((unsigned int)0xffff) >> (16-(m&15))); + __mmask32 store_tail_mask = *((__mmask32*) &store_tail_mask_value); + + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + for (BLASLONG idx_n = 0; idx_n < n; idx_n++) { + xArray_0 = _mm512_set1_epi16(x[idx_n]); + + BF16_MATRIX_MASKZ_LOAD_1x32(matrixArray_seed_0, a, lda, idx_n, tag_m_32x, tail_mask) + + matrixArray_0 = _mm512_mask_blend_epi16(blend_lo_mask, ZERO512, matrixArray_seed_0); + matrixArray_1 = _mm512_mask_blend_epi16(blend_hi_mask, ZERO512, matrixArray_seed_0); + + BF16_DOT_1x32(accum512_0, matrixArray_0, xArray_0) + BF16_DOT_1x32(accum512_1, matrixArray_1, xArray_0) + } + accum512_8 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_1); + accum512_9 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_1); + + if ((m-tag_m_32x) > 16) { + STORE16_COMPLETE_RESULT(accum512_8, y+tag_m_32x+0) + STORE16_MASK_COMPLETE_RESULT(accum512_9, y+tag_m_32x+16, store_tail_mask) + } else { + STORE16_MASK_COMPLETE_RESULT(accum512_8, y+tag_m_32x+0, store_tail_mask) + } + } + + return 0; +} diff --git a/kernel/x86_64/sbgemv_t.c b/kernel/x86_64/sbgemv_t.c new file mode 100644 index 000000000..22b099116 --- /dev/null +++ b/kernel/x86_64/sbgemv_t.c @@ -0,0 +1,142 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+
+#include "common.h"
+
+#if defined (COOPERLAKE)
+#include "sbgemv_t_microk_cooperlake.c"
+#endif
+
+#define ALIGN64_ALLOC(alloc_size, TYPE, ptr_align, ptr) \
+    ptr = (TYPE *) malloc(sizeof(TYPE)*alloc_size + 63); \
+    ptr_align = ((int)(((uintptr_t)ptr & (uintptr_t)0x3F))!=0) ? (TYPE *)((char *)ptr + (64 - (int)((uintptr_t)ptr & (uintptr_t)0x3F))) : ptr
+
+#define ALIGN64_FREE(ptr) \
+    free(ptr)
+
+#ifndef HAVE_SBGEMV_T_ACCL_KERNEL
+static void sbgemv_kernel_t(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y)
+{
+    BLASLONG offset_lda, offset_n;
+    float accum = 0.0;
+
+    bfloat16 * a_bf16 = malloc(sizeof(bfloat16)*m*n);
+    float * a_fp32 = malloc(sizeof(float)*m*n);
+    float * x_fp32 = malloc(sizeof(float)*n);
+
+    for (BLASLONG i=0; i= 10 && defined(__AVX512BF16__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_SBGEMV_T_ACCL_KERNEL 1
+
+// Define micro kernels for ALPHA not ONE && BETA effective && BETA not ONE scenarios
+#undef ZERO_BETA
+#undef ONE_BETA
+#undef ONE_ALPHA
+#include "sbgemv_t_microk_cooperlake_template.c"
+
+// Define micro kernels for ALPHA not ONE && BETA as ONE scenarios
+#undef ZERO_BETA
+#define ONE_BETA 1
+#undef ONE_ALPHA
+#include "sbgemv_t_microk_cooperlake_template.c"
+
+// Define micro kernels for ALPHA not ONE && BETA ineffective (BETA == 0) scenarios
+#define ZERO_BETA 1
+#undef ONE_ALPHA
+#include "sbgemv_t_microk_cooperlake_template.c"
+
+// Define micro kernels for ALPHA as ONE && BETA ineffective (BETA == 0) scenarios
+#define ZERO_BETA 1
+#define ONE_ALPHA 1
+#include "sbgemv_t_microk_cooperlake_template.c"
+
+static int sbgemv_kernel_t(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y)
+{
+    if (beta == ZERO) {              // BETA == 0.0, no need to accumulate the original Y data
+        if (alpha == ONE) {          // ALPHA == 1.0, no need to multiply ALPHA
+            if (n > 127) {
+                sbgemv_kernel_1x128_lda_direct(m, n, alpha, a, lda, x, y);
+            } else if (n > 32) {
+                sbgemv_kernel_8x32_lda_direct(m, n, alpha, a, lda, x, y);
+            } else {
+                if (n > 16) {
+                    sbgemv_kernel_8x16p_lda(m, n, alpha, a, lda, x, y);
+                } else {
+                    if (lda == n) {
+                        switch(n) {
+                            case 1:  sbgemv_kernel_32x1 (m, alpha, a, x, y); break;
+                            case 2:  sbgemv_kernel_32x2 (m, alpha, a, x, y); break;
+                            case 3:  sbgemv_kernel_32x3 (m, alpha, a, x, y); break;
+                            case 4:  sbgemv_kernel_16x4 (m, alpha, a, x, y); break;
+                            case 5:  sbgemv_kernel_30x5 (m, alpha, a, x, y); break;
+                            case 6:  sbgemv_kernel_16x6 (m, alpha, a, x, y); break;
+                            case 7:  sbgemv_kernel_16x7 (m, alpha, a, x, y); break;
+                            case 8:  sbgemv_kernel_16x8 (m, alpha, a, x, y); break;
+                            case 9:  sbgemv_kernel_14x9 (m, alpha, a, x, y); break;
+                            case 10: sbgemv_kernel_12x10(m, alpha, a, x, y); break;
+                            case 11: sbgemv_kernel_15x11(m, alpha, a, x, y); break;
+                            case 12: sbgemv_kernel_15x12(m, alpha, a, x, y); break;
+                            case 13: sbgemv_kernel_16x13(m, alpha, a, x, y); break;
+                            case 14: sbgemv_kernel_16x14(m, alpha, a, x, y); break;
+                            case 15: sbgemv_kernel_16x15(m, alpha, a, x, y); break;
+                            case 16: sbgemv_kernel_16x16(m, alpha, a, x, y); break;
+                            default: break;
+                        }
+                    } else {
+                        sbgemv_kernel_8x16m_lda(m, n, alpha, a, lda, x, y);
+                    }
+                }
+            }
+        } else {                     // ALPHA != 1.0, need to multiply ALPHA
+            if (n > 127) {
+                sbgemv_kernel_1x128_lda_direct_alpha(m, n, alpha, a, lda, x, y);
+            } else if (n > 32) {
+                sbgemv_kernel_8x32_lda_direct_alpha(m, n, alpha, a, lda, x, y);
+            } else {
+                if (n > 16) {
+                    sbgemv_kernel_8x16p_lda_alpha(m, n, alpha, a, lda, x, y);
+                } else {
+                    if (lda == n) {
+                        switch(n) {
+                            case 1:  sbgemv_kernel_32x1_alpha (m, alpha, a, x, y); break;
+                            case 2:  sbgemv_kernel_32x2_alpha (m, alpha, a, x, y); break;
+                            case 3:  sbgemv_kernel_32x3_alpha (m, alpha, a, x, y); break;
+                            case 4:  sbgemv_kernel_16x4_alpha (m, alpha, a, x, y); break;
+                            case 5:  sbgemv_kernel_30x5_alpha (m, alpha, a, x, y); break;
+                            case 6:  sbgemv_kernel_16x6_alpha (m, alpha, a, x, y); break;
+                            case 7:  sbgemv_kernel_16x7_alpha (m, alpha, a, x, y); break;
+                            case 8:  sbgemv_kernel_16x8_alpha (m, alpha, a, x, y); break;
+                            case 9:  sbgemv_kernel_14x9_alpha (m, alpha, a, x, y); break;
+                            case 10: sbgemv_kernel_12x10_alpha(m, alpha, a, x, y); break;
+                            case 11: sbgemv_kernel_15x11_alpha(m, alpha, a, x, y); break;
+                            case 12: sbgemv_kernel_15x12_alpha(m, alpha, a, x, y); break;
+                            case 13: sbgemv_kernel_16x13_alpha(m, alpha, a, x, y); break;
+                            case 14: sbgemv_kernel_16x14_alpha(m, alpha, a, x, y); break;
+                            case 15: sbgemv_kernel_16x15_alpha(m, alpha, a, x, y); break;
+                            case 16: sbgemv_kernel_16x16_alpha(m, alpha, a, x, y); break;
+                            default: break;
+                        }
+                    } else {
+                        sbgemv_kernel_8x16m_lda_alpha(m, n, alpha, a, lda, x, y);
+                    }
+                }
+            }
+        }
+    } else {                         // BETA != 0.0, need to accumulate the original Y data no matter what ALPHA is
+        if (beta == ONE) {
+            if (n > 127) {
+                sbgemv_kernel_1x128_lda_direct_alpha_one(m, n, alpha, a, lda, x, beta, y);
+            } else if (n > 32) {
+                sbgemv_kernel_8x32_lda_direct_alpha_one(m, n, alpha, a, lda, x, beta, y);
+            } else {
+                if (n > 16) {
+                    sbgemv_kernel_8x16p_lda_alpha_one(m, n, alpha, a, lda, x, beta, y);
+                } else {
+                    if (lda == n) {
+                        switch(n) {
+                            case 1:  sbgemv_kernel_32x1_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 2:  sbgemv_kernel_32x2_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 3:  sbgemv_kernel_32x3_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 4:  sbgemv_kernel_16x4_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 5:  sbgemv_kernel_30x5_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 6:  sbgemv_kernel_16x6_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 7:  sbgemv_kernel_16x7_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 8:  sbgemv_kernel_16x8_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 9:  sbgemv_kernel_14x9_alpha_one (m, alpha, a, x, beta, y); break;
+                            case 10: sbgemv_kernel_12x10_alpha_one(m, alpha, a, x, beta, y); break;
+                            case 11: sbgemv_kernel_15x11_alpha_one(m, alpha, a, x, beta, y); break;
+                            case 12: sbgemv_kernel_15x12_alpha_one(m, alpha, a, x, beta, y); break;
+                            case 13: sbgemv_kernel_16x13_alpha_one(m, alpha, a, x, beta, y); break;
+                            case 14: sbgemv_kernel_16x14_alpha_one(m, alpha, a, x, beta, y); break;
+                            case 15: sbgemv_kernel_16x15_alpha_one(m, alpha, a, x, beta, y); break;
+                            case 16: sbgemv_kernel_16x16_alpha_one(m, alpha, a, x, beta, y); break;
+                            default: break;
+                        }
+                    } else {
+                        sbgemv_kernel_8x16m_lda_alpha_one(m, n, alpha, a, lda, x, beta, y);
+                    }
+                }
+            }
+        } else {
+            if (n > 127) {
+                sbgemv_kernel_1x128_lda_direct_alpha_beta(m, n,
alpha, a, lda, x, beta, y); + } else if (n > 32) { + sbgemv_kernel_8x32_lda_direct_alpha_beta(m, n, alpha, a, lda, x, beta, y); + } else { + if (n > 16) { + sbgemv_kernel_8x16p_lda_alpha_beta(m, n, alpha, a, lda, x, beta, y); + } else { + if (lda == n) { + switch(n) { + case 1: sbgemv_kernel_32x1_alpha_beta (m, alpha, a, x, beta, y); break; + case 2: sbgemv_kernel_32x2_alpha_beta (m, alpha, a, x, beta, y); break; + case 3: sbgemv_kernel_32x3_alpha_beta (m, alpha, a, x, beta, y); break; + case 4: sbgemv_kernel_16x4_alpha_beta (m, alpha, a, x, beta, y); break; + case 5: sbgemv_kernel_30x5_alpha_beta (m, alpha, a, x, beta, y); break; + case 6: sbgemv_kernel_16x6_alpha_beta (m, alpha, a, x, beta, y); break; + case 7: sbgemv_kernel_16x7_alpha_beta (m, alpha, a, x, beta, y); break; + case 8: sbgemv_kernel_16x8_alpha_beta (m, alpha, a, x, beta, y); break; + case 9: sbgemv_kernel_14x9_alpha_beta (m, alpha, a, x, beta, y); break; + case 10: sbgemv_kernel_12x10_alpha_beta(m, alpha, a, x, beta, y); break; + case 11: sbgemv_kernel_15x11_alpha_beta(m, alpha, a, x, beta, y); break; + case 12: sbgemv_kernel_15x12_alpha_beta(m, alpha, a, x, beta, y); break; + case 13: sbgemv_kernel_16x13_alpha_beta(m, alpha, a, x, beta, y); break; + case 14: sbgemv_kernel_16x14_alpha_beta(m, alpha, a, x, beta, y); break; + case 15: sbgemv_kernel_16x15_alpha_beta(m, alpha, a, x, beta, y); break; + case 16: sbgemv_kernel_16x16_alpha_beta(m, alpha, a, x, beta, y); break; + default: break; + } + } else { + sbgemv_kernel_8x16m_lda_alpha_beta(m, n, alpha, a, lda, x, beta, y); + } + } + } + } + } + + return 0; +} + +#endif diff --git a/kernel/x86_64/sbgemv_t_microk_cooperlake_template.c b/kernel/x86_64/sbgemv_t_microk_cooperlake_template.c new file mode 100644 index 000000000..51e681add --- /dev/null +++ b/kernel/x86_64/sbgemv_t_microk_cooperlake_template.c @@ -0,0 +1,3082 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+#include <immintrin.h>
+#include "common.h"
+// Include common macros for BF16 based operations with IA intrinsics
+#include "bf16_common_macros.h"
+
+#ifndef ZERO_BETA  // Beta is non-zero
+
+#ifndef ONE_BETA   // BETA is not ONE
+
+#define STORE16_COMPLETE_RESULT       STORE16_COMPLETE_RESULT_ALPHA_BETA
+#define STORE16_MASK_COMPLETE_RESULT  STORE16_MASK_COMPLETE_RESULT_ALPHA_BETA
+#define STORE8_COMPLETE_RESULT        STORE8_COMPLETE_RESULT_ALPHA_BETA
+#define STORE8_MASK_COMPLETE_RESULT   STORE8_MASK_COMPLETE_RESULT_ALPHA_BETA
+#define STORE4_COMPLETE_RESULT        STORE4_COMPLETE_RESULT_ALPHA_BETA
+#define STORE4_MASK_COMPLETE_RESULT   STORE4_MASK_COMPLETE_RESULT_ALPHA_BETA
+
+#else              // BETA is ONE
+
+#define STORE16_COMPLETE_RESULT       STORE16_COMPLETE_RESULT_ALPHA_ONE
+#define STORE16_MASK_COMPLETE_RESULT  STORE16_MASK_COMPLETE_RESULT_ALPHA_ONE
+#define STORE8_COMPLETE_RESULT        STORE8_COMPLETE_RESULT_ALPHA_ONE
+#define STORE8_MASK_COMPLETE_RESULT   STORE8_MASK_COMPLETE_RESULT_ALPHA_ONE
+#define STORE4_COMPLETE_RESULT        STORE4_COMPLETE_RESULT_ALPHA_ONE
+#define STORE4_MASK_COMPLETE_RESULT   STORE4_MASK_COMPLETE_RESULT_ALPHA_ONE
+
+#endif
+
+#else              // BETA is zero
+
+#ifndef ONE_ALPHA  // ALPHA is not ONE
+
+#define STORE16_COMPLETE_RESULT       STORE16_COMPLETE_RESULT_ALPHA
+#define STORE16_MASK_COMPLETE_RESULT  STORE16_MASK_COMPLETE_RESULT_ALPHA
+#define STORE8_COMPLETE_RESULT        STORE8_COMPLETE_RESULT_ALPHA
+#define STORE8_MASK_COMPLETE_RESULT   STORE8_MASK_COMPLETE_RESULT_ALPHA
+#define STORE4_COMPLETE_RESULT        STORE4_COMPLETE_RESULT_ALPHA
+#define STORE4_MASK_COMPLETE_RESULT   STORE4_MASK_COMPLETE_RESULT_ALPHA
+
+#else              // ALPHA is ONE
+
+#define STORE16_COMPLETE_RESULT       STORE16_COMPLETE_RESULT_DIRECT
+#define STORE16_MASK_COMPLETE_RESULT  STORE16_MASK_COMPLETE_RESULT_DIRECT
+#define STORE8_COMPLETE_RESULT        STORE8_COMPLETE_RESULT_DIRECT
+#define STORE8_MASK_COMPLETE_RESULT   STORE8_MASK_COMPLETE_RESULT_DIRECT
+#define STORE4_COMPLETE_RESULT        STORE4_COMPLETE_RESULT_DIRECT
+#define STORE4_MASK_COMPLETE_RESULT   STORE4_MASK_COMPLETE_RESULT_DIRECT
+
+#endif
+
+#endif
+
+
+// 32 rows parallel processing BF16 GEMV kernel for n=1 && lda ineffective scenario
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+static int sbgemv_kernel_32x1_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#else
+static int sbgemv_kernel_32x1_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#endif
+#else
+#ifndef ONE_ALPHA
+static int sbgemv_kernel_32x1_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#else
+static int sbgemv_kernel_32x1(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#endif
+#endif
+{
+    BLASLONG tag_m_32x = m & (~31);
+
+    __m512i matrixArray_0, matrixArray_1, matrixArray_2;
+    __m512i xArray;
+    __m512  result_0, result_1;
+#ifndef ONE_ALPHA
+    __m512 ALPHAVECTOR = _mm512_set1_ps(alpha);
+#endif
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+    __m512 BETAVECTOR = _mm512_set1_ps(beta);
+#endif
+#endif
+
+    __m512i load_idx_lo = _mm512_set_epi16(0, 15, 0, 14, 0, 13, 0, 12, 0, 11, 0, 10, 0, 9, 0, 8,\
+                                           0, 7, 0, 6, 0, 5, 0, 4, 0, 3, 0, 2, 0, 1, 0, 0);
+    __m512i M512_EPI16_16 = _mm512_set1_epi16(16);
+    __m512i load_idx_hi = _mm512_add_epi16(load_idx_lo, M512_EPI16_16);
+
+    unsigned int interleave_mask_value = ((unsigned int) 0x55555555);
+    __mmask32 interleave_mask = *((__mmask32*) &interleave_mask_value);
+
+    xArray = _mm512_set1_epi16((short) x[0]);
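+    // Added note (not part of the original patch): the permutes below put the real row
+    // element in the even half of each 32-bit bf16 pair; blending x[0] with zero on the odd
+    // halves makes every _mm512_dpbf16_ps lane reduce to a single a[row]*x[0] product.
+    xArray = _mm512_mask_blend_epi16(interleave_mask,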
_mm512_setzero_si512(), xArray); + + if (tag_m_32x > 0) { + for (BLASLONG idx_m = 0; idx_m < tag_m_32x; idx_m+=32) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(idx_m)]); // Load 32 rows with n=1 + matrixArray_1 = _mm512_permutexvar_epi16(load_idx_lo, matrixArray_0); // Expand the low 16 elements + matrixArray_2 = _mm512_permutexvar_epi16(load_idx_hi, matrixArray_0); // Expand the high 16 elements + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_1, (__m512bh) xArray); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_2, (__m512bh) xArray); + + STORE16_COMPLETE_RESULT(result_0, y+idx_m) + STORE16_COMPLETE_RESULT(result_1, y+idx_m+16) + } + } + + BLASLONG tail_num = m - tag_m_32x; + if (tail_num > 16) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + unsigned int tail_mask_value = (((unsigned int)0xffffffff) >> (32-tail_num)); + __mmask32 tail_mask = *((__mmask32*) &tail_mask_value); + matrixArray_0 = _mm512_maskz_loadu_epi16(tail_mask, &a[(tag_m_32x)]); // Load 32 rows with n=1 + matrixArray_1 = _mm512_permutexvar_epi16(load_idx_lo, matrixArray_0); // Expand the low 16 elements + matrixArray_2 = _mm512_permutexvar_epi16(load_idx_hi, matrixArray_0); // Expand the high 16 elements + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_1, (__m512bh) xArray); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_2, (__m512bh) xArray); + + unsigned short store_mask_value = (((unsigned short)0xffff) >> (32-tail_num)); + __mmask16 store_mask = *((__mmask16*) &store_mask_value); + STORE16_COMPLETE_RESULT(result_0, y+tag_m_32x) + STORE16_MASK_COMPLETE_RESULT(result_1, y+tag_m_32x+16, store_mask) + } else if (tail_num > 8) { + __m256 result256_0 = _mm256_setzero_ps(); + __m256 result256_1 = _mm256_setzero_ps(); + + __m256i load_idx_lo256 = _mm512_castsi512_si256(load_idx_lo); + __m256i load_idx_hi256 = _mm512_extracti32x8_epi32(load_idx_lo, 0x1); + __m256i xArray256 = _mm512_castsi512_si256(xArray); + + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-tail_num)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + __m256i matrixArray256_0 = _mm256_maskz_loadu_epi16(tail_mask, &a[(tag_m_32x)]); // Load 16 rows with n=1 + __m256i matrixArray256_1 = _mm256_permutexvar_epi16(load_idx_lo256, matrixArray256_0); // Expand the low 8 elements + __m256i matrixArray256_2 = _mm256_permutexvar_epi16(load_idx_hi256, matrixArray256_0); // Expand the high 8 elements + + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_1, (__m256bh) xArray256); + result256_1 = _mm256_dpbf16_ps(result256_1, (__m256bh) matrixArray256_2, (__m256bh) xArray256); + + unsigned char store_mask_value = (((unsigned char)0xff) >> (16-tail_num)); + __mmask8 store_mask = *((__mmask8*) &store_mask_value); + STORE8_COMPLETE_RESULT(result256_0, y+tag_m_32x) + STORE8_MASK_COMPLETE_RESULT(result256_1, y+tag_m_32x+8, store_mask) + } else { + __m128 result128_0 = _mm_setzero_ps(); + __m128 result128_1 = _mm_setzero_ps(); + + __m128i load_idx_lo128 = _mm_set_epi16(0, 3, 0, 2, 0, 1, 0, 0); + __m128i M128_EPI16_4 = _mm_set1_epi16(4); + __m128i load_idx_hi128 = _mm_add_epi16(load_idx_lo128, M128_EPI16_4); + + __m128i xArray128 = _mm512_castsi512_si128(xArray); + + unsigned char tail_mask_value = (((unsigned char)0xff) >> (8-tail_num)); + __mmask8 tail_mask = *((__mmask8*) &tail_mask_value); + __m128i matrixArray128_0 = _mm_maskz_loadu_epi16(tail_mask, &a[(tag_m_32x)]); // 
Load 8 rows with n=1 + __m128i matrixArray128_1 = _mm_permutexvar_epi16(load_idx_lo128, matrixArray128_0); // Expand the low 4 elements + __m128i matrixArray128_2 = _mm_permutexvar_epi16(load_idx_hi128, matrixArray128_0); // Expand the high 4 elements + + result128_0 = _mm_dpbf16_ps(result128_0, (__m128bh) matrixArray128_1, (__m128bh) xArray128); + result128_1 = _mm_dpbf16_ps(result128_1, (__m128bh) matrixArray128_2, (__m128bh) xArray128); + + if (tail_num > 4) { + unsigned char store_mask_value = (((unsigned char)0xf) >> (8-tail_num)); + __mmask8 store_mask = *((__mmask8*) &store_mask_value); + STORE4_COMPLETE_RESULT(result128_0, y+tag_m_32x) + STORE4_MASK_COMPLETE_RESULT(result128_1, y+tag_m_32x+4, store_mask) + } else { + unsigned char store_mask_value = (((unsigned char)0xf) >> (4-tail_num)); + __mmask8 store_mask = *((__mmask8*) &store_mask_value); + STORE4_MASK_COMPLETE_RESULT(result128_0, y+tag_m_32x, store_mask) + } + } + + return 0; +} + +// 32 rows parallel processing BF16 GEMV kernel for n=2 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_32x2_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_32x2_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_32x2_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_32x2(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_32x = m & (~31); + + __m512i matrixArray_0, matrixArray_1; + __m512i xArray; + __m512 result_0, result_1; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + unsigned char load_mask_value = (((unsigned char)0xff) >> 6); + __mmask8 load_mask = *((__mmask8*) &load_mask_value); + xArray = _mm512_broadcastd_epi32(_mm_maskz_loadu_epi16(load_mask, x)); + + if (tag_m_32x > 0) { + for (BLASLONG idx_m = 0; idx_m < tag_m_32x; idx_m+=32) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(idx_m)*2]); // Load 16 rows as n=2 + matrixArray_1 = _mm512_loadu_si512(&a[(idx_m+16)*2]); // Load 16 rows as n=2 + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_0, (__m512bh) xArray); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_1, (__m512bh) xArray); + + STORE16_COMPLETE_RESULT(result_0, y+idx_m) + STORE16_COMPLETE_RESULT(result_1, y+idx_m+16) + } + } + + if (m - tag_m_32x >= 16) { + result_0 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(tag_m_32x)*2]); // Load 16 rows with n=2 + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_0, (__m512bh) xArray); + + STORE16_COMPLETE_RESULT(result_0, y+tag_m_32x) + + tag_m_32x += 16; + } + + BLASLONG tail_num = m - tag_m_32x; + if (tail_num > 8) { + result_0 = _mm512_setzero_ps(); + + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-(m&15))); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + matrixArray_0 = _mm512_maskz_loadu_epi32(tail_mask, &a[(tag_m_32x)*2]); // Load 16 rows with n=2 + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_0, (__m512bh) xArray); + + STORE16_MASK_COMPLETE_RESULT(result_0, y+tag_m_32x, tail_mask) + } else if (tail_num == 8) { + __m256 result256 = _mm256_setzero_ps(); + + __m256i matrixArray256 = 
_mm256_loadu_si256(&a[(tag_m_32x)*2]); // Load 8 rows with n=2 + __m256i xArray256 = _mm512_castsi512_si256(xArray); + result256 = _mm256_dpbf16_ps(result256, (__m256bh) matrixArray256, (__m256bh) xArray256); + + STORE8_COMPLETE_RESULT(result256, y+tag_m_32x) + } else { + __m256 result256 = _mm256_setzero_ps(); + + unsigned char tail_mask_value = (((unsigned char)0xff) >> (8-(m&7))); + __mmask8 tail_mask = *((__mmask8*) &tail_mask_value); + __m256i matrixArray256 = _mm256_maskz_loadu_epi32(tail_mask, &a[(tag_m_32x)*2]); // Load 8 rows with n=2 + __m256i xArray256 = _mm512_castsi512_si256(xArray); + result256 = _mm256_dpbf16_ps(result256, (__m256bh) matrixArray256, (__m256bh) xArray256); + + STORE8_MASK_COMPLETE_RESULT(result256, y+tag_m_32x, tail_mask) + } + + return 0; +} + +// 32 rows parallel processing BF16 GEMV kernel for n=3 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_32x3_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_32x3_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_32x3_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_32x3(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_32x = m & (~31); + + __m512 result_0, result_1; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + unsigned char x_load_mask_value = (((unsigned char)0xff) >> 5); + __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value); + __m128i xTmp = _mm_maskz_loadu_epi16(x_load_mask, x); // x0|x1|x2|0|0|0|0|0| + __m512i xArray_0 = _mm512_broadcastd_epi32(xTmp); // x0|x1|x0|x1|...|x0|x1| + __m512i xArray_1 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(xTmp, 0x1)); // x2| 0|x2| 0|...|x2| 0| + + __m512i load_idx_base; + __m512i M512_EPI16_2, M512_EPI16_8, M512_EPI16_16; + M512_EPI16_2 = _mm512_set1_epi16(2); + M512_EPI16_8 = _mm512_add_epi16(M512_EPI16_2, M512_EPI16_2); + M512_EPI16_8 = _mm512_add_epi16(M512_EPI16_8, M512_EPI16_8); + M512_EPI16_16 = _mm512_add_epi16(M512_EPI16_8, M512_EPI16_8); + load_idx_base = _mm512_set_epi16(46, 45, 43, 42, 40, 39, 37, 36, 34, 33, 31, 30, 28, 27, 25, 24, + 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 0); + + if (tag_m_32x > 0) { + __m512i load_idx01_1st, load_idx01_2nd, load_idx2_1st, load_idx2_2nd; + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6; + + unsigned int idx_blend_mask_value = ((unsigned int)0x80000000); + __mmask32 idx_blend_mask = *((__mmask32*) &idx_blend_mask_value); + + load_idx01_1st = load_idx_base; + load_idx01_2nd = _mm512_add_epi16(load_idx01_1st, M512_EPI16_16); + load_idx2_1st = _mm512_add_epi16(load_idx01_1st, M512_EPI16_2); + load_idx2_2nd = _mm512_add_epi16(load_idx01_2nd, M512_EPI16_2); + load_idx2_2nd = _mm512_mask_blend_epi16(idx_blend_mask, load_idx2_2nd, _mm512_setzero_si512()); + + for (BLASLONG idx_m = 0; idx_m < tag_m_32x; idx_m+=32) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(idx_m)*3]); // Load 10 rows with n=3 plus 2 element + matrixArray_1 = _mm512_loadu_si512(&a[((idx_m+10)*3 + 2)]); // Load 10 rows with n=3 plus 2 element + matrixArray_2 = _mm512_loadu_si512(&a[((idx_m+21)*3 + 1)]); // Load 10 rows with n=3 
plus 2 element + + matrixArray_3 = _mm512_permutex2var_epi16(matrixArray_0, load_idx01_1st, matrixArray_1); // Select the first 2 elements for each row + matrixArray_4 = _mm512_permutex2var_epi16(matrixArray_1, load_idx01_2nd, matrixArray_2); // Select the first 2 elements for each row + matrixArray_5 = _mm512_permutex2var_epi16(matrixArray_0, load_idx2_1st, matrixArray_1); // Select the third element for each row + matrixArray_6 = _mm512_permutex2var_epi16(matrixArray_1, load_idx2_2nd, matrixArray_2); // Select the third element for each row + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_3, (__m512bh) xArray_0); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_5, (__m512bh) xArray_1); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_4, (__m512bh) xArray_0); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_6, (__m512bh) xArray_1); + + STORE16_COMPLETE_RESULT(result_0, y+idx_m) + STORE16_COMPLETE_RESULT(result_1, y+idx_m+16) + } + } + + if (tag_m_32x != m) { + __m256i load256_idx01_1st, load256_idx01_2nd, load256_idx2_1st, load256_idx2_2nd; + __m256i matrixArray256_0, matrixArray256_1, matrixArray256_2, matrixArray256_3, matrixArray256_4, matrixArray256_5, matrixArray256_6; + __m256 result256_0, result256_1; + + unsigned short idx256_blend_mask_value = ((unsigned short)0x8000); + __mmask16 idx256_blend_mask = *((__mmask16*) &idx256_blend_mask_value); + + load256_idx01_1st = _mm512_castsi512_si256(load_idx_base); + load256_idx01_2nd = _mm256_add_epi16(load256_idx01_1st, _mm512_castsi512_si256(M512_EPI16_8)); + load256_idx2_1st = _mm256_add_epi16(load256_idx01_1st, _mm512_castsi512_si256(M512_EPI16_2)); + load256_idx2_2nd = _mm256_add_epi16(load256_idx01_2nd, _mm512_castsi512_si256(M512_EPI16_2)); + load256_idx2_2nd = _mm256_mask_blend_epi16(idx256_blend_mask, load256_idx2_2nd, _mm256_setzero_si256()); + + if (m - tag_m_32x > 15) { + result256_0 = _mm256_setzero_ps(); + result256_1 = _mm256_setzero_ps(); + + matrixArray256_0 = _mm256_loadu_si256(&a[(tag_m_32x)*3]); // Load 5 rows with n=3 plus 1 element + matrixArray256_1 = _mm256_loadu_si256(&a[((tag_m_32x+5)*3 + 1)]); // Load 5 rows with n=3 plus 1 element + matrixArray256_2 = _mm256_loadu_si256(&a[((tag_m_32x+10)*3 + 2)]); // Load 5 rows with n=3 plus 1 element + + matrixArray256_3 = _mm256_permutex2var_epi16(matrixArray256_0, load256_idx01_1st, matrixArray256_1); // Select the first 2 elements for each row + matrixArray256_4 = _mm256_permutex2var_epi16(matrixArray256_1, load256_idx01_2nd, matrixArray256_2); // Select the first 2 elements for each row + matrixArray256_5 = _mm256_permutex2var_epi16(matrixArray256_0, load256_idx2_1st, matrixArray256_1); // Select the third element for each row + matrixArray256_6 = _mm256_permutex2var_epi16(matrixArray256_1, load256_idx2_2nd, matrixArray256_2); // Select the third element for each row + + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_3, (__m256bh) _mm512_castsi512_si256(xArray_0)); + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_5, (__m256bh) _mm512_castsi512_si256(xArray_1)); + result256_1 = _mm256_dpbf16_ps(result256_1, (__m256bh) matrixArray256_4, (__m256bh) _mm512_castsi512_si256(xArray_0)); + result256_1 = _mm256_dpbf16_ps(result256_1, (__m256bh) matrixArray256_6, (__m256bh) _mm512_castsi512_si256(xArray_1)); + + STORE8_COMPLETE_RESULT(result256_0, y+tag_m_32x) + STORE8_COMPLETE_RESULT(result256_1, y+tag_m_32x+8) + + tag_m_32x += 16; + } + + if (tag_m_32x != m) { + result256_0 = 
_mm256_setzero_ps(); + result256_1 = _mm256_setzero_ps(); + BLASLONG tail_num = m-tag_m_32x; + + if (tail_num > 10) { + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-((tail_num-10-1)*3+1))); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + matrixArray256_0 = _mm256_loadu_si256(&a[(tag_m_32x)*3]); // Load 5 rows with n=3 plus 1 element + matrixArray256_1 = _mm256_loadu_si256(&a[((tag_m_32x+5)*3 + 1)]); // Load 5 rows with n=3 plus 1 element + matrixArray256_2 = _mm256_maskz_loadu_epi16(tail_mask, &a[((tag_m_32x+10)*3 + 2)]); // Load m-tag_m_32x-10 rows + + matrixArray256_3 = _mm256_permutex2var_epi16(matrixArray256_0, load256_idx01_1st, matrixArray256_1); // Select the first 2 elements for each row + matrixArray256_4 = _mm256_permutex2var_epi16(matrixArray256_1, load256_idx01_2nd, matrixArray256_2); // Select the first 2 elements for each row + matrixArray256_5 = _mm256_permutex2var_epi16(matrixArray256_0, load256_idx2_1st, matrixArray256_1); // Select the third element for each row + matrixArray256_6 = _mm256_permutex2var_epi16(matrixArray256_1, load256_idx2_2nd, matrixArray256_2); // Select the third element for each row + + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_3, (__m256bh) _mm512_castsi512_si256(xArray_0)); + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_5, (__m256bh) _mm512_castsi512_si256(xArray_1)); + result256_1 = _mm256_dpbf16_ps(result256_1, (__m256bh) matrixArray256_4, (__m256bh) _mm512_castsi512_si256(xArray_0)); + result256_1 = _mm256_dpbf16_ps(result256_1, (__m256bh) matrixArray256_6, (__m256bh) _mm512_castsi512_si256(xArray_1)); + } else if (tail_num > 5) { + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-((tail_num-5-1)*3+2))); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + matrixArray256_0 = _mm256_loadu_si256(&a[(tag_m_32x)*3]); // Load 5 rows with n=3 plus 1 element + matrixArray256_1 = _mm256_maskz_loadu_epi16(tail_mask, &a[((tag_m_32x+5)*3+1)]); // Load m-tag_m_32x-5 rows + matrixArray256_2 = _mm256_setzero_si256(); + + matrixArray256_3 = _mm256_permutex2var_epi16(matrixArray256_0, load256_idx01_1st, matrixArray256_1); // Select the first 2 elements for each row + matrixArray256_4 = _mm256_permutex2var_epi16(matrixArray256_1, load256_idx01_2nd, matrixArray256_2); // Select the first 2 elements for each row + matrixArray256_5 = _mm256_permutex2var_epi16(matrixArray256_0, load256_idx2_1st, matrixArray256_1); // Select the third element for each row + matrixArray256_6 = _mm256_permutex2var_epi16(matrixArray256_1, load256_idx2_2nd, matrixArray256_2); // Select the third element for each row + + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_3, (__m256bh) _mm512_castsi512_si256(xArray_0)); + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_5, (__m256bh) _mm512_castsi512_si256(xArray_1)); + result256_1 = _mm256_dpbf16_ps(result256_1, (__m256bh) matrixArray256_4, (__m256bh) _mm512_castsi512_si256(xArray_0)); + result256_1 = _mm256_dpbf16_ps(result256_1, (__m256bh) matrixArray256_6, (__m256bh) _mm512_castsi512_si256(xArray_1)); + } else { + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-(tail_num*3))); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + matrixArray256_0 = _mm256_maskz_loadu_epi16(tail_mask, &a[(tag_m_32x)*3]); // Load m-tag_m_32x rows + matrixArray256_1 = _mm256_setzero_si256(); + + matrixArray256_3 = _mm256_permutex2var_epi16(matrixArray256_0, load256_idx01_1st, 
matrixArray256_1); // Select the first 2 elements for each row + matrixArray256_5 = _mm256_permutex2var_epi16(matrixArray256_0, load256_idx2_1st, matrixArray256_1); // Select the third element for each row + + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_3, (__m256bh) _mm512_castsi512_si256(xArray_0)); + result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray256_5, (__m256bh) _mm512_castsi512_si256(xArray_1)); + } + + unsigned short store_tail_mask_value = (((unsigned short)0xffff) >> (16-(tail_num))); + __mmask16 store_tail_mask = *((__mmask16*) &store_tail_mask_value); + __m512 result512 = _mm512_insertf32x8(_mm512_castps256_ps512(result256_0), result256_1, 0x1); + STORE16_MASK_COMPLETE_RESULT(result512, y+tag_m_32x, store_tail_mask) + } + } + + return 0; +} + +// 16 rows parallel processing BF16 GEMV kernel for n=4 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_16x4_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_16x4_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_16x4_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_16x4(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_16x = m & (~15); + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3; + __m512i xArray_01, xArray_23, xArray_remix; + __m512 result; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i M512_EPI32_1 = _mm512_set1_epi32(1); + __m512i idx_base_0 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_1); + __m512i idx_base_remix = _mm512_inserti32x8(idx_base_0, _mm512_castsi512_si256(idx_base_1), 0x1); + + unsigned char x_load_mask_value = (((unsigned char)0xf) >> 2); + __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value); + __m128i xTmp = _mm_maskz_loadu_epi32(x_load_mask, x); // |x0|x1|x2|x3|0|0|0|0| + xArray_01 = _mm512_broadcastd_epi32(xTmp); // |x0|x1|x0|x1|...|x0|x1| + xArray_23 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(xTmp, 0x1)); // |x2|x3|x2|x3|...|x2|x3| + unsigned short blend_mask_value = ((unsigned short)0xff00); + __mmask16 blend_mask = *((__mmask16*) &blend_mask_value); + xArray_remix = _mm512_mask_blend_epi32(blend_mask, xArray_01, xArray_23); // |x0|x1|x0|x1|x0|x1|x0|x1|...|x2|x3|x2|x3|x2|x3|x2|x3| + + if (tag_m_16x > 0) { + for (BLASLONG idx_m = 0; idx_m < tag_m_16x; idx_m+=16) { + result = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(idx_m)*4]); // Load 8 rows with n=4 + matrixArray_1 = _mm512_loadu_si512(&a[(idx_m+8)*4]); // Load 8 rows with n=4 + + matrixArray_2 = _mm512_permutex2var_epi32(matrixArray_0, idx_base_0, matrixArray_1); // |a0|a1|...|h0|h1|i0|i1|...|p0|p1| + matrixArray_3 = _mm512_permutex2var_epi32(matrixArray_0, idx_base_1, matrixArray_1); // |a2|a3|...|h2|h3|i2|i3|...|p2|p3| + + result = _mm512_dpbf16_ps(result, (__m512bh) matrixArray_2, (__m512bh) xArray_01); + result = _mm512_dpbf16_ps(result, (__m512bh) matrixArray_3, (__m512bh) xArray_23); + + STORE16_COMPLETE_RESULT(result, y+idx_m) + } + } + + if (m - tag_m_16x > 7) { + result = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(tag_m_16x)*4]); // Load 
8 rows with n=4 + matrixArray_2 = _mm512_permutexvar_epi32(idx_base_remix, matrixArray_0); // a0|a1|...|h0|h1|a2|a3|...|h2|h3| + + result = _mm512_dpbf16_ps(result, (__m512bh) matrixArray_2, (__m512bh) xArray_remix); + __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(result), _mm512_extractf32x8_ps(result, 1)); + + STORE8_COMPLETE_RESULT(result256, y+tag_m_16x) + tag_m_16x += 8; + } + + BLASLONG tail_num = m-tag_m_16x; + if (tail_num != 0) { + result = _mm512_setzero_ps(); + + unsigned short tail_mask_value = (((unsigned short)0xffff) >> (16-tail_num*2)); + __mmask16 tail_mask = *((__mmask16*) &tail_mask_value); + matrixArray_0 = _mm512_maskz_loadu_epi32(tail_mask, &a[(tag_m_16x)*4]); // Load 8 rows with n=4 + matrixArray_2 = _mm512_permutexvar_epi32(idx_base_remix, matrixArray_0); // a0|a1|...|h0|h1|a2|a3|...|h2|h3| + + result = _mm512_dpbf16_ps(result, (__m512bh) matrixArray_2, (__m512bh) xArray_remix); + __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(result), _mm512_extractf32x8_ps(result, 1)); + + unsigned char store_tail_mask_value = (((unsigned char)0xff) >> (8-tail_num)); + __mmask8 store_tail_mask = *((__mmask8*) &store_tail_mask_value); + STORE8_MASK_COMPLETE_RESULT(result256, y+tag_m_16x, store_tail_mask) + } + + return 0; +} + +// 30 rows parallel processing BF16 GEMV kernel for n=5 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_30x5_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_30x5_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_30x5_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_30x5(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_30x = m - (m%30); + + unsigned char x_load_mask_value = (((unsigned char)0xff) >> 3); + __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value); + __m128i x128 = _mm_maskz_loadu_epi16(x_load_mask, x); // x0|x1|x2|x3|x4|0|0|0| + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512 result_0, result_1; + __m512i xArray_01 = _mm512_broadcastd_epi32(x128); // x0|x1|x0|x1|...|x0|x1| + __m512i xArray_23 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x1)); // x2|x3|x2|x3|...|x2|x3| + __m512i xArray_4 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x2)); // x4| 0|x4| 0|...|x4| 0| + + __m512i M512_EPI16_2 = _mm512_set1_epi16(2); + __m512i load_idx01_stage1_1st = _mm512_set_epi16( 0, 0, 0, 0, 0, 0, 0, 0, 58, 57, 53, 52, 48, 47, 43, 42, + 38, 37, 33, 32, 26, 25, 21, 20, 16, 15, 11, 10, 6, 5, 1, 0); + __m512i load_idx01_stage1_2nd = _mm512_shuffle_i32x4(load_idx01_stage1_1st, load_idx01_stage1_1st, 0x39); + __m512i load_idx01_stage1_3rd = _mm512_shuffle_i32x4(load_idx01_stage1_1st, load_idx01_stage1_1st, 0x4f); + + __m512i load_idx23_stage1_1st = _mm512_add_epi16(load_idx01_stage1_1st, M512_EPI16_2); + __m512i load_idx23_stage1_2nd = _mm512_add_epi16(load_idx01_stage1_2nd, M512_EPI16_2); + __m512i load_idx23_stage1_3rd = _mm512_add_epi16(load_idx01_stage1_3rd, M512_EPI16_2); + + __m512i load_idx4_stage1_1st = _mm512_add_epi16(load_idx23_stage1_1st, M512_EPI16_2); + __m512i load_idx4_stage1_2nd = _mm512_add_epi16(load_idx23_stage1_2nd, M512_EPI16_2); + __m512i load_idx4_stage1_3rd = _mm512_add_epi16(load_idx23_stage1_3rd, 
M512_EPI16_2);
+
+    __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4;
+    __m512i matrixArray_stage1_0, matrixArray_stage1_1, matrixArray_stage1_2;
+    __m512i matrixArray_stage2_0, matrixArray_stage2_1;
+
+    unsigned int load_mask_value = (((unsigned int)0xffffffff) >> 2);
+    __mmask32 load_mask = *((__mmask32*) &load_mask_value);
+    unsigned short store_mask_value = (((unsigned short)0xffff) >> 2);
+    __mmask16 store_mask = *((__mmask16*) &store_mask_value);
+
+    if (tag_m_30x > 0) {
+        unsigned short blend_mask_value_0 = ((unsigned short)0xf000);
+        __mmask16 blend_mask_0 = *((__mmask16*) &blend_mask_value_0);
+        unsigned short blend_mask_value_1 = ((unsigned short)0x3f00);
+        __mmask16 blend_mask_1 = *((__mmask16*) &blend_mask_value_1);
+        for (BLASLONG idx_m = 0; idx_m < tag_m_30x; idx_m+=30) {
+            result_0 = _mm512_setzero_ps();
+            result_1 = _mm512_setzero_ps();
+
+            matrixArray_0 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m)*5]); // Load 6 rows with n=5
+            matrixArray_1 = _mm512_maskz_loadu_epi16(load_mask, &a[((idx_m+6)*5)]); // Load 6 rows with n=5
+            matrixArray_2 = _mm512_maskz_loadu_epi16(load_mask, &a[((idx_m+12)*5)]); // Load 6 rows with n=5
+            matrixArray_3 = _mm512_maskz_loadu_epi16(load_mask, &a[((idx_m+18)*5)]); // Load 6 rows with n=5
+            matrixArray_4 = _mm512_maskz_loadu_epi16(load_mask, &a[((idx_m+24)*5)]); // Load 6 rows with n=5
+
+            // Process the 0|1 elements
+            // Stage 1: Select the 0|1 elements for each row
+            matrixArray_stage1_0 = _mm512_permutex2var_epi16(matrixArray_0, load_idx01_stage1_1st, matrixArray_1);
+            matrixArray_stage1_1 = _mm512_permutex2var_epi16(matrixArray_2, load_idx01_stage1_2nd, matrixArray_3);
+            matrixArray_stage1_2 = _mm512_permutexvar_epi16(load_idx01_stage1_3rd, matrixArray_4);
+            // Stage 2: Reorder and compress all the 0|1 elements
+            matrixArray_stage2_0 = _mm512_mask_blend_epi32(blend_mask_0, matrixArray_stage1_0, matrixArray_stage1_1);
+            matrixArray_stage2_1 = _mm512_mask_blend_epi32(blend_mask_1, matrixArray_stage1_1, matrixArray_stage1_2);
+            // Calculate the result of the 0|1 elements
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage2_0, (__m512bh) xArray_01);
+            result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_stage2_1, (__m512bh) xArray_01);
+
+            // Process the 2|3 elements
+            // Stage 1: Select the 2|3 elements for each row
+            matrixArray_stage1_0 = _mm512_permutex2var_epi16(matrixArray_0, load_idx23_stage1_1st, matrixArray_1);
+            matrixArray_stage1_1 = _mm512_permutex2var_epi16(matrixArray_2, load_idx23_stage1_2nd, matrixArray_3);
+            matrixArray_stage1_2 = _mm512_permutexvar_epi16(load_idx23_stage1_3rd, matrixArray_4);
+            // Stage 2: Reorder and compress all the 2|3 elements
+            matrixArray_stage2_0 = _mm512_mask_blend_epi32(blend_mask_0, matrixArray_stage1_0, matrixArray_stage1_1);
+            matrixArray_stage2_1 = _mm512_mask_blend_epi32(blend_mask_1, matrixArray_stage1_1, matrixArray_stage1_2);
+            // Calculate the result of the 2|3 elements and accumulate the result of 0|1 elements
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage2_0, (__m512bh) xArray_23);
+            result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_stage2_1, (__m512bh) xArray_23);
+
+            // Process the 4 elements
+            // Stage 1: Select the 4 elements for each row
+            matrixArray_stage1_0 = _mm512_permutex2var_epi16(matrixArray_0, load_idx4_stage1_1st, matrixArray_1);
+            matrixArray_stage1_1 = _mm512_permutex2var_epi16(matrixArray_2, load_idx4_stage1_2nd, matrixArray_3);
+            matrixArray_stage1_2 =
_mm512_permutexvar_epi16(load_idx4_stage1_3rd, matrixArray_4); + // Stage 2: Reorder and compress all the 4 elements + matrixArray_stage2_0 = _mm512_mask_blend_epi32(blend_mask_0, matrixArray_stage1_0, matrixArray_stage1_1); + matrixArray_stage2_1 = _mm512_mask_blend_epi32(blend_mask_1, matrixArray_stage1_1, matrixArray_stage1_2); + // Calculate the result of the 4 element and accumulate the result of 0|1 and 2|3 elements + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage2_0, (__m512bh) xArray_4); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_stage2_1, (__m512bh) xArray_4); + + STORE16_COMPLETE_RESULT(result_0, y+idx_m) + STORE16_MASK_COMPLETE_RESULT(result_1, y+idx_m+16, store_mask) + } + } + + if (m - tag_m_30x > 11) { + BLASLONG tag_m_12x = m - ((m-tag_m_30x)%12); + for (BLASLONG idx_m = tag_m_30x; idx_m < tag_m_12x; idx_m+=12) { + unsigned short store_less_mask_value = (((unsigned short)0xffff) >> 4); + __mmask16 store_less_mask = *((__mmask16*) &store_less_mask_value); + result_0 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m)*5]); // Load 6 rows with n=5 + matrixArray_1 = _mm512_maskz_loadu_epi16(load_mask, &a[((idx_m+6)*5)]); // Load 6 rows with n=5 + + // Interleave the elements + matrixArray_stage1_0 = _mm512_permutex2var_epi16(matrixArray_0, load_idx01_stage1_1st, matrixArray_1); + matrixArray_stage1_1 = _mm512_permutex2var_epi16(matrixArray_0, load_idx23_stage1_1st, matrixArray_1); + matrixArray_stage1_2 = _mm512_permutex2var_epi16(matrixArray_0, load_idx4_stage1_1st, matrixArray_1); + // Calculate and accumulate the result + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage1_0, (__m512bh) xArray_01); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage1_1, (__m512bh) xArray_23); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage1_2, (__m512bh) xArray_4); + + STORE16_MASK_COMPLETE_RESULT(result_0, y+idx_m, store_less_mask) + tag_m_30x += 12; + } + } + + BLASLONG tail_num = m - tag_m_30x; + if (tail_num > 6) { + unsigned short store_less_mask_value = (((unsigned short)0xffff) >> (4+(12-tail_num))); + __mmask16 store_less_mask = *((__mmask16*) &store_less_mask_value); + unsigned int load_less_mask_value = (((unsigned int)0xffffffff) >> (2+(12-tail_num)*5)); + __mmask32 load_less_mask = *((__mmask32*) &load_less_mask_value); + result_0 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_maskz_loadu_epi16(load_mask, &a[(tag_m_30x)*5]); // Load 6 rows with n=5 + matrixArray_1 = _mm512_maskz_loadu_epi16(load_less_mask, &a[((tag_m_30x+6)*5)]); // Load x rows with n=5 + + // Interleave the elements + matrixArray_stage1_0 = _mm512_permutex2var_epi16(matrixArray_0, load_idx01_stage1_1st, matrixArray_1); + matrixArray_stage1_1 = _mm512_permutex2var_epi16(matrixArray_0, load_idx23_stage1_1st, matrixArray_1); + matrixArray_stage1_2 = _mm512_permutex2var_epi16(matrixArray_0, load_idx4_stage1_1st, matrixArray_1); + // Calculate and accumulate the result + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage1_0, (__m512bh) xArray_01); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage1_1, (__m512bh) xArray_23); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage1_2, (__m512bh) xArray_4); + + STORE16_MASK_COMPLETE_RESULT(result_0, y+tag_m_30x, store_less_mask) + } else { + __m128i matrixArray128; + __m128 result128, tmp128; + for (BLASLONG i = tag_m_30x; i < m; i++) { + result128 = _mm_setzero_ps(); + matrixArray128 = 
_mm_maskz_loadu_epi16(x_load_mask, &a[(i)*5]); // Load 1 row with n=5
+            result128 = _mm_dpbf16_ps(result128, (__m128bh) matrixArray128, (__m128bh) x128);
+            // Horizontally reduce the four partial sums in result128 down to lane 0
+            tmp128 = _mm_shuffle_ps(result128, result128, 14);
+            result128 = _mm_add_ps(result128, tmp128);
+            tmp128 = _mm_shuffle_ps(result128, result128, 1);
+            result128 = _mm_add_ps(result128, tmp128);
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+            y[i] = alpha * result128[0] + beta * y[i];
+#else
+            y[i] = alpha * result128[0] + y[i];
+#endif
+#else
+#ifndef ONE_ALPHA
+            y[i] = result128[0] * alpha;
+#else
+            y[i] = result128[0];
+#endif
+#endif
+
+        }
+    }
+
+    return 0;
+}
+
+// 16 rows parallel processing BF16 GEMV kernel for n=6 && lda ineffective scenario
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+static int sbgemv_kernel_16x6_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#else
+static int sbgemv_kernel_16x6_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#endif
+#else
+#ifndef ONE_ALPHA
+static int sbgemv_kernel_16x6_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#else
+static int sbgemv_kernel_16x6(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#endif
+#endif
+{
+    BLASLONG tag_m_16x = m & (~15);
+
+    unsigned char x_load_mask_value = (((unsigned char)0xff) >> 2);
+    __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value);
+    __m128i x128 = _mm_maskz_loadu_epi16(x_load_mask, x); // x0|x1|x2|x3|x4|x5|0|0|
+
+    if (tag_m_16x > 0) {
+        __m512 result_0;
+
+#ifndef ONE_ALPHA
+        __m512 ALPHAVECTOR = _mm512_set1_ps(alpha);
+#endif
+#ifndef ZERO_BETA
+        __m512 BETAVECTOR = _mm512_set1_ps(beta);
+#endif
+
+        __m512i M512_EPI32_1 = _mm512_set1_epi32(1);
+        __m512i load_idx01_1st = _mm512_set_epi32( 0,  0,  0,  0,  0, 30, 27, 24, 21, 18, 15, 12,  9,  6,  3,  0);
+        __m512i load_idx01_2nd = _mm512_set_epi32(13, 10,  7,  4,  1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0);
+
+        __m512i load_idx23_1st = _mm512_add_epi32(load_idx01_1st, M512_EPI32_1);
+        __m512i load_idx23_2nd = _mm512_add_epi32(load_idx01_2nd, M512_EPI32_1);
+
+        __m512i load_idx45_1st = _mm512_add_epi32(load_idx23_1st, M512_EPI32_1);
+        __m512i load_idx45_2nd = _mm512_add_epi32(load_idx23_2nd, M512_EPI32_1);
+
+        unsigned short blend_mask_value = ((unsigned short)0x0400);
+        __mmask16 blend_mask = *((__mmask16*) &blend_mask_value);
+        // Set the 11th element to be 0 as invalid index for a 512 bit epi32 register
+        load_idx45_1st = _mm512_mask_blend_epi32(blend_mask, load_idx45_1st, load_idx01_2nd);
+        // Set the 11th element to be 0 as 0 is the correct index
+        load_idx45_2nd = _mm512_mask_blend_epi32(blend_mask, load_idx45_2nd, load_idx01_2nd);
+
+        __m512i xArray_01 = _mm512_broadcastd_epi32(x128); // x0|x1|x0|x1|...|x0|x1|
+        __m512i xArray_23 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x1)); // x2|x3|x2|x3|...|x2|x3|
+        __m512i xArray_45 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x2)); // x4|x5|x4|x5|...|x4|x5|
+
+        unsigned short permute_mask01_uint = (((unsigned short)0xf800));
+        __mmask16 permute_mask01 = *((__mmask16*) &permute_mask01_uint);
+        unsigned short permute_mask45_uint = (((unsigned short)0xfc00));
+        __mmask16 permute_mask45 = *((__mmask16*) &permute_mask45_uint);
+
+        __m512i matrixArray_0, matrixArray_1, matrixArray_2;
+        __m512i matrixArray_stage_0, matrixArray_stage_1, matrixArray_stage_2;
+        for (BLASLONG idx_m = 0; idx_m < tag_m_16x; idx_m+=16) {
+            result_0 = _mm512_setzero_ps();
+
+            matrixArray_0 = _mm512_loadu_si512(&a[(idx_m)*6]); // Load 5 rows with n=6 plus 2 elements
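+            // NOTE (editor): an n=6 row occupies three 32-bit bf16 pairs, hence the dword index
+            // vectors above stride by 3; rows that land in matrixArray_2 are merged back in by
+            // the masked permutes of stage 2 below.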
+            matrixArray_1 = _mm512_loadu_si512(&a[((idx_m+5)*6 + 2)]); // Load 5 rows with n=6 plus 2 elements
+            matrixArray_2 = _mm512_loadu_si512(&a[((idx_m+10)*6 + 4)]); // Load 5 rows with n=6 plus 2 elements
+
+            // Stage 1: interleave for the a..k elements
+            matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, load_idx01_1st, matrixArray_1);
+            matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, load_idx23_1st, matrixArray_1);
+            matrixArray_stage_2 = _mm512_permutex2var_epi32(matrixArray_0, load_idx45_1st, matrixArray_1);
+
+            // Stage 2: interleave for the l..p elements and remix together
+            matrixArray_stage_0 = _mm512_mask_permutexvar_epi32(matrixArray_stage_0, permute_mask01, load_idx01_2nd, matrixArray_2);
+            matrixArray_stage_1 = _mm512_mask_permutexvar_epi32(matrixArray_stage_1, permute_mask01, load_idx23_2nd, matrixArray_2);
+            matrixArray_stage_2 = _mm512_mask_permutexvar_epi32(matrixArray_stage_2, permute_mask45, load_idx45_2nd, matrixArray_2);
+
+            // Calculate and accumulate the result of all three element pairs
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_0, (__m512bh) xArray_01);
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_1, (__m512bh) xArray_23);
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_2, (__m512bh) xArray_45);
+
+            STORE16_COMPLETE_RESULT(result_0, y+idx_m)
+        }
+
+        if (m - tag_m_16x > 7) {
+            __m256i M256_EPI32_1 = _mm512_castsi512_si256(M512_EPI32_1);
+            __m256i load_idx01_1st = _mm256_set_epi32( 0,  0, 15, 12,  9,  6,  3,  0);
+            __m256i load_idx01_2nd = _mm256_set_epi32( 5,  2,  0,  0,  0,  0,  0,  0);
+
+            __m256i load_idx23_1st = _mm256_add_epi32(load_idx01_1st, M256_EPI32_1);
+            __m256i load_idx23_2nd = _mm256_add_epi32(load_idx01_2nd, M256_EPI32_1);
+            unsigned char blend_mask_value = ((unsigned char)0x20);
+            __mmask8 blend_mask = *((__mmask8*) &blend_mask_value);
+            // Set the 6th element to be 0 as invalid index for a 256 bit epi32 register
+            load_idx23_1st = _mm256_mask_blend_epi32(blend_mask, load_idx23_1st, load_idx01_2nd);
+            // Set the 6th element to be 0 as 0 is the correct index
+            load_idx23_2nd = _mm256_mask_blend_epi32(blend_mask, load_idx23_2nd, load_idx01_2nd);
+
+            __m256i load_idx45_1st = _mm256_add_epi32(load_idx23_1st, M256_EPI32_1);
+            __m256i load_idx45_2nd = _mm256_add_epi32(load_idx23_2nd, M256_EPI32_1);
+
+            unsigned char permute_mask01_uint = (((unsigned char)0xc0));
+            __mmask8 permute_mask01 = *((__mmask8*) &permute_mask01_uint);
+            unsigned char permute_mask45_uint = (((unsigned char)0xe0));
+            __mmask8 permute_mask45 = *((__mmask8*) &permute_mask45_uint);
+
+            __m256i matrixArray_0, matrixArray_1, matrixArray_2;
+            __m256i matrixArray_stage_0;
+            __m256 result256_0;
+
+            result256_0 = _mm256_setzero_ps();
+
+            matrixArray_0 = _mm256_loadu_si256(&a[(tag_m_16x)*6]); // Load 2 rows with n=6 plus 4 elements
+            matrixArray_1 = _mm256_loadu_si256(&a[((tag_m_16x+2)*6 + 4)]); // Load 2 rows with n=6 plus 4 elements
+            matrixArray_2 = _mm256_loadu_si256(&a[((tag_m_16x+5)*6 + 2)]); // Load 2 rows with n=6 plus 4 elements
+
+            // Process the 0|1 elements
+            // Select the 0|1 elements for each row
+            matrixArray_stage_0 = _mm256_permutex2var_epi32(matrixArray_0, load_idx01_1st, matrixArray_1);
+            matrixArray_stage_0 = _mm256_mask_permutexvar_epi32(matrixArray_stage_0, permute_mask01, load_idx01_2nd, matrixArray_2);
+            // Calculate the result of the 0|1 elements
+            result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray_stage_0, (__m256bh) _mm512_castsi512_si256(xArray_01));
+
+            // Process the 2|3 elements
+            // Select the 2|3 elements for each row
+            matrixArray_stage_0 = _mm256_permutex2var_epi32(matrixArray_0, load_idx23_1st, matrixArray_1);
+            matrixArray_stage_0 = _mm256_mask_permutexvar_epi32(matrixArray_stage_0, permute_mask45, load_idx23_2nd, matrixArray_2);
+            // Calculate the result of the 2|3 elements
+            result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray_stage_0, (__m256bh) _mm512_castsi512_si256(xArray_23));
+
+            // Process the 4|5 elements
+            // Select the 4|5 elements for each row
+            matrixArray_stage_0 = _mm256_permutex2var_epi32(matrixArray_0, load_idx45_1st, matrixArray_1);
+            matrixArray_stage_0 = _mm256_mask_permutexvar_epi32(matrixArray_stage_0, permute_mask45, load_idx45_2nd, matrixArray_2);
+            // Calculate the result of the 4|5 elements
+            result256_0 = _mm256_dpbf16_ps(result256_0, (__m256bh) matrixArray_stage_0, (__m256bh) _mm512_castsi512_si256(xArray_45));
+
+            STORE8_COMPLETE_RESULT(result256_0, y+tag_m_16x)
+            tag_m_16x += 8;
+        }
+    }
+
+    if (tag_m_16x != m) {
+        __m128i matrixArray128;
+        __m128 result128, tmp128;
+        for (BLASLONG i = tag_m_16x; i < m; i++) {
+            result128 = _mm_setzero_ps();
+            matrixArray128 = _mm_maskz_loadu_epi16(x_load_mask, &a[(i)*6]); // Load 1 row with n=6
+            result128 = _mm_dpbf16_ps(result128, (__m128bh) matrixArray128, (__m128bh) x128);
+            tmp128 = _mm_shuffle_ps(result128, result128, 14);
+            result128 = _mm_add_ps(result128, tmp128);
+            tmp128 = _mm_shuffle_ps(result128, result128, 1);
+            result128 = _mm_add_ps(result128, tmp128);
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+            y[i] = alpha * result128[0] + beta * y[i];
+#else
+            y[i] = alpha * result128[0] + y[i];
+#endif
+#else
+#ifndef ONE_ALPHA
+            y[i] = result128[0] * alpha;
+#else
+            y[i] = result128[0];
+#endif
+#endif
+        }
+    }
+
+    return 0;
+}
+
+// 16 rows parallel processing BF16 GEMV kernel for n=7 && lda ineffective scenario
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+static int sbgemv_kernel_16x7_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#else
+static int sbgemv_kernel_16x7_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#endif
+#else
+#ifndef ONE_ALPHA
+static int sbgemv_kernel_16x7_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#else
+static int sbgemv_kernel_16x7(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#endif
+#endif
+{
+    BLASLONG tag_m_16x = m & (~15);
+
+    unsigned char x_load_mask_value = (((unsigned char)0xff) >> 1);
+    __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value);
+    __m128i x128 = _mm_maskz_loadu_epi16(x_load_mask, x); // |x0|x1|x2|x3|x4|x5|x6|0|
+
+    if (tag_m_16x > 0) {
+        __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3;
+        __m512i matrixArray_stage_0, matrixArray_stage_1, matrixArray_stage_2, matrixArray_stage_3;
+        __m512i xArray_0123, xArray_4567;
+        __m512 result_0, result_1, result_2, result_3;
+
+#ifndef ONE_ALPHA
+        __m512 ALPHAVECTOR = _mm512_set1_ps(alpha);
+#endif
+#ifndef ZERO_BETA
+        __m512 BETAVECTOR = _mm512_set1_ps(beta);
+#endif
+
+        __m512i M512_EPI32_2 = _mm512_set1_epi32(2);
+        __m512i load_idx_stage1_0 = _mm512_set_epi16(31, 27, 26, 25, 24, 23, 22, 21, 31, 20, 19, 18, 17, 16, 15, 14,
+                                                     31, 13, 12, 11, 10,  9,  8,  7, 31,  6,  5,  4,  3,  2,  1,  0);
+        __m512i load_idx_stage2_0 = _mm512_set_epi32(29, 25, 21, 17, 13, 9, 5, 1, 28, 24, 20, 16, 12, 8, 4, 0);
+        __m512i load_idx_stage2_1 = _mm512_add_epi32(load_idx_stage2_0, M512_EPI32_2);
+
+        unsigned short x_blend_mask_value = ((unsigned short)0xff00);
+        __mmask16 x_blend_mask = *((__mmask16*) &x_blend_mask_value);
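+        // NOTE (editor): the blends below pack the x0|x1 pair into the low eight dword lanes and
+        // x2|x3 into the high eight (likewise x4|x5 / x6|x7), matching the stage-2 interleave of
+        // the matrix rows inside the loop.
+ 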
xArray_0123 = _mm512_mask_blend_epi32(x_blend_mask, _mm512_broadcastd_epi32(x128), \ + _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x1))); + xArray_4567 = _mm512_mask_blend_epi32(x_blend_mask, _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x2)), \ + _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x3))); + + unsigned int load_mask_value = (((unsigned int)0xffffffff) >> 4); + __mmask32 load_mask = *((__mmask32*) &load_mask_value); + for (BLASLONG idx_m = 0; idx_m < tag_m_16x; idx_m+=16) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m)*7]); // Load 4 rows with n=7 + matrixArray_1 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+4)*7]); // Load 4 rows with n=7 + matrixArray_2 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+8)*7]); // Load 4 rows with n=7 + matrixArray_3 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+12)*7]); // Load 4 rows with n=7 + + // Stage 1: padding + matrixArray_0 = _mm512_permutexvar_epi16(load_idx_stage1_0, matrixArray_0); // |a0|a1|a2|a3|...|b6|b7|c0|c1|c2|c3|...|d6|d7| + matrixArray_1 = _mm512_permutexvar_epi16(load_idx_stage1_0, matrixArray_1); // |e0|e1|e2|e3|...|f6|f7|g0|g1|g2|g3|...|h6|h7| + matrixArray_2 = _mm512_permutexvar_epi16(load_idx_stage1_0, matrixArray_2); // |i0|i1|i2|i3|...|j6|j7|k0|k1|k2|k3|...|l6|l7| + matrixArray_3 = _mm512_permutexvar_epi16(load_idx_stage1_0, matrixArray_3); // |m0|m1|m2|m3|...|n6|n7|o0|o1|o2|o3|...|p6|p7| + + // Stage 2: interleave per 32 bits + matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_0, matrixArray_1); // |a0|a1|...|h0|h1|a2|a3|...|h2|h3| + matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_1, matrixArray_1); // |a4|a5|...|h4|h5|a6|a7|...|h6|h7| + matrixArray_stage_2 = _mm512_permutex2var_epi32(matrixArray_2, load_idx_stage2_0, matrixArray_3); // |i0|i1|...|p0|p1|i2|i3|...|p2|p3| + matrixArray_stage_3 = _mm512_permutex2var_epi32(matrixArray_2, load_idx_stage2_1, matrixArray_3); // |i4|i5|...|p4|p5|i6|i7|...|p6|p7| + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_0, (__m512bh) xArray_0123); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_stage_2, (__m512bh) xArray_0123); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_1, (__m512bh) xArray_4567); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_stage_3, (__m512bh) xArray_4567); + + // Stage 3: interleave per 256 bits + result_2 = _mm512_shuffle_f32x4(result_0, result_1, 0x44); + result_3 = _mm512_shuffle_f32x4(result_0, result_1, 0xee); + + result_2 = _mm512_add_ps(result_2, result_3); + + STORE16_COMPLETE_RESULT(result_2, y+idx_m) + } + + if (m - tag_m_16x > 7) { + result_0 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_maskz_loadu_epi16(load_mask, &a[(tag_m_16x)*7]); // Load 4 rows with n=7 + matrixArray_1 = _mm512_maskz_loadu_epi16(load_mask, &a[(tag_m_16x+4)*7]); // Load 4 rows with n=7 + + // Stage 1: padding + matrixArray_0 = _mm512_permutexvar_epi16(load_idx_stage1_0, matrixArray_0); // |a0|a1|a2|a3|...|b6|b7|c0|c1|c2|c3|...|d6|d7| + matrixArray_1 = _mm512_permutexvar_epi16(load_idx_stage1_0, matrixArray_1); // |e0|e1|e2|e3|...|f6|f7|g0|g1|g2|g3|...|h6|h7| + + // Stage 2: interleave per 32 bits + matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_0, matrixArray_1); // |a0|a1|b0|b1|...|h0|h1|a2|a3|b2|b3|...|h2|h3| + matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_1, matrixArray_1); 
// |a4|a5|b4|b5|...|h4|h5|a6|a7|b6|b7|...|h6|h7|
+
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_0, (__m512bh) xArray_0123);
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_1, (__m512bh) xArray_4567);
+
+            __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(result_0), _mm512_extractf32x8_ps(result_0, 0x1));
+
+            STORE8_COMPLETE_RESULT(result256, y+tag_m_16x)
+
+            tag_m_16x += 8;
+        }
+
+        BLASLONG tail_num = m - tag_m_16x;
+        if (tail_num > 3) {
+            result_0 = _mm512_setzero_ps();
+
+            matrixArray_0 = _mm512_maskz_loadu_epi16(load_mask, &a[(tag_m_16x)*7]); // Load 4 rows with n=7
+            unsigned int tail_load_mask_value = (((unsigned int)0xffffffff) >> (4+(8-tail_num)*7));
+            __mmask32 tail_load_mask = *((__mmask32*) &tail_load_mask_value);
+            matrixArray_1 = _mm512_maskz_loadu_epi16(tail_load_mask, &a[(tag_m_16x+4)*7]); // Load tail_num-4 rows with n=7
+
+            // Stage 1: padding
+            matrixArray_0 = _mm512_permutexvar_epi16(load_idx_stage1_0, matrixArray_0); // |a0|a1|a2|a3|...|b6|b7|c0|c1|c2|c3|...|d6|d7|
+            matrixArray_1 = _mm512_permutexvar_epi16(load_idx_stage1_0, matrixArray_1); // |e0|e1|e2|e3|...|f6|f7|g0|g1|g2|g3|...|h6|h7|
+
+            // Stage 2: interleave per 32 bits
+            matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_0, matrixArray_1); // |a0|a1|b0|b1|...|h0|h1|a2|a3|b2|b3|...|h2|h3|
+            matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_1, matrixArray_1); // |a4|a5|b4|b5|...|h4|h5|a6|a7|b6|b7|...|h6|h7|
+
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_0, (__m512bh) xArray_0123);
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_1, (__m512bh) xArray_4567);
+
+            __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(result_0), _mm512_extractf32x8_ps(result_0, 0x1));
+
+            unsigned char tail_mask_value = (((unsigned char)0xff) >> (8-tail_num));
+            __mmask8 tail_mask = *((__mmask8*) &tail_mask_value);
+            STORE8_MASK_COMPLETE_RESULT(result256, y+tag_m_16x, tail_mask)
+            tag_m_16x = m;
+        }
+    }
+
+    if (tag_m_16x != m) {
+        __m128i matrixArray128;
+        __m128 result128, tmp128;
+        for (BLASLONG i = tag_m_16x; i < m; i++) {
+            result128 = _mm_setzero_ps();
+            matrixArray128 = _mm_maskz_loadu_epi16(x_load_mask, &a[(i)*7]); // Load 1 row with n=7
+            result128 = _mm_dpbf16_ps(result128, (__m128bh) matrixArray128, (__m128bh) x128);
+            tmp128 = _mm_shuffle_ps(result128, result128, 14);
+            result128 = _mm_add_ps(result128, tmp128);
+            tmp128 = _mm_shuffle_ps(result128, result128, 1);
+            result128 = _mm_add_ps(result128, tmp128);
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+            y[i] = alpha * result128[0] + beta * y[i];
+#else
+            y[i] = alpha * result128[0] + y[i];
+#endif
+#else
+#ifndef ONE_ALPHA
+            y[i] = result128[0] * alpha;
+#else
+            y[i] = result128[0];
+#endif
+#endif
+        }
+    }
+
+    return 0;
+}
+
+// 16 rows parallel processing BF16 GEMV kernel for n=8 && lda ineffective scenario
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+static int sbgemv_kernel_16x8_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#else
+static int sbgemv_kernel_16x8_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#endif
+#else
+#ifndef ONE_ALPHA
+static int sbgemv_kernel_16x8_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#else
+static int sbgemv_kernel_16x8(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#endif
+#endif
+{
+    BLASLONG tag_m_16x = m & (~15);
+
+    __m128i x128 = _mm_loadu_si128(x); // |x0|x1|x2|x3|x4|x5|x6|x7|
+
+    if (tag_m_16x >
0) { + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3; + __m512i matrixArray_stage_0, matrixArray_stage_1, matrixArray_stage_2, matrixArray_stage_3; + __m512i xArray_0123, xArray_4567; + __m512 result_0, result_1, result_2, result_3; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i M512_EPI32_2 = _mm512_set1_epi32(2); + __m512i load_idx_stage2_0 = _mm512_set_epi32(29, 25, 21, 17, 13, 9, 5, 1, 28, 24, 20, 16, 12, 8, 4, 0); + __m512i load_idx_stage2_1 = _mm512_add_epi32(load_idx_stage2_0, M512_EPI32_2); + + unsigned short x_blend_mask_value = ((unsigned short)0xff00); + __mmask16 x_blend_mask = *((__mmask16*) &x_blend_mask_value); + xArray_0123 = _mm512_mask_blend_epi32(x_blend_mask, _mm512_broadcastd_epi32(x128), \ + _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x1))); + xArray_4567 = _mm512_mask_blend_epi32(x_blend_mask, _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x2)), \ + _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128, 0x3))); + + for (BLASLONG idx_m = 0; idx_m < tag_m_16x; idx_m+=16) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(idx_m)*8]); // Load 4 rows with n=8 + matrixArray_1 = _mm512_loadu_si512(&a[(idx_m+4)*8]); // Load 4 rows with n=8 + matrixArray_2 = _mm512_loadu_si512(&a[(idx_m+8)*8]); // Load 4 rows with n=8 + matrixArray_3 = _mm512_loadu_si512(&a[(idx_m+12)*8]); // Load 4 rows with n=8 + + // Stage 1: interleave per 32 bits + matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_0, matrixArray_1); // |a0|a1|...|h0|h1|a2|a3|...|h2|h3| + matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_1, matrixArray_1); // |a4|a5|...|h4|h5|a6|a7|...|h6|h7| + matrixArray_stage_2 = _mm512_permutex2var_epi32(matrixArray_2, load_idx_stage2_0, matrixArray_3); // |i0|i1|...|p0|p1|i2|i3|...|p2|p3| + matrixArray_stage_3 = _mm512_permutex2var_epi32(matrixArray_2, load_idx_stage2_1, matrixArray_3); // |i4|i5|...|p4|p5|i6|i7|...|p6|p7| + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_0, (__m512bh) xArray_0123); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_stage_2, (__m512bh) xArray_0123); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_1, (__m512bh) xArray_4567); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_stage_3, (__m512bh) xArray_4567); + + // Stage 2: interleave per 256 bits + result_2 = _mm512_shuffle_f32x4(result_0, result_1, 0x44); + result_3 = _mm512_shuffle_f32x4(result_0, result_1, 0xee); + + result_2 = _mm512_add_ps(result_2, result_3); + + STORE16_COMPLETE_RESULT(result_2, y+idx_m) + } + + if (m - tag_m_16x > 7) { + result_0 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(tag_m_16x)*8]); // Load 4 rows with n=8 + matrixArray_1 = _mm512_loadu_si512(&a[(tag_m_16x+4)*8]); // Load 4 rows with n=8 + + // Stage 1: interleave per 32 bits + matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_0, matrixArray_1); // |a0|a1|b0|b1|...|h0|h1|a2|a3|b2|b3|...|h2|h3| + matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_1, matrixArray_1); // |a4|a5|b4|b5|...|h4|h5|a6|a7|b6|b7|...|h6|h7| + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_0, (__m512bh) xArray_0123); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_1, (__m512bh) xArray_4567); + + __m256 result256 = 
_mm256_add_ps(_mm512_castps512_ps256(result_0), _mm512_extractf32x8_ps(result_0, 0x1));
+
+            STORE8_COMPLETE_RESULT(result256, y+tag_m_16x)
+            tag_m_16x += 8;
+        }
+
+        BLASLONG tail_num = m - tag_m_16x;
+        if (tail_num > 3) {
+            result_0 = _mm512_setzero_ps();
+
+            matrixArray_0 = _mm512_loadu_si512(&a[(tag_m_16x)*8]); // Load 4 rows with n=8
+            unsigned short tail_load_mask_value = (((unsigned short)0xffff) >> ((8-tail_num)*4));
+            __mmask16 tail_load_mask = *((__mmask16*) &tail_load_mask_value);
+            matrixArray_1 = _mm512_maskz_loadu_epi32(tail_load_mask, &a[(tag_m_16x+4)*8]); // Load tail_num-4 rows with n=8
+
+            // Stage 1: interleave per 32 bits
+            matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_0, matrixArray_1); // |a0|a1|b0|b1|...|h0|h1|a2|a3|b2|b3|...|h2|h3|
+            matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage2_1, matrixArray_1); // |a4|a5|b4|b5|...|h4|h5|a6|a7|b6|b7|...|h6|h7|
+
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_0, (__m512bh) xArray_0123);
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_stage_1, (__m512bh) xArray_4567);
+
+            __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(result_0), _mm512_extractf32x8_ps(result_0, 0x1));
+
+            unsigned char tail_mask_value = (((unsigned char)0xff) >> (8-tail_num));
+            __mmask8 tail_mask = *((__mmask8*) &tail_mask_value);
+            STORE8_MASK_COMPLETE_RESULT(result256, y+tag_m_16x, tail_mask)
+            tag_m_16x = m;
+        }
+    }
+
+    if (tag_m_16x != m) {
+        __m128i matrixArray128;
+        __m128 result128, tmp128;
+        for (BLASLONG i = tag_m_16x; i < m; i++) {
+            result128 = _mm_setzero_ps();
+            matrixArray128 = _mm_loadu_si128(&a[(i)*8]); // Load 1 row with n=8
+            result128 = _mm_dpbf16_ps(result128, (__m128bh) matrixArray128, (__m128bh) x128);
+            tmp128 = _mm_shuffle_ps(result128, result128, 14);
+            result128 = _mm_add_ps(result128, tmp128);
+            tmp128 = _mm_shuffle_ps(result128, result128, 1);
+            result128 = _mm_add_ps(result128, tmp128);
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+            y[i] = alpha * result128[0] + beta * y[i];
+#else
+            y[i] = alpha * result128[0] + y[i];
+#endif
+#else
+#ifndef ONE_ALPHA
+            y[i] = result128[0] * alpha;
+#else
+            y[i] = result128[0];
+#endif
+#endif
+        }
+    }
+
+    return 0;
+}
+
+// 14 rows parallel processing BF16 GEMV kernel for n=9 && lda ineffective scenario
+#ifndef ZERO_BETA
+#ifndef ONE_BETA
+static int sbgemv_kernel_14x9_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#else
+static int sbgemv_kernel_14x9_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y)
+#endif
+#else
+#ifndef ONE_ALPHA
+static int sbgemv_kernel_14x9_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#else
+static int sbgemv_kernel_14x9(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y)
+#endif
+#endif
+{
+    BLASLONG tag_m_14x = m - (m%14);
+
+    unsigned char x_load_mask_value = (((unsigned char)0xff) >> 7);
+    __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value);
+    __m128i x128_0 = _mm_loadu_si128(x); // |x0|x1|x2|x3|x4|x5|x6|x7|
+    __m128i x128_1 = _mm_maskz_loadu_epi16(x_load_mask, (x+8)); // |x8|0 |0 | 0| 0| 0| 0| 0|
+
+    if (tag_m_14x > 0) {
+        __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5;
+        __m512i matrixArray_stage_0, matrixArray_stage_1, matrixArray_stage_2, matrixArray_stage_3;
+        __m512i xArray_01, xArray_23, xArray_45, xArray_67, xArray_89;
+        __m512 result_0, result_1;
+
+#ifndef ONE_ALPHA
+        __m512 ALPHAVECTOR = _mm512_set1_ps(alpha);
+#endif
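+        // NOTE (editor): each kernel in this template is compiled several times under different
+        // ZERO_BETA/ONE_BETA/ONE_ALPHA settings, yielding the _alpha_beta, _alpha_one, _alpha and
+        // plain variants; ALPHAVECTOR/BETAVECTOR are presumably consumed by the
+        // STORE*_COMPLETE_RESULT macros defined earlier in this file.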
+#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m256i M256_EPI16_2 = _mm256_set1_epi16(2); + __m256i idx_base_0 = _mm256_set_epi16( 0, 0, 55, 54, 46, 45, 37, 36, 28, 27, 19, 18, 10, 9, 1, 0); + __m256i idx_base_1 = _mm256_add_epi16(idx_base_0, M256_EPI16_2); + __m256i idx_base_2 = _mm256_add_epi16(idx_base_1, M256_EPI16_2); + __m256i idx_base_3 = _mm256_add_epi16(idx_base_2, M256_EPI16_2); + __m256i idx_base_4 = _mm256_add_epi16(idx_base_3, M256_EPI16_2); + __m512i idx_idx = _mm512_set_epi32( 0, 0, 22, 21, 20, 19, 18, 17, 16, 6, 5, 4, 3, 2, 1, 0); + + __m512i load_idx_stage1_0 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_0), idx_idx, _mm512_castsi256_si512(idx_base_1)); + __m512i load_idx_stage1_1 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_2), idx_idx, _mm512_castsi256_si512(idx_base_3)); + __m512i load_idx_stage1_2 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_1), idx_idx, _mm512_castsi256_si512(idx_base_0)); + __m512i load_idx_stage1_3 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_3), idx_idx, _mm512_castsi256_si512(idx_base_2)); + __m512i load_idx_stage1_4 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_4), idx_idx, _mm512_castsi256_si512(idx_base_4)); + __m512i load_idx_stage2_0 = _mm512_set_epi32( 0, 0, 22, 21, 20, 19, 18, 17, 16, 13, 12, 11, 10, 9, 8, 7); + + xArray_01 = _mm512_broadcastd_epi32(x128_0); // |x0|x1|x0|x1| ... |x0|x1| + xArray_23 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x1)); // |x2|x3|x2|x3| ... |x2|x3| + xArray_45 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x2)); // |x4|x5|x4|x5| ... |x4|x5| + xArray_67 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x3)); // |x6|x7|x6|x7| ... |x6|x7| + xArray_89 = _mm512_broadcastd_epi32(x128_1); // |x8|0 |x8| 0| ... 
|x8| 0| + + unsigned int load_mask_value = (((unsigned int)0xffffffff) >> 1); + __mmask32 load_mask = *((__mmask32*) &load_mask_value); + unsigned short blend_mask_value = ((unsigned short)0x3f80); + __mmask16 blend_mask = *((__mmask16*) &blend_mask_value); + unsigned short store_mask_value = (((unsigned short)0xffff) >> 2); + __mmask16 store_mask = *((__mmask16*) &store_mask_value); + for (BLASLONG idx_m = 0; idx_m < tag_m_14x; idx_m+=14) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(idx_m)*9]); // Load 3 rows with n=9 plus 5 elements + matrixArray_1 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+3)*9 + 5]); // Load 3 rows with n=9 plus 4 elements + matrixArray_2 = _mm512_loadu_si512(&a[(idx_m+7)*9]); // Load 3 rows with n=9 plus 5 elements + matrixArray_3 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+10)*9 + 5]); // Load 3 rows with n=9 plus 4 elements + + // Stage 1: interleave per 16 bits + matrixArray_stage_0 = _mm512_permutex2var_epi16(matrixArray_0, load_idx_stage1_0, matrixArray_1); // |a0|a1|...|g0|g1|a2|a3|...|g2|g3|x|x|x|x| + matrixArray_stage_1 = _mm512_permutex2var_epi16(matrixArray_0, load_idx_stage1_1, matrixArray_1); // |a4|a5|...|g4|g5|a6|a7|...|g6|g7|x|x|x|x| + matrixArray_stage_2 = _mm512_permutex2var_epi16(matrixArray_2, load_idx_stage1_2, matrixArray_3); // |h2|h3|...|n2|n3|h0|h1|...|n0|n1|x|x|x|x| + matrixArray_stage_3 = _mm512_permutex2var_epi16(matrixArray_2, load_idx_stage1_3, matrixArray_3); // |h6|h7|...|n6|n7|h4|h5|...|n4|n5|x|x|x|x| + matrixArray_4 = _mm512_permutex2var_epi16(matrixArray_0, load_idx_stage1_4, matrixArray_1); // |a8| x|...|g8| x| x| x|...| x| x|x|x|x|x| + matrixArray_5 = _mm512_permutex2var_epi16(matrixArray_2, load_idx_stage1_4, matrixArray_3); // | x| x|...| x| x|h8| x|...|n8| x|x|x|x|x| + + // Stage 2: interleave per 32 bits + matrixArray_0 = _mm512_mask_blend_epi32(blend_mask, matrixArray_stage_0, matrixArray_stage_2); // |a0|a1|b0|b1|...|h0|h1|i0|i1|j0|j1|...|n0|n1|x|x|x|x| + matrixArray_1 = _mm512_permutex2var_epi32(matrixArray_stage_0, load_idx_stage2_0, matrixArray_stage_2); // |a2|a3|b2|b3|...|h2|h3|i2|i3|j2|j3|...|n2|n3|x|x|x|x| + matrixArray_2 = _mm512_mask_blend_epi32(blend_mask, matrixArray_stage_1, matrixArray_stage_3); // |a4|a5|b4|b5|...|h4|h5|i4|i5|j4|j5|...|n4|n5|x|x|x|x| + matrixArray_3 = _mm512_permutex2var_epi32(matrixArray_stage_1, load_idx_stage2_0, matrixArray_stage_3); // |a6|a7|b6|b7|...|h6|h7|i6|i7|j6|j7|...|n6|n7|x|x|x|x| + matrixArray_4 = _mm512_mask_blend_epi32(blend_mask, matrixArray_4, matrixArray_5); // |a8| x|b8| x|...|h8| x|i8| x|j8| x|...|n8| x|x|x|x|x| + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_0, (__m512bh) xArray_01); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_1, (__m512bh) xArray_23); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_2, (__m512bh) xArray_45); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_3, (__m512bh) xArray_67); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_4, (__m512bh) xArray_89); + result_0 = _mm512_add_ps(result_0, result_1); + + STORE16_MASK_COMPLETE_RESULT(result_0, y+idx_m, store_mask) + } + } + + if (tag_m_14x != m) { + __m256i matrixArray256; + __m256i x256 = _mm256_insertf128_si256(_mm256_castsi128_si256(x128_0), x128_1, 0x1); + __m256 result256; + __m128 result128, tmp128; + unsigned short load256_mask_value = (((unsigned short)0xffff) >> 7); + __mmask16 load256_mask = *((__mmask16*) &load256_mask_value); + for (BLASLONG 
i = tag_m_14x; i < m; i++) { + result256 = _mm256_setzero_ps(); + matrixArray256 = _mm256_maskz_loadu_epi16(load256_mask, &a[(i)*9]); + result256 = _mm256_dpbf16_ps(result256, (__m256bh) matrixArray256, (__m256bh) x256); + result128 = _mm_add_ps(_mm256_castps256_ps128(result256), _mm256_extractf128_ps(result256, 0x1)); + tmp128 = _mm_shuffle_ps(result128, result128, 14); + result128 = _mm_add_ps(result128, tmp128); + tmp128 = _mm_shuffle_ps(result128, result128, 1); + result128 = _mm_add_ps(result128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * result128[0] + beta * y[i]; +#else + y[i] = alpha * result128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = result128[0] * alpha; +#else + y[i] = result128[0]; +#endif +#endif + } + } + + return 0; +} + +// 12 rows parallel processing BF16 GEMV kernel for n=10 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_12x10_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_12x10_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_12x10_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_12x10(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_12x = m - (m%12); + + unsigned char x_load_mask_value = (((unsigned char)0xf) >> 3); + __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value); + __m128i x128_0 = _mm_loadu_si128(x); // |x0|x1|x2|x3|x4|x5|x6|x7| + __m128i x128_1 = _mm_maskz_loadu_epi32(x_load_mask, (x+8)); // |x8|x9|0 | 0| 0| 0| 0| 0| + + if (tag_m_12x > 0) { + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4; + __m512i matrixArray_stage_0, matrixArray_stage_1, matrixArray_stage_2, matrixArray_stage_3, matrixArray_stage_4, matrixArray_stage_5; + __m512i xArray_01, xArray_23, xArray_45, xArray_67, xArray_89; + __m512 result_0, result_1; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m256i M256_EPI32_1 = _mm256_set1_epi32(1); + __m256i idx_base_0 = _mm256_set_epi32( 0, 0, 26, 21, 16, 10, 5, 0); + __m256i idx_base_1 = _mm256_add_epi32(idx_base_0, M256_EPI32_1); + __m256i idx_base_2 = _mm256_add_epi32(idx_base_1, M256_EPI32_1); + __m256i idx_base_3 = _mm256_add_epi32(idx_base_2, M256_EPI32_1); + __m256i idx_base_4 = _mm256_add_epi32(idx_base_3, M256_EPI32_1); + __m512i idx_idx = _mm512_set_epi32( 0, 0, 0, 0, 21, 20, 19, 18, 17, 16, 5, 4, 3, 2, 1, 0); + + __m512i load_idx_stage1_0 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_0), idx_idx, _mm512_castsi256_si512(idx_base_1)); + __m512i load_idx_stage1_1 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_2), idx_idx, _mm512_castsi256_si512(idx_base_3)); + __m512i load_idx_stage1_2 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_1), idx_idx, _mm512_castsi256_si512(idx_base_0)); + __m512i load_idx_stage1_3 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_3), idx_idx, _mm512_castsi256_si512(idx_base_2)); + __m512i load_idx_stage1_4 = _mm512_permutex2var_epi32(_mm512_castsi256_si512(idx_base_4), idx_idx, _mm512_castsi256_si512(idx_base_4)); + __m512i load_idx_stage2_0 = _mm512_set_epi32( 0, 0, 0, 0, 21, 20, 19, 18, 17, 16, 11, 10, 9, 8, 7, 6); + + xArray_01 = _mm512_broadcastd_epi32(x128_0); // |x0|x1|x0|x1| ... 
|x0|x1|
+        xArray_23 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x1)); // |x2|x3|x2|x3| ... |x2|x3|
+        xArray_45 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x2)); // |x4|x5|x4|x5| ... |x4|x5|
+        xArray_67 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x3)); // |x6|x7|x6|x7| ... |x6|x7|
+        xArray_89 = _mm512_broadcastd_epi32(x128_1); // |x8|x9|x8|x9| ... |x8|x9|
+
+        unsigned short blend_mask_value = ((unsigned short)0x0fc0);
+        __mmask16 blend_mask = *((__mmask16*) &blend_mask_value);
+        unsigned short load_mask_value = (((unsigned short)0xffff) >> 1);
+        __mmask16 load_mask = *((__mmask16*) &load_mask_value);
+        unsigned short store_mask_value = (((unsigned short)0xffff) >> 4);
+        __mmask16 store_mask = *((__mmask16*) &store_mask_value);
+        for (BLASLONG idx_m = 0; idx_m < tag_m_12x; idx_m+=12) {
+            result_0 = _mm512_setzero_ps();
+            result_1 = _mm512_setzero_ps();
+
+            matrixArray_0 = _mm512_maskz_loadu_epi32(load_mask, &a[(idx_m)*10]); // Load 3 rows with n=10
+            matrixArray_1 = _mm512_maskz_loadu_epi32(load_mask, &a[(idx_m+3)*10]); // Load 3 rows with n=10
+            matrixArray_2 = _mm512_maskz_loadu_epi32(load_mask, &a[(idx_m+6)*10]); // Load 3 rows with n=10
+            matrixArray_3 = _mm512_maskz_loadu_epi32(load_mask, &a[(idx_m+9)*10]); // Load 3 rows with n=10
+
+            // Stage 1: interleave per 32 bits
+            matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage1_0, matrixArray_1); // |a0|a1|...|f0|f1|a2|a3|...|f2|f3|x|x|x|x|x|x|x|x|
+            matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage1_1, matrixArray_1); // |a4|a5|...|f4|f5|a6|a7|...|f6|f7|x|x|x|x|x|x|x|x|
+            matrixArray_stage_2 = _mm512_permutex2var_epi32(matrixArray_2, load_idx_stage1_2, matrixArray_3); // |g2|g3|...|l2|l3|g0|g1|...|l0|l1|x|x|x|x|x|x|x|x|
+            matrixArray_stage_3 = _mm512_permutex2var_epi32(matrixArray_2, load_idx_stage1_3, matrixArray_3); // |g6|g7|...|l6|l7|g4|g5|...|l4|l5|x|x|x|x|x|x|x|x|
+            matrixArray_stage_4 = _mm512_permutex2var_epi32(matrixArray_0, load_idx_stage1_4, matrixArray_1); // |a8|a9|...|f8|f9| x| x|...| x| x|x|x|x|x|x|x|x|x|
+            matrixArray_stage_5 = _mm512_permutex2var_epi32(matrixArray_2, load_idx_stage1_4, matrixArray_3); // | x| x|...| x| x|g8|g9|...|l8|l9|x|x|x|x|x|x|x|x|
+
+            // Stage 2: interleave per 256 bits
+            matrixArray_0 = _mm512_mask_blend_epi32(blend_mask, matrixArray_stage_0, matrixArray_stage_2); // |a0|a1|...|l0|l1|x|x|x|x|x|x|x|x|
+            matrixArray_1 = _mm512_permutex2var_epi32(matrixArray_stage_0, load_idx_stage2_0, matrixArray_stage_2); // |a2|a3|...|l2|l3|x|x|x|x|x|x|x|x|
+            matrixArray_2 = _mm512_mask_blend_epi32(blend_mask, matrixArray_stage_1, matrixArray_stage_3); // |a4|a5|...|l4|l5|x|x|x|x|x|x|x|x|
+            matrixArray_3 = _mm512_permutex2var_epi32(matrixArray_stage_1, load_idx_stage2_0, matrixArray_stage_3); // |a6|a7|...|l6|l7|x|x|x|x|x|x|x|x|
+            matrixArray_4 = _mm512_mask_blend_epi32(blend_mask, matrixArray_stage_4, matrixArray_stage_5); // |a8|a9|...|l8|l9|x|x|x|x|x|x|x|x|
+
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_0, (__m512bh) xArray_01);
+            result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_1, (__m512bh) xArray_23);
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_2, (__m512bh) xArray_45);
+            result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_3, (__m512bh) xArray_67);
+            result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_4, (__m512bh) xArray_89);
+            result_0 = _mm512_add_ps(result_0, result_1);
+
+            STORE16_MASK_COMPLETE_RESULT(result_0, y+idx_m, store_mask)
+        }
+    }
+
+    if (tag_m_12x !=
m) { + __m256i matrixArray256; + __m256i x256 = _mm256_insertf128_si256(_mm256_castsi128_si256(x128_0), x128_1, 0x1); + __m256 result256; + __m128 result128, tmp128; + unsigned char load256_mask_value = (((unsigned char)0xff) >> 3); + __mmask8 load256_mask = *((__mmask8*) &load256_mask_value); + for (BLASLONG i = tag_m_12x; i < m; i++) { + result256 = _mm256_setzero_ps(); + matrixArray256 = _mm256_maskz_loadu_epi32(load256_mask, &a[(i)*10]); + result256 = _mm256_dpbf16_ps(result256, (__m256bh) matrixArray256, (__m256bh) x256); + result128 = _mm_add_ps(_mm256_castps256_ps128(result256), _mm256_extractf128_ps(result256, 0x1)); + tmp128 = _mm_shuffle_ps(result128, result128, 14); + result128 = _mm_add_ps(result128, tmp128); + tmp128 = _mm_shuffle_ps(result128, result128, 1); + result128 = _mm_add_ps(result128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * result128[0] + beta * y[i]; +#else + y[i] = alpha * result128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = result128[0] * alpha; +#else + y[i] = result128[0]; +#endif +#endif + } + } + + return 0; +} + +// 15 rows parallel processing BF16 GEMV kernel for n=11 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_15x11_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_15x11_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_15x11_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_15x11(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_15x = m - (m%15); + + unsigned char x_load_mask_value = (((unsigned char)0xff) >> 5); + __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value); + __m128i x128_0 = _mm_loadu_si128(x); // |x0|x1| x2|x3|x4|x5|x6|x7| + __m128i x128_1 = _mm_maskz_loadu_epi16(x_load_mask, (x+8)); // |x8|x9|x10| 0| 0| 0| 0| 0| + + if (tag_m_15x > 0) { + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5; + __m512i matrixArray_stage_0, matrixArray_stage_1, matrixArray_stage_2, matrixArray_stage_3, matrixArray_stage_4, matrixArray_stage_5; + __m512i xArray_01, xArray_23, xArray_45, xArray_67, xArray_89, xArray_10; + __m512 result_0, result_1; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i idx_stage1_base_0, idx_stage1_base_1, idx_stage1_base_2, idx_stage1_base_3, idx_stage1_base_4, idx_stage1_base_5; + __m512i idx_stage2_base_0, idx_stage2_base_1, idx_stage2_base_2, idx_stage2_base_3; + + __m512i M512_EPI16_2, M512_EPI16_4, M512_EPI16_6, M512_EPI32_5; + M512_EPI16_2 = _mm512_set1_epi16(2); + M512_EPI16_4 = _mm512_add_epi16(M512_EPI16_2, M512_EPI16_2); + M512_EPI16_6 = _mm512_add_epi16(M512_EPI16_4, M512_EPI16_2); + M512_EPI32_5 = _mm512_set1_epi32(5); + + unsigned int BASE_MASK_10_value = ((unsigned int)0x000003ff); + __mmask32 BASE_MASK_10 = *((__mmask32*) &BASE_MASK_10_value); + unsigned int BASE_MASK_20_value = ((unsigned int)0x000ffc00); + __mmask32 BASE_MASK_20 = *((__mmask32*) &BASE_MASK_20_value); + unsigned int BASE_MASK_30_value = ((unsigned int)0x3ff00000); + __mmask32 BASE_MASK_30 = *((__mmask32*) &BASE_MASK_30_value); + + idx_stage1_base_0 = _mm512_set_epi16( 0, 0, 49, 48, 38, 37, 27, 26, 16, 15, 5, 4, 47, 46, 36, 35, + 25, 24, 14, 13, 3, 2, 45, 44, 34, 33, 
23, 22, 12, 11, 1, 0); + idx_stage1_base_1 = _mm512_add_epi16(idx_stage1_base_0, M512_EPI16_6); + + idx_stage1_base_2 = _mm512_mask_add_epi16(idx_stage1_base_0, BASE_MASK_10, idx_stage1_base_0, M512_EPI16_2); + idx_stage1_base_2 = _mm512_mask_sub_epi16(idx_stage1_base_2, BASE_MASK_20, idx_stage1_base_0, M512_EPI16_2); + idx_stage1_base_3 = _mm512_add_epi16(idx_stage1_base_2, M512_EPI16_6); + + idx_stage1_base_4 = _mm512_mask_add_epi16(idx_stage1_base_2, BASE_MASK_10, idx_stage1_base_2, M512_EPI16_2); + idx_stage1_base_4 = _mm512_mask_add_epi16(idx_stage1_base_4, BASE_MASK_20, idx_stage1_base_2, M512_EPI16_2); + idx_stage1_base_4 = _mm512_mask_sub_epi16(idx_stage1_base_4, BASE_MASK_30, idx_stage1_base_2, M512_EPI16_4); + idx_stage1_base_5 = _mm512_add_epi16(idx_stage1_base_4, M512_EPI16_6); + + unsigned short idx_stage2_mask_1_value = ((unsigned short)0x03e0); + __mmask16 idx_stage2_mask_1 = *((__mmask16*) &idx_stage2_mask_1_value); + unsigned short idx_stage2_mask_2_value = ((unsigned short)0x7c00); + __mmask16 idx_stage2_mask_2 = *((__mmask16*) &idx_stage2_mask_2_value); + idx_stage2_base_0 = _mm512_set_epi32( 0, 0, 0, 0, 0, 0, 20, 19, 18, 17, 16, 9, 8, 7, 6, 5); + idx_stage2_base_1 = _mm512_set_epi32( 0, 25, 24, 23, 22, 21, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + idx_stage2_base_2 = _mm512_add_epi32(idx_stage2_base_0, M512_EPI32_5); + idx_stage2_base_2 = _mm512_mask_add_epi32(idx_stage2_base_2, idx_stage2_mask_1, idx_stage2_base_2, M512_EPI32_5); + idx_stage2_base_3 = _mm512_mask_sub_epi32(idx_stage2_base_1, idx_stage2_mask_2, idx_stage2_base_1, M512_EPI32_5); + + xArray_01 = _mm512_broadcastd_epi32(x128_0); // |x0 |x1 |x0 |x1 | ... |x0 |x1 | + xArray_23 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x1)); // |x2 |x3 |x2 |x3 | ... |x2 |x3 | + xArray_45 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x2)); // |x4 |x5 |x4 |x5 | ... |x4 |x5 | + xArray_67 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x3)); // |x6 |x7 |x6 |x7 | ... |x6 |x7 | + xArray_89 = _mm512_broadcastd_epi32(x128_1); // |x8 |x9 |x8 |x9 | ... |x8 |x9 | + xArray_10 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_1, 0x1)); // |x10|0 |x10|0 | ... 
|x10|0 | + + unsigned int load_mask_value = (((unsigned int)0xffffffff) >> 9); + __mmask32 load_mask = *((__mmask32*) &load_mask_value); + + unsigned short store_mask_value = (((unsigned short)0xffff) >> 1); + __mmask16 store_mask = *((__mmask16*) &store_mask_value); + + for (BLASLONG idx_m = 0; idx_m < tag_m_15x; idx_m+=15) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[idx_m*11]); // Load 2 rows with n=11 plus 10 elements + matrixArray_1 = _mm512_maskz_loadu_epi16(load_mask, &a[idx_m*11 + 32]); // Load 2 rows with n=11 plus 1 element + matrixArray_2 = _mm512_loadu_si512(&a[(idx_m+5)*11]); // Load 2 rows with n=11 plus 10 elements + matrixArray_3 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+5)*11 + 32]); // Load 2 rows with n=11 plus 1 element + matrixArray_4 = _mm512_loadu_si512(&a[(idx_m+10)*11]); // Load 2 rows with n=11 plus 10 elements + matrixArray_5 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+10)*11 + 32]); // Load 2 rows with n=11 plus 1 element + + // Stage 1: interleave per 16 bits + matrixArray_stage_0 = _mm512_permutex2var_epi16(matrixArray_0, idx_stage1_base_0, matrixArray_1); // |a0|a1|...|e0|e1|a2|a3|...|e2|e3|a4 |a5|...|e4 |e5| + matrixArray_stage_1 = _mm512_permutex2var_epi16(matrixArray_0, idx_stage1_base_1, matrixArray_1); // |a6|a7|...|e6|e7|a8|a9|...|e8|e9|a10|x |...|e10|x | + matrixArray_stage_2 = _mm512_permutex2var_epi16(matrixArray_2, idx_stage1_base_2, matrixArray_3); // |f2|f3|...|j2|j3|f0|f1|...|j0|j1|f4 |f5|...|j4 |j5| + matrixArray_stage_3 = _mm512_permutex2var_epi16(matrixArray_2, idx_stage1_base_3, matrixArray_3); // |f8|f9|...|j8|j9|f6|f7|...|j6|j7|f10|x |...|j10|x | + matrixArray_stage_4 = _mm512_permutex2var_epi16(matrixArray_4, idx_stage1_base_4, matrixArray_5); // |k4|k5|...|o4|o5|k2|k3|...|o2|o3|k0 |k1|...|o0 |o1| + matrixArray_stage_5 = _mm512_permutex2var_epi16(matrixArray_4, idx_stage1_base_5, matrixArray_5); // |k10|x|...|o10|x|k8|k9|...|o8|o9|k6 |k7|...|o6 |o7| + + // Stage 2: interleave per 32 bits + matrixArray_0 = _mm512_mask_blend_epi32(idx_stage2_mask_1, matrixArray_stage_0, matrixArray_stage_2); // |a0|a1|...|j0|j1|x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_3 = _mm512_mask_blend_epi32(idx_stage2_mask_1, matrixArray_stage_1, matrixArray_stage_3); // |a6|a7|...|j6|j7|x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_1 = _mm512_permutex2var_epi32(matrixArray_stage_0, idx_stage2_base_0, matrixArray_stage_2); // |a2|a3|...|j2|j3|x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_2 = _mm512_permutex2var_epi32(matrixArray_stage_0, idx_stage2_base_2, matrixArray_stage_2); // |a4|a5|...|j4|j5|x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_4 = _mm512_permutex2var_epi32(matrixArray_stage_1, idx_stage2_base_0, matrixArray_stage_3); // |a8|a9|...|j8|j9|x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_5 = _mm512_permutex2var_epi32(matrixArray_stage_1, idx_stage2_base_2, matrixArray_stage_3); // |a10|x|...|j10|x|x|x|x|x|x|x|x|x|x|x|x|x| + + matrixArray_0 = _mm512_mask_blend_epi32(idx_stage2_mask_2, matrixArray_0, matrixArray_stage_4); // |a0|a1|.......................|o0|o1|x|x| + matrixArray_3 = _mm512_mask_blend_epi32(idx_stage2_mask_2, matrixArray_3, matrixArray_stage_5); // |a6|a7|.......................|o6|o7|x|x| + matrixArray_1 = _mm512_permutex2var_epi32(matrixArray_1 , idx_stage2_base_1, matrixArray_stage_4); // |a2|a3|.......................|o2|o3|x|x| + matrixArray_2 = _mm512_permutex2var_epi32(matrixArray_2 , idx_stage2_base_3, matrixArray_stage_4); // |a4|a5|.......................|o4|o5|x|x| + matrixArray_4 = 
_mm512_permutex2var_epi32(matrixArray_4 , idx_stage2_base_1, matrixArray_stage_5); // |a8|a9|.......................|o8|o9|x|x| + matrixArray_5 = _mm512_permutex2var_epi32(matrixArray_5 , idx_stage2_base_3, matrixArray_stage_5); // |a10|x|.......................|o10|x|x|x| + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_0, (__m512bh) xArray_01); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_1, (__m512bh) xArray_23); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_2, (__m512bh) xArray_45); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_3, (__m512bh) xArray_67); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_4, (__m512bh) xArray_89); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_5, (__m512bh) xArray_10); + result_0 = _mm512_add_ps(result_0, result_1); + + STORE16_MASK_COMPLETE_RESULT(result_0, y+idx_m, store_mask) + } + } + + if (tag_m_15x != m) { + __m256i matrixArray256; + __m256i x256 = _mm256_insertf128_si256(_mm256_castsi128_si256(x128_0), x128_1, 0x1); + __m256 result256; + __m128 result128, tmp128; + unsigned short load256_mask_value = (((unsigned short)0xffff) >> 5); + __mmask16 load256_mask = *((__mmask16*) &load256_mask_value); + for (BLASLONG i = tag_m_15x; i < m; i++) { + result256 = _mm256_setzero_ps(); + matrixArray256 = _mm256_maskz_loadu_epi16(load256_mask, &a[(i)*11]); + result256 = _mm256_dpbf16_ps(result256, (__m256bh) matrixArray256, (__m256bh) x256); + result128 = _mm_add_ps(_mm256_castps256_ps128(result256), _mm256_extractf128_ps(result256, 0x1)); + tmp128 = _mm_shuffle_ps(result128, result128, 14); + result128 = _mm_add_ps(result128, tmp128); + tmp128 = _mm_shuffle_ps(result128, result128, 1); + result128 = _mm_add_ps(result128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * result128[0] + beta * y[i]; +#else + y[i] = alpha * result128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = result128[0] * alpha; +#else + y[i] = result128[0]; +#endif +#endif + } + } + + return 0; +} + +// 15 rows parallel processing BF16 GEMV kernel for n=12 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_15x12_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_15x12_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_15x12_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_15x12(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_15x = m - (m%15); + + unsigned char x_load_mask_value = (((unsigned char)0xff) >> 4); + __mmask8 x_load_mask = *((__mmask8*) &x_load_mask_value); + __m128i x128_0 = _mm_loadu_si128(x); // |x0|x1| x2| x3|x4|x5|x6|x7| + __m128i x128_1 = _mm_maskz_loadu_epi16(x_load_mask, (x+8)); // |x8|x9|x10|x11| 0| 0| 0| 0| + + if (tag_m_15x > 0) { + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5; + __m512i matrixArray_stage_0, matrixArray_stage_1, matrixArray_stage_2, matrixArray_stage_3, matrixArray_stage_4, matrixArray_stage_5; + __m512i xArray_01, xArray_23, xArray_45, xArray_67, xArray_89, xArray_10; + __m512 result_0, result_1; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i idx_stage1_base_0, idx_stage1_base_1, 
idx_stage1_base_2, idx_stage1_base_3, idx_stage1_base_4, idx_stage1_base_5; + __m512i idx_stage2_base_0, idx_stage2_base_1, idx_stage2_base_2, idx_stage2_base_3; + + __m512i M512_EPI32_1, M512_EPI32_2, M512_EPI32_3, M512_EPI32_5; + M512_EPI32_1 = _mm512_set1_epi32(1); + M512_EPI32_2 = _mm512_add_epi32(M512_EPI32_1, M512_EPI32_1); + M512_EPI32_3 = _mm512_add_epi32(M512_EPI32_2, M512_EPI32_1); + M512_EPI32_5 = _mm512_add_epi32(M512_EPI32_3, M512_EPI32_2); + + unsigned short BASE_MASK_10_value = ((unsigned short)0x001f); + __mmask16 BASE_MASK_10 = *((__mmask16*) &BASE_MASK_10_value); + unsigned short BASE_MASK_20_value = ((unsigned short)0x03e0); + __mmask16 BASE_MASK_20 = *((__mmask16*) &BASE_MASK_20_value); + unsigned short BASE_MASK_30_value = ((unsigned short)0xfc00); + __mmask16 BASE_MASK_30 = *((__mmask16*) &BASE_MASK_30_value); + + idx_stage1_base_0 = _mm512_set_epi32( 0, 26, 20, 14, 8, 2, 25, 19, 13, 7, 1, 24, 18, 12, 6, 0); + idx_stage1_base_1 = _mm512_add_epi32(idx_stage1_base_0, M512_EPI32_3); + + idx_stage1_base_2 = _mm512_mask_add_epi32(idx_stage1_base_0, BASE_MASK_10, idx_stage1_base_0, M512_EPI32_1); + idx_stage1_base_2 = _mm512_mask_sub_epi32(idx_stage1_base_2, BASE_MASK_20, idx_stage1_base_0, M512_EPI32_1); + idx_stage1_base_3 = _mm512_add_epi32(idx_stage1_base_2, M512_EPI32_3); + + idx_stage1_base_4 = _mm512_mask_add_epi32(idx_stage1_base_2, BASE_MASK_10, idx_stage1_base_2, M512_EPI32_1); + idx_stage1_base_4 = _mm512_mask_add_epi32(idx_stage1_base_4, BASE_MASK_20, idx_stage1_base_2, M512_EPI32_1); + idx_stage1_base_4 = _mm512_mask_sub_epi32(idx_stage1_base_4, BASE_MASK_30, idx_stage1_base_2, M512_EPI32_2); + idx_stage1_base_5 = _mm512_add_epi32(idx_stage1_base_4, M512_EPI32_3); + + unsigned short idx_stage2_mask_1_value = ((unsigned short)0x03e0); + __mmask16 idx_stage2_mask_1 = *((__mmask16*) &idx_stage2_mask_1_value); + unsigned short idx_stage2_mask_2_value = ((unsigned short)0x7c00); + __mmask16 idx_stage2_mask_2 = *((__mmask16*) &idx_stage2_mask_2_value); + idx_stage2_base_0 = _mm512_set_epi32( 0, 0, 0, 0, 0, 0, 20, 19, 18, 17, 16, 9, 8, 7, 6, 5); + idx_stage2_base_1 = _mm512_set_epi32( 0, 25, 24, 23, 22, 21, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + idx_stage2_base_2 = _mm512_add_epi32(idx_stage2_base_0, M512_EPI32_5); + idx_stage2_base_2 = _mm512_mask_add_epi32(idx_stage2_base_2, idx_stage2_mask_1, idx_stage2_base_2, M512_EPI32_5); + idx_stage2_base_3 = _mm512_mask_sub_epi32(idx_stage2_base_1, idx_stage2_mask_2, idx_stage2_base_1, M512_EPI32_5); + + xArray_01 = _mm512_broadcastd_epi32(x128_0); // |x0 |x1 |x0 |x1 | ... |x0 |x1 | + xArray_23 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x1)); // |x2 |x3 |x2 |x3 | ... |x2 |x3 | + xArray_45 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x2)); // |x4 |x5 |x4 |x5 | ... |x4 |x5 | + xArray_67 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_0, 0x3)); // |x6 |x7 |x6 |x7 | ... |x6 |x7 | + xArray_89 = _mm512_broadcastd_epi32(x128_1); // |x8 |x9 |x8 |x9 | ... |x8 |x9 | + xArray_10 = _mm512_broadcastd_epi32(_mm_shuffle_epi32(x128_1, 0x1)); // |x10|x11|x10|x11| ... 
|x10|x11| + + unsigned int load_mask_value = (((unsigned int)0xffffffff) >> 4); + __mmask32 load_mask = *((__mmask32*) &load_mask_value); + + unsigned short store_mask_value = (((unsigned short)0xffff) >> 1); + __mmask16 store_mask = *((__mmask16*) &store_mask_value); + + for (BLASLONG idx_m = 0; idx_m < tag_m_15x; idx_m+=15) { + result_0 = _mm512_setzero_ps(); + result_1 = _mm512_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[idx_m*12]); // Load 2 rows with n=12 plus 8 elements + matrixArray_1 = _mm512_maskz_loadu_epi16(load_mask, &a[idx_m*12 + 32]); // Load 2 rows with n=12 plus 4 elements + matrixArray_2 = _mm512_loadu_si512(&a[(idx_m+5)*12]); // Load 2 rows with n=12 plus 8 elements + matrixArray_3 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+5)*12 + 32]); // Load 2 rows with n=12 plus 4 elements + matrixArray_4 = _mm512_loadu_si512(&a[(idx_m+10)*12]); // Load 2 rows with n=12 plus 8 elements + matrixArray_5 = _mm512_maskz_loadu_epi16(load_mask, &a[(idx_m+10)*12 + 32]); // Load 2 rows with n=12 plus 4 elements + + // Stage 1: interleave per 32 bits + matrixArray_stage_0 = _mm512_permutex2var_epi32(matrixArray_0, idx_stage1_base_0, matrixArray_1); // |a0 |a1 |...|e0 |e1 |a2|a3|...|e2|e3|a4 |a5 |...|e4 |e5 | + matrixArray_stage_1 = _mm512_permutex2var_epi32(matrixArray_0, idx_stage1_base_1, matrixArray_1); // |a6 |a7 |...|e6 |e7 |a8|a9|...|e8|e9|a10|a11|...|e10|e11| + matrixArray_stage_2 = _mm512_permutex2var_epi32(matrixArray_2, idx_stage1_base_2, matrixArray_3); // |f2 |f3 |...|j2 |j3 |f0|f1|...|j0|j1|f4 |f5 |...|j4 |j5 | + matrixArray_stage_3 = _mm512_permutex2var_epi32(matrixArray_2, idx_stage1_base_3, matrixArray_3); // |f8 |f9 |...|j8 |j9 |f6|f7|...|j6|j7|f10|f11|...|j10|j11| + matrixArray_stage_4 = _mm512_permutex2var_epi32(matrixArray_4, idx_stage1_base_4, matrixArray_5); // |k4 |k5 |...|o4 |o5 |k2|k3|...|o2|o3|k0 |k1 |...|o0 |o1 | + matrixArray_stage_5 = _mm512_permutex2var_epi32(matrixArray_4, idx_stage1_base_5, matrixArray_5); // |k10|k11|...|o10|o11|k8|k9|...|o8|o9|k6 |k7 |...|o6 |o7 | + + // Stage 2: interleave per 32 bits + matrixArray_0 = _mm512_mask_blend_epi32(idx_stage2_mask_1, matrixArray_stage_0, matrixArray_stage_2); // |a0 |a1 |...|j0 |j1 |x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_3 = _mm512_mask_blend_epi32(idx_stage2_mask_1, matrixArray_stage_1, matrixArray_stage_3); // |a6 |a7 |...|j6 |j7 |x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_1 = _mm512_permutex2var_epi32(matrixArray_stage_0, idx_stage2_base_0, matrixArray_stage_2); // |a2 |a3 |...|j2 |j3 |x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_2 = _mm512_permutex2var_epi32(matrixArray_stage_0, idx_stage2_base_2, matrixArray_stage_2); // |a4 |a5 |...|j4 |j5 |x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_4 = _mm512_permutex2var_epi32(matrixArray_stage_1, idx_stage2_base_0, matrixArray_stage_3); // |a8 |a9 |...|j8 |j9 |x|x|x|x|x|x|x|x|x|x|x|x| + matrixArray_5 = _mm512_permutex2var_epi32(matrixArray_stage_1, idx_stage2_base_2, matrixArray_stage_3); // |a10|a11|...|j10|j11|x|x|x|x|x|x|x|x|x|x|x|x| + + matrixArray_0 = _mm512_mask_blend_epi32(idx_stage2_mask_2, matrixArray_0, matrixArray_stage_4); // |a0|a1|.......................|o0|o1|x|x| + matrixArray_3 = _mm512_mask_blend_epi32(idx_stage2_mask_2, matrixArray_3, matrixArray_stage_5); // |a6|a7|.......................|o6|o7|x|x| + matrixArray_1 = _mm512_permutex2var_epi32(matrixArray_1 , idx_stage2_base_1, matrixArray_stage_4); // |a2|a3|.......................|o2|o3|x|x| + matrixArray_2 = _mm512_permutex2var_epi32(matrixArray_2 , idx_stage2_base_3, matrixArray_stage_4); //
|a4|a5|.......................|o4|o5|x|x| + matrixArray_4 = _mm512_permutex2var_epi32(matrixArray_4 , idx_stage2_base_1, matrixArray_stage_5); // |a8|a9|.......................|o8|o9|x|x| + matrixArray_5 = _mm512_permutex2var_epi32(matrixArray_5 , idx_stage2_base_3, matrixArray_stage_5); // |a10|a11|.....................|o10|o11|x|x| + + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_0, (__m512bh) xArray_01); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_1, (__m512bh) xArray_23); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_2, (__m512bh) xArray_45); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_3, (__m512bh) xArray_67); + result_0 = _mm512_dpbf16_ps(result_0, (__m512bh) matrixArray_4, (__m512bh) xArray_89); + result_1 = _mm512_dpbf16_ps(result_1, (__m512bh) matrixArray_5, (__m512bh) xArray_10); + result_0 = _mm512_add_ps(result_0, result_1); + + STORE16_MASK_COMPLETE_RESULT(result_0, y+idx_m, store_mask) + } + } + + if (tag_m_15x != m) { + __m256i matrixArray256; + __m256i x256 = _mm256_insertf128_si256(_mm256_castsi128_si256(x128_0), x128_1, 0x1); + __m256 result256; + __m128 result128, tmp128; + unsigned short load256_mask_value = (((unsigned short)0xffff) >> 4); + __mmask16 load256_mask = *((__mmask16*) &load256_mask_value); + for (BLASLONG i = tag_m_15x; i < m; i++) { + result256 = _mm256_setzero_ps(); + matrixArray256 = _mm256_maskz_loadu_epi16(load256_mask, &a[(i)*12]); + result256 = _mm256_dpbf16_ps(result256, (__m256bh) matrixArray256, (__m256bh) x256); + result128 = _mm_add_ps(_mm256_castps256_ps128(result256), _mm256_extractf128_ps(result256, 0x1)); + tmp128 = _mm_shuffle_ps(result128, result128, 14); + result128 = _mm_add_ps(result128, tmp128); + tmp128 = _mm_shuffle_ps(result128, result128, 1); + result128 = _mm_add_ps(result128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * result128[0] + beta * y[i]; +#else + y[i] = alpha * result128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = result128[0] * alpha; +#else + y[i] = result128[0]; +#endif +#endif + } + } + + return 0; +} + + +// 16 rows parallel processing BF16 GEMV kernel for n=13 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_16x13_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_16x13_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_16x13_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_16x13(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_16x = m & (~15); + + unsigned short x_load_mask_value = (((unsigned short)0xffff) >> 3); + __mmask16 x_load_mask = *((__mmask16*) &x_load_mask_value); + __m256i x256 = _mm256_maskz_loadu_epi16(x_load_mask, x); // |x0|x1|x2|x3|x4|x5|x6|x7|x8|x9|x10|x11|x12|0|0|0| + + if (tag_m_16x > 0) { + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6, matrixArray_7, \ + matrixArray_8, matrixArray_9, matrixArray_10, matrixArray_11, matrixArray_12, matrixArray_13, matrixArray_14, matrixArray_15; + __m512i xArray_0, xArray_1, xArray_2, xArray_3; + __m512 accum512_0, accum512_1; + __m512 result_0, result_1; + + __m256i matrixArray256_0, matrixArray256_1, matrixArray256_2, matrixArray256_3, matrixArray256_4, matrixArray256_5, matrixArray256_6,
matrixArray256_7; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i M512_EPI32_4 = _mm512_set1_epi32(4); + __m512i idx_base_0 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_4); + + unsigned int load_mask_value = (((unsigned int)0xffffffff) >> 6); + __mmask32 load_mask = *((__mmask32*) &load_mask_value); + + // Prepare X with 2-step interleave way + xArray_0 = _mm512_inserti32x8(_mm512_castsi256_si512(x256), x256, 0x1); + BF16_INTERLEAVE_1x32(xArray) + + for (BLASLONG idx_m = 0; idx_m < tag_m_16x; idx_m+=16) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + // Load matrix + BF16_MATRIX_MASKZ_LOAD_8x16(matrixArray256, a, 13, idx_m, 0, x_load_mask) + + matrixArray_8 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_0), matrixArray256_1, 0x1); + matrixArray_9 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_2), matrixArray256_3, 0x1); + matrixArray_10 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_4), matrixArray256_5, 0x1); + matrixArray_11 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_6), matrixArray256_7, 0x1); + + BF16_MATRIX_MASKZ_LOAD_8x16(matrixArray256, a, 13, idx_m+8, 0, x_load_mask) + + matrixArray_12 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_0), matrixArray256_1, 0x1); + matrixArray_13 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_2), matrixArray256_3, 0x1); + matrixArray_14 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_4), matrixArray256_5, 0x1); + matrixArray_15 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_6), matrixArray256_7, 0x1); + + // interleave per 256 bits + BF16_INTERLEAVE256_8x32(matrixArray) + + // 2-step interleave for matrix + BF16_INTERLEAVE_8x32(matrixArray) + + // Calculate the temp result for a..p[0:15] + BF16_2STEP_INTERLEAVED_DOT_8x32(accum512, matrixArray, xArray) + + // Reorder and add up the final result + result_0 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_1); + result_1 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_1); + result_0 = _mm512_add_ps(result_0, result_1); + STORE16_COMPLETE_RESULT(result_0, y+idx_m) + } + + if (m - tag_m_16x > 7) { + __m512i permutevar_idx = _mm512_set_epi32(15, 14, 13, 12, 7, 6, 5, 4, 11, 10, 9, 8, 3, 2, 1, 0); + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + // Load matrix + BF16_MATRIX_MASKZ_LOAD_8x16(matrixArray256, a, 13, tag_m_16x, 0, x_load_mask) + + matrixArray_8 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_0), matrixArray256_1, 0x1); + matrixArray_9 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_2), matrixArray256_3, 0x1); + matrixArray_10 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_4), matrixArray256_5, 0x1); + matrixArray_11 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_6), matrixArray256_7, 0x1); + + // interleave per 256 bits + matrixArray_0 = _mm512_shuffle_i32x4(matrixArray_8, matrixArray_10, 0x44); + matrixArray_1 = _mm512_shuffle_i32x4(matrixArray_8, matrixArray_10, 0xee); + matrixArray_2 = _mm512_shuffle_i32x4(matrixArray_9, matrixArray_11, 0x44); + matrixArray_3 = _mm512_shuffle_i32x4(matrixArray_9, matrixArray_11, 0xee); + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x32(matrixArray) + + // Calculate the temp result for a..h[0:15] + 
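+      // (Illustrative note: each _mm512_dpbf16_ps lane accumulates one bf16 pair,
+      //  i.e. acc[k] += a[2k]*b[2k] + a[2k+1]*b[2k+1], which is why the interleave
+      //  steps above pack two consecutive row elements into each 32-bit lane.)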
BF16_2STEP_INTERLEAVED_DOT_4x32(accum512, matrixArray, xArray) + + accum512_0 = _mm512_add_ps(accum512_0, accum512_1); + accum512_0 = _mm512_permutexvar_ps(permutevar_idx, accum512_0); + __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(accum512_0), _mm512_extractf32x8_ps(accum512_0, 1)); + STORE8_COMPLETE_RESULT(result256, y+tag_m_16x) + tag_m_16x += 8; + } + + if (m - tag_m_16x > 3) { + __m256i xArray256_0, xArray256_1, xArray256_2, xArray256_3; + __m256 accum256_0, accum256_1; + + xArray256_0 = _mm512_castsi512_si256(xArray_0); + xArray256_1 = _mm512_castsi512_si256(xArray_1); + xArray256_2 = _mm512_castsi512_si256(xArray_2); + xArray256_3 = _mm512_castsi512_si256(xArray_3); + + accum256_0 = _mm256_setzero_ps(); + accum256_1 = _mm256_setzero_ps(); + + BF16_MATRIX_MASKZ_LOAD_4x16(matrixArray256, a, 13, tag_m_16x, 0, x_load_mask) + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x16(matrixArray256) + + // Calculate the temp result for a..d[0:15] + BF16_2STEP_INTERLEAVED_DOT_4x16(accum256, matrixArray256, xArray256) + + accum256_0 = _mm256_add_ps(accum256_0, accum256_1); + __m128 result128 = _mm_add_ps(_mm256_castps256_ps128(accum256_0), _mm256_extractf32x4_ps(accum256_0, 1)); + STORE4_COMPLETE_RESULT(result128, y+tag_m_16x) + tag_m_16x += 4; + } + } + + if (tag_m_16x != m) { + __m256i matrixArray256; + __m256 accum256; + __m128 accum128, tmp128; + for (BLASLONG i = tag_m_16x; i < m; i++) { + accum256 = _mm256_setzero_ps(); + matrixArray256 = _mm256_maskz_loadu_epi16(x_load_mask, &a[(i)*13]); // Load 1 row with n=13 + accum256 = _mm256_dpbf16_ps(accum256, (__m256bh) matrixArray256, (__m256bh) x256); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf32x4_ps(accum256, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 = _mm_add_ps(accum128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * accum128[0] + beta * y[i]; +#else + y[i] = alpha * accum128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = accum128[0] * alpha; +#else + y[i] = accum128[0]; +#endif +#endif + } + } + + return 0; +} + +// 16 rows parallel processing BF16 GEMV kernel for n=14 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_16x14_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_16x14_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_16x14_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_16x14(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_16x = m & (~15); + + unsigned short x_load_mask_value = (((unsigned short)0xffff) >> 2); + __mmask16 x_load_mask = *((__mmask16*) &x_load_mask_value); + __m256i x256 = _mm256_maskz_loadu_epi16(x_load_mask, x); // |x0|x1|x2|x3|x4|x5|x6|x7|x8|x9|x10|x11|x12|x13|0|0| + + if (tag_m_16x > 0) { + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6, matrixArray_7, \ + matrixArray_8, matrixArray_9, matrixArray_10, matrixArray_11, matrixArray_12, matrixArray_13, matrixArray_14, matrixArray_15; + __m512i xArray_0, xArray_1, xArray_2, xArray_3; + __m512 accum512_0, accum512_1; + __m512 result_0, result_1; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif
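+    // (Reference sketch, not part of the build: independent of the SIMD layout,
+    //  every variant of this kernel computes, per row i,
+    //  y[i] = alpha * dot(a[i*14 .. i*14+13], x), plus beta * y[i] when beta applies.
+    //  In scalar form, with a hypothetical bf16_to_fp32() conversion helper:
+    //      for (BLASLONG i = 0; i < m; i++) {
+    //          float acc = 0.0f;
+    //          for (BLASLONG j = 0; j < 14; j++)
+    //              acc += bf16_to_fp32(a[i*14 + j]) * bf16_to_fp32(x[j]);
+    //          y[i] = alpha * acc + beta * y[i];
+    //      }
+    //  The vector code below produces the same result 16 rows at a time.)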
+#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i M512_EPI32_4 = _mm512_set1_epi32(4); + __m512i idx_base_0 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_4); + __m512i shift_idx = _mm512_set_epi32(0, 13, 12, 11, 10, 9, 8, 7, 0, 6, 5, 4, 3, 2, 1, 0); + + unsigned int load_mask_value = (((unsigned int)0xffffffff) >> 4); + __mmask32 load_mask = *((__mmask32*) &load_mask_value); + + // Prepare X with 2-step interleave way + xArray_0 = _mm512_inserti32x8(_mm512_castsi256_si512(x256), x256, 0x1); + BF16_INTERLEAVE_1x32(xArray) + + for (BLASLONG idx_m = 0; idx_m < tag_m_16x; idx_m+=16) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + // Load matrix + BF16_MATRIX_MASKZ_LOAD_8x32_2(matrixArray, a, 14, idx_m, 0, load_mask) + + // Pre-stage: shift the 2nd vector 1 position right for each register + BF16_PERMUTE_8x32_2(shift_idx, matrixArray) + + // interleave per 256 bits + BF16_INTERLEAVE256_8x32(matrixArray) + + // 2-step interleave for matrix + BF16_INTERLEAVE_8x32(matrixArray) + + // Calculate the temp result for a..p[0:15] + BF16_2STEP_INTERLEAVED_DOT_8x32(accum512, matrixArray, xArray) + + // Reorder and add up the final result + result_0 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_1); + result_1 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_1); + result_0 = _mm512_add_ps(result_0, result_1); + STORE16_COMPLETE_RESULT(result_0, y+idx_m) + } + + if (m - tag_m_16x > 7) { + __m512i permutevar_idx = _mm512_set_epi32(15, 14, 13, 12, 7, 6, 5, 4, 11, 10, 9, 8, 3, 2, 1, 0); + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + // Load matrix + BF16_MATRIX_MASKZ_LOAD_4x32_2(matrixArray, a, 14, tag_m_16x, 0, load_mask) + + // Pre-stage: shift the 2nd vector 1 position right for each register + BF16_PERMUTE_4x32_2(shift_idx, matrixArray) + + // interleave per 256 bits + BF16_INTERLEAVE256_4x32(matrixArray) + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x32(matrixArray) + + // Calculate the temp result for a..h[0:15] + BF16_2STEP_INTERLEAVED_DOT_4x32(accum512, matrixArray, xArray) + + accum512_0 = _mm512_add_ps(accum512_0, accum512_1); + accum512_0 = _mm512_permutexvar_ps(permutevar_idx, accum512_0); + __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(accum512_0), _mm512_extractf32x8_ps(accum512_0, 1)); + STORE8_COMPLETE_RESULT(result256, y+tag_m_16x) + tag_m_16x += 8; + } + + if (m - tag_m_16x > 3) { + __m256i matrixArray256_0, matrixArray256_1, matrixArray256_2, matrixArray256_3, matrixArray256_4, matrixArray256_5, matrixArray256_6, matrixArray256_7; + __m256i xArray256_0, xArray256_1, xArray256_2, xArray256_3; + __m256 accum256_0, accum256_1; + + xArray256_0 = _mm512_castsi512_si256(xArray_0); + xArray256_1 = _mm512_castsi512_si256(xArray_1); + xArray256_2 = _mm512_castsi512_si256(xArray_2); + xArray256_3 = _mm512_castsi512_si256(xArray_3); + + accum256_0 = _mm256_setzero_ps(); + accum256_1 = _mm256_setzero_ps(); + + BF16_MATRIX_MASKZ_LOAD_4x16(matrixArray256, a, 14, tag_m_16x, 0, x_load_mask) + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x16(matrixArray256) + + // Calculate the temp result for a..d[0:15] + BF16_2STEP_INTERLEAVED_DOT_4x16(accum256, matrixArray256, xArray256) + + accum256_0 = _mm256_add_ps(accum256_0, accum256_1); + __m128 result128 = _mm_add_ps(_mm256_castps256_ps128(accum256_0), _mm256_extractf32x4_ps(accum256_0, 1)); + STORE4_COMPLETE_RESULT(result128, 
y+tag_m_16x) + tag_m_16x += 4; + } + } + + if (tag_m_16x != m) { + __m256i matrixArray256; + __m256 accum256; + __m128 accum128, tmp128; + for (BLASLONG i = tag_m_16x; i < m; i++) { + accum256 = _mm256_setzero_ps(); + matrixArray256 = _mm256_maskz_loadu_epi16(x_load_mask, &a[(i)*14]); // Load 1 row with n=14 + accum256 = _mm256_dpbf16_ps(accum256, (__m256bh) matrixArray256, (__m256bh) x256); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf32x4_ps(accum256, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 = _mm_add_ps(accum128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * accum128[0] + beta * y[i]; +#else + y[i] = alpha * accum128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = accum128[0] * alpha; +#else + y[i] = accum128[0]; +#endif +#endif + } + } + + return 0; +} + +// 16 rows parallel processing BF16 GEMV kernel for n=15 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_16x15_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_16x15_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_16x15_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_16x15(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_16x = m & (~15); + + unsigned short x_load_mask_value = (((unsigned short)0xffff) >> 1); + __mmask16 x_load_mask = *((__mmask16*) &x_load_mask_value); + __m256i x256 = _mm256_maskz_loadu_epi16(x_load_mask, x); // |x0|x1|x2|x3|x4|x5|x6|x7|x8|x9|x10|x11|x12|x13|x14|0| + + if (tag_m_16x > 0) { + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6, matrixArray_7, \ + matrixArray_8, matrixArray_9, matrixArray_10, matrixArray_11, matrixArray_12, matrixArray_13, matrixArray_14, matrixArray_15; + __m512i xArray_0, xArray_1, xArray_2, xArray_3; + __m512 accum512_0, accum512_1; + __m512 result_0, result_1; + + __m256i matrixArray256_0, matrixArray256_1, matrixArray256_2, matrixArray256_3, matrixArray256_4, matrixArray256_5, matrixArray256_6, matrixArray256_7; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i M512_EPI32_4 = _mm512_set1_epi32(4); + __m512i idx_base_0 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_4); + + unsigned int load_mask_value = (((unsigned int)0xffffffff) >> 2); + __mmask32 load_mask = *((__mmask32*) &load_mask_value); + + // Prepare X with 2-step interleave way + xArray_0 = _mm512_inserti32x8(_mm512_castsi256_si512(x256), x256, 0x1); + BF16_INTERLEAVE_1x32(xArray) + + for (BLASLONG idx_m = 0; idx_m < tag_m_16x; idx_m+=16) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + // Load matrix + BF16_MATRIX_MASKZ_LOAD_8x16(matrixArray256, a, 15, idx_m, 0, x_load_mask) + + matrixArray_8 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_0), matrixArray256_1, 0x1); + matrixArray_9 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_2), matrixArray256_3, 0x1); + matrixArray_10 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_4),
matrixArray256_5, 0x1); + matrixArray_11 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_6), matrixArray256_7, 0x1); + + BF16_MATRIX_MASKZ_LOAD_8x16(matrixArray256, a, 15, idx_m+8, 0, x_load_mask) + + matrixArray_12 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_0), matrixArray256_1, 0x1); + matrixArray_13 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_2), matrixArray256_3, 0x1); + matrixArray_14 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_4), matrixArray256_5, 0x1); + matrixArray_15 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_6), matrixArray256_7, 0x1); + + // interleave per 256 bits + BF16_INTERLEAVE256_8x32(matrixArray) + + // 2-step interleave for matrix + BF16_INTERLEAVE_8x32(matrixArray) + + // Calculate the temp result for a..p[0:15] + BF16_2STEP_INTERLEAVED_DOT_8x32(accum512, matrixArray, xArray) + + // Reorder and add up the final result + result_0 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_1); + result_1 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_1); + result_0 = _mm512_add_ps(result_0, result_1); + STORE16_COMPLETE_RESULT(result_0, y+idx_m) + } + + if (m - tag_m_16x > 7) { + __m512i permutevar_idx = _mm512_set_epi32(15, 14, 13, 12, 7, 6, 5, 4, 11, 10, 9, 8, 3, 2, 1, 0); + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + // Load matrix + BF16_MATRIX_MASKZ_LOAD_8x16(matrixArray256, a, 15, tag_m_16x, 0, x_load_mask) + + matrixArray_8 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_0), matrixArray256_1, 0x1); + matrixArray_9 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_2), matrixArray256_3, 0x1); + matrixArray_10 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_4), matrixArray256_5, 0x1); + matrixArray_11 = _mm512_inserti32x8(_mm512_castsi256_si512(matrixArray256_6), matrixArray256_7, 0x1); + + // interleave per 256 bits + matrixArray_0 = _mm512_shuffle_i32x4(matrixArray_8, matrixArray_10, 0x44); + matrixArray_1 = _mm512_shuffle_i32x4(matrixArray_8, matrixArray_10, 0xee); + matrixArray_2 = _mm512_shuffle_i32x4(matrixArray_9, matrixArray_11, 0x44); + matrixArray_3 = _mm512_shuffle_i32x4(matrixArray_9, matrixArray_11, 0xee); + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x32(matrixArray) + + // Calculate the temp result for a..h[0:15] + BF16_2STEP_INTERLEAVED_DOT_4x32(accum512, matrixArray, xArray) + + accum512_0 = _mm512_add_ps(accum512_0, accum512_1); + accum512_0 = _mm512_permutexvar_ps(permutevar_idx, accum512_0); + __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(accum512_0), _mm512_extractf32x8_ps(accum512_0, 1)); + STORE8_COMPLETE_RESULT(result256, y+tag_m_16x) + tag_m_16x += 8; + } + + if (m - tag_m_16x > 3) { + __m256i xArray256_0, xArray256_1, xArray256_2, xArray256_3; + __m256 accum256_0, accum256_1; + + xArray256_0 = _mm512_castsi512_si256(xArray_0); + xArray256_1 = _mm512_castsi512_si256(xArray_1); + xArray256_2 = _mm512_castsi512_si256(xArray_2); + xArray256_3 = _mm512_castsi512_si256(xArray_3); + + accum256_0 = _mm256_setzero_ps(); + accum256_1 = _mm256_setzero_ps(); + + BF16_MATRIX_MASKZ_LOAD_4x16(matrixArray256, a, 15, tag_m_16x, 0, x_load_mask) + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x16(matrixArray256) + + // Calculate the temp result for a..d[0:15] + BF16_2STEP_INTERLEAVED_DOT_4x16(accum256, matrixArray256, xArray256) + + accum256_0 = _mm256_add_ps(accum256_0, accum256_1); + __m128 result128 = _mm_add_ps(_mm256_castps256_ps128(accum256_0), _mm256_extractf32x4_ps(accum256_0, 1)); + 
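+      // result128 now carries one fp32 dot product per remaining row; the store
+      // macro below applies whichever alpha/beta variant the preprocessor selected.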
STORE4_COMPLETE_RESULT(result128, y+tag_m_16x) + tag_m_16x += 4; + } + } + + if (tag_m_16x != m) { + __m256i matrixArray256; + __m256 accum256; + __m128 accum128, tmp128; + for (BLASLONG i = tag_m_16x; i < m; i++) { + accum256 = _mm256_setzero_ps(); + matrixArray256 = _mm256_maskz_loadu_epi16(x_load_mask, &a[(i)*15]); // Load 1 row with n=15 + accum256 = _mm256_dpbf16_ps(accum256, (__m256bh) matrixArray256, (__m256bh) x256); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf32x4_ps(accum256, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 = _mm_add_ps(accum128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * accum128[0] + beta * y[i]; +#else + y[i] = alpha * accum128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = accum128[0] * alpha; +#else + y[i] = accum128[0]; +#endif +#endif + } + } + + return 0; +} + +// 16 rows parallel processing BF16 GEMV kernel for n=16 && lda ineffective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_16x16_alpha_beta(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_16x16_alpha_one(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_16x16_alpha(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_16x16(BLASLONG m, float alpha, bfloat16 *a, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_16x = m & (~15); + + __m256i x256 = _mm256_loadu_si256(x); // |x0|x1|x2|x3|x4|x5|x6|x7|x8|x9|x10|x11|x12|x13|x14|x15| + + if (tag_m_16x > 0) { + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6, matrixArray_7, \ + matrixArray_8, matrixArray_9, matrixArray_10, matrixArray_11, matrixArray_12, matrixArray_13, matrixArray_14, matrixArray_15; + __m512i xArray_0, xArray_1, xArray_2, xArray_3; + __m512 accum512_0, accum512_1; + __m512 result_0, result_1; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i M512_EPI32_4 = _mm512_set1_epi32(4); + __m512i idx_base_0 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_4); + + // Prepare X with 2-step interleave way + xArray_0 = _mm512_inserti32x8(_mm512_castsi256_si512(x256), x256, 0x1); + BF16_INTERLEAVE_1x32(xArray) + + for (BLASLONG idx_m = 0; idx_m < tag_m_16x; idx_m+=16) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + matrixArray_8 = _mm512_loadu_si512(&a[(idx_m )*16]); // Load 2 rows with n=16 + matrixArray_9 = _mm512_loadu_si512(&a[(idx_m+2 )*16]); // Load 2 rows with n=16 + matrixArray_10 = _mm512_loadu_si512(&a[(idx_m+4 )*16]); // Load 2 rows with n=16 + matrixArray_11 = _mm512_loadu_si512(&a[(idx_m+6 )*16]); // Load 2 rows with n=16 + matrixArray_12 = _mm512_loadu_si512(&a[(idx_m+8 )*16]); // Load 2 rows with n=16 + matrixArray_13 = _mm512_loadu_si512(&a[(idx_m+10)*16]); // Load 2 rows with n=16 + matrixArray_14 = _mm512_loadu_si512(&a[(idx_m+12)*16]); // Load 2 rows with n=16 + matrixArray_15 = _mm512_loadu_si512(&a[(idx_m+14)*16]); // Load 2 rows with n=16 + + // interleave per 256 bits + BF16_INTERLEAVE256_8x32(matrixArray) + + // 2-step interleave for matrix +
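+      // (Layout sketch, as a reading aid: the 16 rows x 16 columns of this block sit
+      //  in 8 zmm registers; after the 256-bit shuffle and the 2-step interleave each
+      //  zmm should hold one element pair from every row, so the 8 dpbf16 steps below
+      //  accumulate partial sums for rows a..p simultaneously, and idx_base_0/1
+      //  restore row order before the store.)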
BF16_INTERLEAVE_8x32(matrixArray) + + // Calculate the temp result for a..p[0:15] + BF16_2STEP_INTERLEAVED_DOT_8x32(accum512, matrixArray, xArray) + + // Reorder and add up the final result + result_0 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_1); + result_1 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_1); + result_0 = _mm512_add_ps(result_0, result_1); + STORE16_COMPLETE_RESULT(result_0, y+idx_m) + } + + if (m - tag_m_16x > 7) { + __m512i permutevar_idx = _mm512_set_epi32(15, 14, 13, 12, 7, 6, 5, 4, 11, 10, 9, 8, 3, 2, 1, 0); + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + matrixArray_4 = _mm512_loadu_si512(&a[(tag_m_16x )*16]); // Load 2 rows with n=16 + matrixArray_5 = _mm512_loadu_si512(&a[(tag_m_16x+2 )*16]); // Load 2 rows with n=16 + matrixArray_6 = _mm512_loadu_si512(&a[(tag_m_16x+4 )*16]); // Load 2 rows with n=16 + matrixArray_7 = _mm512_loadu_si512(&a[(tag_m_16x+6 )*16]); // Load 2 rows with n=16 + + // interleave per 256 bits + BF16_INTERLEAVE256_4x32(matrixArray) + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x32(matrixArray) + + // Calculate the temp result for a..h[0:15] + BF16_2STEP_INTERLEAVED_DOT_4x32(accum512, matrixArray, xArray) + + accum512_0 = _mm512_add_ps(accum512_0, accum512_1); + accum512_0 = _mm512_permutexvar_ps(permutevar_idx, accum512_0); + __m256 result256 = _mm256_add_ps(_mm512_castps512_ps256(accum512_0), _mm512_extractf32x8_ps(accum512_0, 1)); + STORE8_COMPLETE_RESULT(result256, y+tag_m_16x) + tag_m_16x += 8; + } + + if (m - tag_m_16x > 3) { + __m256i matrixArray256_0, matrixArray256_1, matrixArray256_2, matrixArray256_3, \ + matrixArray256_4, matrixArray256_5, matrixArray256_6, matrixArray256_7; + __m256i xArray256_0, xArray256_1, xArray256_2, xArray256_3; + __m256 accum256_0, accum256_1; + + xArray256_0 = _mm512_castsi512_si256(xArray_0); + xArray256_1 = _mm512_castsi512_si256(xArray_1); + xArray256_2 = _mm512_castsi512_si256(xArray_2); + xArray256_3 = _mm512_castsi512_si256(xArray_3); + + accum256_0 = _mm256_setzero_ps(); + accum256_1 = _mm256_setzero_ps(); + + matrixArray_0 = _mm512_loadu_si512(&a[(tag_m_16x )*16]); // Load 2 rows with n=16 + matrixArray_1 = _mm512_loadu_si512(&a[(tag_m_16x+2 )*16]); // Load 2 rows with n=16 + + matrixArray256_0 = _mm512_castsi512_si256(matrixArray_0); + matrixArray256_1 = _mm512_extracti32x8_epi32(matrixArray_0, 0x1); + matrixArray256_2 = _mm512_castsi512_si256(matrixArray_1); + matrixArray256_3 = _mm512_extracti32x8_epi32(matrixArray_1, 0x1); + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x16(matrixArray256) + + // Calculate the temp result for a..d[0:15] + BF16_2STEP_INTERLEAVED_DOT_4x16(accum256, matrixArray256, xArray256) + + accum256_0 = _mm256_add_ps(accum256_0, accum256_1); + __m128 result128 = _mm_add_ps(_mm256_castps256_ps128(accum256_0), _mm256_extractf32x4_ps(accum256_0, 1)); + STORE4_COMPLETE_RESULT(result128, y+tag_m_16x) + tag_m_16x += 4; + } + } + + if (tag_m_16x != m) { + __m256i matrixArray256; + __m256 accum256; + __m128 accum128, tmp128; + for (BLASLONG i = tag_m_16x; i < m; i++) { + accum256 = _mm256_setzero_ps(); + matrixArray256 = _mm256_loadu_si256(&a[(i)*16]); // Load 1 row with n=16 + accum256 = _mm256_dpbf16_ps(accum256, (__m256bh) matrixArray256, (__m256bh) x256); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf32x4_ps(accum256, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 =
_mm_add_ps(accum128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * accum128[0] + beta * y[i]; +#else + y[i] = alpha * accum128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = accum128[0] * alpha; +#else + y[i] = accum128[0]; +#endif +#endif + } + } + + return 0; +} + +// 8 rows parallel processing BF16 GEMV kernel for n>16 && lda effective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_8x16p_lda_alpha_beta(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_8x16p_lda_alpha_one(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_8x16p_lda_alpha(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_8x16p_lda(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_8x = m & (~7); + + unsigned int load_mask_value = (((unsigned int)0xffffffff) >> (32-n)); + __mmask32 load_mask = *((__mmask32*) &load_mask_value); + __m512i x512 = _mm512_maskz_loadu_epi16(load_mask, x); // |x0|x1|x2|x3|x4|x5|x6|x7|x8|x9|x10|x11|x12|x13|x14|x15|... + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6, matrixArray_7, \ + matrixArray_8, matrixArray_9, matrixArray_10, matrixArray_11, matrixArray_12, matrixArray_13, matrixArray_14, matrixArray_15; + __m512 accum512_0, accum512_1, accum512_2, accum512_3; + __m256 accum256; + __m128 accum128; + + if (tag_m_8x > 0) { + __m512i xArray_0, xArray_1, xArray_2, xArray_3; + + __m512i M512_EPI32_4 = _mm512_set1_epi32(4); + __m512i idx_base_0 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_4); + + // Prepare X with 2-step interleave way + xArray_0 = x512; + BF16_INTERLEAVE_1x32(xArray) + + for (BLASLONG idx_m = 0; idx_m < tag_m_8x; idx_m+=8) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + // Load 8 rows from matrix + BF16_MATRIX_MASKZ_LOAD_8x32(matrixArray, a, lda, idx_m, 0, load_mask) + + // 2-step interleave for matrix + BF16_INTERLEAVE_8x32(matrixArray) + + // Calculate the temp result for a..h[0:31] + BF16_2STEP_INTERLEAVED_DOT_8x32(accum512, matrixArray, xArray) + + // Reorder and add up the final result + accum512_2 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_1); + accum512_3 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_1); + accum512_2 = _mm512_add_ps(accum512_2, accum512_3); + accum256 = _mm256_add_ps(_mm512_castps512_ps256(accum512_2), _mm512_extractf32x8_ps(accum512_2, 1)); + STORE8_COMPLETE_RESULT(accum256, y+idx_m) + } + + if (m - tag_m_8x > 3) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + + // Load 4 rows from matrix + BF16_MATRIX_MASKZ_LOAD_4x32(matrixArray, a, lda, tag_m_8x, 0, load_mask) + + // 2-step interleave for matrix + BF16_INTERLEAVE_4x32(matrixArray) + + // Calculate the temp result for a..d[0:31] + BF16_2STEP_INTERLEAVED_DOT_4x32(accum512, matrixArray, xArray) + + accum512_0 = _mm512_add_ps(accum512_0, accum512_1); + accum256 = _mm256_add_ps(_mm512_castps512_ps256(accum512_0), _mm512_extractf32x8_ps(accum512_0, 
1)); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf32x4_ps(accum256, 1)); + STORE4_COMPLETE_RESULT(accum128, y+tag_m_8x) + tag_m_8x += 4; + } + } + + if (tag_m_8x != m) { + __m128 tmp128; + for (BLASLONG i = tag_m_8x; i < m; i++) { + accum512_0 = _mm512_setzero_ps(); + matrixArray_0 = _mm512_maskz_loadu_epi16(load_mask, &a[(i)*lda]); // Load 1 row with n elements + accum512_0 = _mm512_dpbf16_ps(accum512_0, (__m512bh) matrixArray_0, (__m512bh) x512); + accum256 = _mm256_add_ps(_mm512_castps512_ps256(accum512_0), _mm512_extractf32x8_ps(accum512_0, 1)); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256), _mm256_extractf32x4_ps(accum256, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 = _mm_add_ps(accum128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * accum128[0] + beta * y[i]; +#else + y[i] = alpha * accum128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = accum128[0] * alpha; +#else + y[i] = accum128[0]; +#endif +#endif + } + } + + return 0; +} + +// 8 rows parallel processing BF16 GEMV kernel for big N && lda effective scenario (process before interleave) +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_1x128_lda_direct_alpha_beta(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_1x128_lda_direct_alpha_one(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_1x128_lda_direct_alpha(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_1x128_lda_direct(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_8x = m & (~7); + BLASLONG tag_n_32x = n & (~31); + BLASLONG tag_n_128x = n & (~127); + + __m512 accum512_0, accum512_1, accum512_2, accum512_3, accum512_4, accum512_5, accum512_6, accum512_7, \ + accum512_8, accum512_9, accum512_10, accum512_11, accum512_12, accum512_13, accum512_14, accum512_15; + __m512 accum512_bridge[8]; + __m512 accum512_t_0, accum512_t_1, accum512_t_2, accum512_t_3; + __m256 accum256_0; + __m128 accum128; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3; + __m512i xArray_0, xArray_1, xArray_2, xArray_3; + + unsigned int tail_mask_value = (((unsigned int)0xffffffff) >> (32-(n&31))); + __mmask32 tail_mask = *((__mmask32*) &tail_mask_value); + + __m512i M512_EPI32_4 = _mm512_set1_epi32(4); + __m512i idx_base_0 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_4); + + if (tag_m_8x > 0) { + for (BLASLONG idx_m = 0; idx_m < tag_m_8x; idx_m+=8) { + for (int j = idx_m; j < idx_m + 8; j++) { + accum512_t_0 = _mm512_setzero_ps(); + accum512_t_1 = _mm512_setzero_ps(); + accum512_t_2 = _mm512_setzero_ps(); + accum512_t_3 = _mm512_setzero_ps(); + /* Processing the main chunk with 128 elements per round */ + for (long idx_n = 0; idx_n < tag_n_128x; idx_n += 128) { + BF16_MATRIX_LOAD_1x32(matrixArray_0, a, lda, j, idx_n + 0) + BF16_MATRIX_LOAD_1x32(matrixArray_1, a, lda, j, idx_n + 32) + BF16_MATRIX_LOAD_1x32(matrixArray_2, a, lda,
j, idx_n + 64) + BF16_MATRIX_LOAD_1x32(matrixArray_3, a, lda, j, idx_n + 96) + + BF16_VECTOR_LOAD_1x32(xArray_0, x, idx_n + 0) + BF16_VECTOR_LOAD_1x32(xArray_1, x, idx_n + 32) + BF16_VECTOR_LOAD_1x32(xArray_2, x, idx_n + 64) + BF16_VECTOR_LOAD_1x32(xArray_3, x, idx_n + 96) + + BF16_DOT_1x32(accum512_t_0, matrixArray_0, xArray_0) + BF16_DOT_1x32(accum512_t_1, matrixArray_1, xArray_1) + BF16_DOT_1x32(accum512_t_2, matrixArray_2, xArray_2) + BF16_DOT_1x32(accum512_t_3, matrixArray_3, xArray_3) + } + + /* Processing the remaining <128 chunk with 32 elements per round */ + for (long idx_n = tag_n_128x; idx_n < tag_n_32x; idx_n += 32) { + BF16_MATRIX_LOAD_1x32(matrixArray_0, a, lda, j, idx_n) + BF16_VECTOR_LOAD_1x32(xArray_0, x, idx_n) + BF16_DOT_1x32(accum512_t_0, matrixArray_0, xArray_0) + } + + /* Processing the remaining <32 chunk with masked 32-element processing */ + if ((n&31) != 0) { + BF16_MATRIX_MASKZ_LOAD_1x32(matrixArray_0, a, lda, j, tag_n_32x, tail_mask) + BF16_VECTOR_MASKZ_LOAD_1x32(xArray_0, x, tag_n_32x, tail_mask) + BF16_DOT_1x32(accum512_t_2, matrixArray_0, xArray_0) + } + + /* Accumulate the 4 registers into 1 register */ + accum512_t_0 = _mm512_add_ps(accum512_t_0, accum512_t_1); + accum512_t_2 = _mm512_add_ps(accum512_t_2, accum512_t_3); + accum512_t_0 = _mm512_add_ps(accum512_t_0, accum512_t_2); + + // Temporarily save the result into a ZMM + accum512_bridge[j-idx_m] = accum512_t_0; + } + + FP32_INTERLEAVE_8x16_ARRAY(accum512_bridge) + FP32_ACCUM2_8x16_ARRAY(accum512_bridge) + accum512_bridge[1] = _mm512_permutex2var_ps(accum512_bridge[0], idx_base_0, accum512_bridge[4]); + accum512_bridge[2] = _mm512_permutex2var_ps(accum512_bridge[0], idx_base_1, accum512_bridge[4]); + accum512_bridge[1] = _mm512_add_ps(accum512_bridge[1], accum512_bridge[2]); + accum256_0 = _mm256_add_ps(_mm512_castps512_ps256(accum512_bridge[1]), _mm512_extractf32x8_ps(accum512_bridge[1], 1)); + STORE8_COMPLETE_RESULT(accum256_0, y+idx_m) + } + } + + if (tag_m_8x != m) { + __m128 tmp128; + for (BLASLONG j = tag_m_8x; j < m; j++) { + accum512_t_0 = _mm512_setzero_ps(); + accum512_t_1 = _mm512_setzero_ps(); + accum512_t_2 = _mm512_setzero_ps(); + accum512_t_3 = _mm512_setzero_ps(); + /* Processing the main chunk with 128 elements per round */ + for (long idx_n = 0; idx_n < tag_n_128x; idx_n += 128) { + BF16_MATRIX_LOAD_1x32(matrixArray_0, a, lda, j, idx_n + 0) + BF16_MATRIX_LOAD_1x32(matrixArray_1, a, lda, j, idx_n + 32) + BF16_MATRIX_LOAD_1x32(matrixArray_2, a, lda, j, idx_n + 64) + BF16_MATRIX_LOAD_1x32(matrixArray_3, a, lda, j, idx_n + 96) + + BF16_VECTOR_LOAD_1x32(xArray_0, x, idx_n + 0) + BF16_VECTOR_LOAD_1x32(xArray_1, x, idx_n + 32) + BF16_VECTOR_LOAD_1x32(xArray_2, x, idx_n + 64) + BF16_VECTOR_LOAD_1x32(xArray_3, x, idx_n + 96) + + BF16_DOT_1x32(accum512_t_0, matrixArray_0, xArray_0) + BF16_DOT_1x32(accum512_t_1, matrixArray_1, xArray_1) + BF16_DOT_1x32(accum512_t_2, matrixArray_2, xArray_2) + BF16_DOT_1x32(accum512_t_3, matrixArray_3, xArray_3) + } + + /* Processing the remaining <128 chunk with 32 elements per round */ + for (long idx_n = tag_n_128x; idx_n < tag_n_32x; idx_n += 32) { + BF16_MATRIX_LOAD_1x32(matrixArray_0, a, lda, j, idx_n) + BF16_VECTOR_LOAD_1x32(xArray_0, x, idx_n) + BF16_DOT_1x32(accum512_t_0, matrixArray_0, xArray_0) + } + + /* Processing the remaining <32 chunk with masked 32-element processing */ + if ((n&31) != 0) { + BF16_MATRIX_MASKZ_LOAD_1x32(matrixArray_0, a, lda, j, tag_n_32x, tail_mask) + BF16_VECTOR_MASKZ_LOAD_1x32(xArray_0, x, tag_n_32x, tail_mask) +
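+            // (Worked example of the tail mask, assuming n = 70: n & 31 = 6, so
+            //  tail_mask = 0xffffffff >> (32-6) = 0x3f and exactly 6 bf16 elements load.)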
BF16_DOT_1x32(accum512_t_2, matrixArray_0, xArray_0) + } + + /* Accumulate the 4 registers into 1 register */ + accum512_t_0 = _mm512_add_ps(accum512_t_0, accum512_t_1); + accum512_t_2 = _mm512_add_ps(accum512_t_2, accum512_t_3); + accum512_t_0 = _mm512_add_ps(accum512_t_0, accum512_t_2); + + accum256_0 = _mm256_add_ps(_mm512_castps512_ps256(accum512_t_0), _mm512_extractf32x8_ps(accum512_t_0, 1)); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256_0), _mm256_extractf32x4_ps(accum256_0, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 = _mm_add_ps(accum128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[j] = alpha * accum128[0] + beta * y[j]; +#else + y[j] = alpha * accum128[0] + y[j]; +#endif +#else +#ifndef ONE_ALPHA + y[j] = accum128[0] * alpha; +#else + y[j] = accum128[0]; +#endif +#endif + } + } + + return 0; +} + +// 8 rows parallel processing BF16 GEMV kernel for n=32 && lda effective scenario (process before interleave) +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_8x32_lda_direct_alpha_beta(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_8x32_lda_direct_alpha_one(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_8x32_lda_direct_alpha(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_8x32_lda_direct(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_8x = m & (~7); + BLASLONG tag_n_32x = n & (~31); + + __m512 accum512_0, accum512_1, accum512_2, accum512_3, accum512_4, accum512_5, accum512_6, accum512_7, \ + accum512_8, accum512_9, accum512_10, accum512_11, accum512_12, accum512_13, accum512_14, accum512_15; + __m256 accum256_0; + __m128 accum128; + +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_set1_ps(alpha); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_set1_ps(beta); +#endif + + __m512i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6, matrixArray_7; + __m512i xArray_0; + + unsigned int tail_mask_value = (((unsigned int)0xffffffff) >> (32-(n&31))); + __mmask32 tail_mask = *((__mmask32*) &tail_mask_value); + + if (tag_m_8x > 0) { + __m512i M512_EPI32_4 = _mm512_set1_epi32(4); + __m512i idx_base_0 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0); + __m512i idx_base_1 = _mm512_add_epi32(idx_base_0, M512_EPI32_4); + + for (BLASLONG idx_m = 0; idx_m < tag_m_8x; idx_m+=8) { + accum512_0 = _mm512_setzero_ps(); + accum512_1 = _mm512_setzero_ps(); + accum512_2 = _mm512_setzero_ps(); + accum512_3 = _mm512_setzero_ps(); + accum512_4 = _mm512_setzero_ps(); + accum512_5 = _mm512_setzero_ps(); + accum512_6 = _mm512_setzero_ps(); + accum512_7 = _mm512_setzero_ps(); + + for (BLASLONG idx_n = 0; idx_n < tag_n_32x; idx_n+=32) { + // Load 8 rows from matrix + BF16_MATRIX_LOAD_8x32(matrixArray, a, lda, idx_m, idx_n) + + // Load x + BF16_VECTOR_LOAD_1x32(xArray_0, x, idx_n) + + // Calculate the temp result for a..h[0:31] + BF16_DOT_8x32(accum512, matrixArray, xArray_0) + } + + if (tag_n_32x != n) { // Go with masked 512 + // Load 8 rows from matrix + BF16_MATRIX_MASKZ_LOAD_8x32(matrixArray, a, lda, idx_m, tag_n_32x, tail_mask) + + // Load x 
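+                // (masked with the same tail_mask as the matrix rows above, keeping
+                //  the a and x operands element-aligned for the dot step)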
+ BF16_VECTOR_MASKZ_LOAD_1x32(xArray_0, x, tag_n_32x, tail_mask) + + // Calculate the temp result for a..h[0:31] + BF16_DOT_8x32(accum512, matrixArray, xArray_0) + } + + // 2-step interleave for the FP32 register array + FP32_INTERLEAVE_8x16(accum512) + + // Accumulate the 2 batches of registers into 2 registers (0 and 4) + FP32_ACCUM2_8x16(accum512) + + accum512_1 = _mm512_permutex2var_ps(accum512_0, idx_base_0, accum512_4); + accum512_2 = _mm512_permutex2var_ps(accum512_0, idx_base_1, accum512_4); + accum512_1 = _mm512_add_ps(accum512_1, accum512_2); + accum256_0 = _mm256_add_ps(_mm512_castps512_ps256(accum512_1), _mm512_extractf32x8_ps(accum512_1, 1)); + STORE8_COMPLETE_RESULT(accum256_0, y+idx_m) + } + } + + if (tag_m_8x != m) { + __m128 tmp128; + for (BLASLONG i = tag_m_8x; i < m; i++) { + accum512_0 = _mm512_setzero_ps(); + for (BLASLONG idx_n = 0; idx_n < tag_n_32x; idx_n+=32) { + // Load 32 elements from matrix + BF16_MATRIX_LOAD_1x32(matrixArray_0, a, lda, i, idx_n) + + // Load 32 elements from x + BF16_VECTOR_LOAD_1x32(xArray_0, x, idx_n) + + // Calculate and accumulate the temp result + BF16_DOT_1x32(accum512_0, matrixArray_0, xArray_0) + } + + if (tag_n_32x != n) { + // Load tail elements from matrix + BF16_MATRIX_MASKZ_LOAD_1x32(matrixArray_0, a, lda, i, tag_n_32x, tail_mask) + + // Load tail elements from x + BF16_VECTOR_MASKZ_LOAD_1x32(xArray_0, x, tag_n_32x, tail_mask) + + // Calculate and accumulate the temp result + BF16_DOT_1x32(accum512_0, matrixArray_0, xArray_0) + } + + accum256_0 = _mm256_add_ps(_mm512_castps512_ps256(accum512_0), _mm512_extractf32x8_ps(accum512_0, 1)); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256_0), _mm256_extractf32x4_ps(accum256_0, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 = _mm_add_ps(accum128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * accum128[0] + beta * y[i]; +#else + y[i] = alpha * accum128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = accum128[0] * alpha; +#else + y[i] = accum128[0]; +#endif +#endif + } + } + + return 0; +} + +// 8 rows parallel processing BF16 GEMV kernel for n<16 && lda effective scenario +#ifndef ZERO_BETA +#ifndef ONE_BETA +static int sbgemv_kernel_8x16m_lda_alpha_beta(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y) +#else +static int sbgemv_kernel_8x16m_lda_alpha_one(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float beta, float *y) +#endif +#else +#ifndef ONE_ALPHA +static int sbgemv_kernel_8x16m_lda_alpha(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y) +#else +static int sbgemv_kernel_8x16m_lda(BLASLONG m, BLASLONG n, float alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, float *y) +#endif +#endif +{ + BLASLONG tag_m_8x = m & (~7); + + __m256i matrixArray_0, matrixArray_1, matrixArray_2, matrixArray_3, matrixArray_4, matrixArray_5, matrixArray_6, matrixArray_7; + __m256i xArray256; + + // Keep aligned with the other kernels and macro definitions; the high 256 bits are never used +#ifndef ONE_ALPHA + __m512 ALPHAVECTOR = _mm512_castps256_ps512(_mm256_set1_ps(alpha)); +#endif +#ifndef ZERO_BETA + __m512 BETAVECTOR = _mm512_castps256_ps512(_mm256_set1_ps(beta)); +#endif + + __m256 accum256_0, accum256_1, accum256_2, accum256_3, accum256_4, accum256_5, accum256_6, accum256_7, \ + accum256_8, accum256_9, accum256_10, accum256_11, accum256_12, accum256_13,
accum256_14, accum256_15; + + __m256i M256_EPI32_4 = _mm256_set1_epi32(4); + __m256i idx_base_0 = _mm256_set_epi32(11, 10, 9, 8, 3, 2, 1, 0); + __m256i idx_base_1 = _mm256_add_epi32(idx_base_0, M256_EPI32_4); + + unsigned short load_mask_value = (((unsigned short)0xffff) >> (16-n)); + __mmask16 load_mask = *((__mmask16*) &load_mask_value); + + if (n == 16) { + BF16_VECTOR_LOAD_1x16(xArray256, x, 0) + } else { + BF16_VECTOR_MASKZ_LOAD_1x16(xArray256, x, 0, load_mask) + } + + if (n == 16) { + for (BLASLONG idx_m = 0; idx_m < tag_m_8x; idx_m+=8) { + accum256_0 = _mm256_setzero_ps(); + accum256_1 = _mm256_setzero_ps(); + accum256_2 = _mm256_setzero_ps(); + accum256_3 = _mm256_setzero_ps(); + accum256_4 = _mm256_setzero_ps(); + accum256_5 = _mm256_setzero_ps(); + accum256_6 = _mm256_setzero_ps(); + accum256_7 = _mm256_setzero_ps(); + + BF16_MATRIX_LOAD_8x16(matrixArray, a, lda, idx_m, 0) + + BF16_DOT_8x16(accum256, matrixArray, xArray256) + + // 2-step interleave for the FP32 register array + FP32_INTERLEAVE_8x8(accum256) + + // Accumulate the 2 batches of registers into 2 registers (0 and 4) + FP32_ACCUM2_8x8(accum256) + + accum256_1 = _mm256_permutex2var_ps(accum256_0, idx_base_0, accum256_4); + accum256_2 = _mm256_permutex2var_ps(accum256_0, idx_base_1, accum256_4); + accum256_1 = _mm256_add_ps(accum256_1, accum256_2); + + STORE8_COMPLETE_RESULT(accum256_1, y+idx_m) + } + + if (tag_m_8x != m) { + __m128 accum128, tmp128; + for (BLASLONG i = tag_m_8x; i < m; i++) { + accum256_0 = _mm256_setzero_ps(); + matrixArray_0 = _mm256_loadu_si256(&a[(i)*lda]); // Load 1 row with n=16 + accum256_0 = _mm256_dpbf16_ps(accum256_0, (__m256bh) matrixArray_0, (__m256bh) xArray256); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256_0), _mm256_extractf32x4_ps(accum256_0, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 = _mm_add_ps(accum128, tmp128); +#ifndef ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * accum128[0] + beta * y[i]; +#else + y[i] = alpha * accum128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = accum128[0] * alpha; +#else + y[i] = accum128[0]; +#endif +#endif + } + } + } else { + for (BLASLONG idx_m = 0; idx_m < tag_m_8x; idx_m+=8) { + accum256_0 = _mm256_setzero_ps(); + accum256_1 = _mm256_setzero_ps(); + accum256_2 = _mm256_setzero_ps(); + accum256_3 = _mm256_setzero_ps(); + accum256_4 = _mm256_setzero_ps(); + accum256_5 = _mm256_setzero_ps(); + accum256_6 = _mm256_setzero_ps(); + accum256_7 = _mm256_setzero_ps(); + + BF16_MATRIX_MASKZ_LOAD_8x16(matrixArray, a, lda, idx_m, 0, load_mask) + + BF16_DOT_8x16(accum256, matrixArray, xArray256) + + // 2-step interleave for the FP32 register array + FP32_INTERLEAVE_8x8(accum256) + + // Accumulate the 2 batches of registers into 2 registers (0 and 4) + FP32_ACCUM2_8x8(accum256) + + accum256_1 = _mm256_permutex2var_ps(accum256_0, idx_base_0, accum256_4); + accum256_2 = _mm256_permutex2var_ps(accum256_0, idx_base_1, accum256_4); + accum256_1 = _mm256_add_ps(accum256_1, accum256_2); + + STORE8_COMPLETE_RESULT(accum256_1, y+idx_m) + } + + if (tag_m_8x != m) { + __m128 accum128, tmp128; + for (BLASLONG i = tag_m_8x; i < m; i++) { + accum256_0 = _mm256_setzero_ps(); + matrixArray_0 = _mm256_maskz_loadu_epi16(load_mask, &a[(i)*lda]); // Load 1 row with n<16 (masked) + accum256_0 = _mm256_dpbf16_ps(accum256_0, (__m256bh) matrixArray_0, (__m256bh) xArray256); + accum128 = _mm_add_ps(_mm256_castps256_ps128(accum256_0), _mm256_extractf32x4_ps(accum256_0, 1)); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x0e); + accum128 = _mm_add_ps(accum128, tmp128); + tmp128 = _mm_shuffle_ps(accum128, accum128, 0x01); + accum128 = _mm_add_ps(accum128, tmp128); +#ifndef
ZERO_BETA +#ifndef ONE_BETA + y[i] = alpha * accum128[0] + beta * y[i]; +#else + y[i] = alpha * accum128[0] + y[i]; +#endif +#else +#ifndef ONE_ALPHA + y[i] = accum128[0] * alpha; +#else + y[i] = accum128[0]; +#endif +#endif + } + } + } + + return 0; +} diff --git a/kernel/x86_64/sdot.c b/kernel/x86_64/sdot.c index 3536afc9e..e816c67e9 100644 --- a/kernel/x86_64/sdot.c +++ b/kernel/x86_64/sdot.c @@ -36,7 +36,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "sdot_microk_nehalem-2.c" #elif defined(HASWELL) || defined(ZEN) #include "sdot_microk_haswell-2.c" -#elif defined (SKYLAKEX) +#elif defined (SKYLAKEX) || defined (COOPERLAKE) #include "sdot_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "sdot_microk_sandy-2.c" diff --git a/kernel/x86_64/sdot_microk_haswell-2.c b/kernel/x86_64/sdot_microk_haswell-2.c index 91dc928d3..322f4b28c 100644 --- a/kernel/x86_64/sdot_microk_haswell-2.c +++ b/kernel/x86_64/sdot_microk_haswell-2.c @@ -87,8 +87,9 @@ static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "r" (y), // 3 "r" (dot) // 4 : "cc", - "%xmm4", "%xmm5", - "%xmm6", "%xmm7", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/sdot_microk_sandy-2.c b/kernel/x86_64/sdot_microk_sandy-2.c index ae25d5a50..ce09b06cf 100644 --- a/kernel/x86_64/sdot_microk_sandy-2.c +++ b/kernel/x86_64/sdot_microk_sandy-2.c @@ -90,8 +90,9 @@ static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "r" (y), // 3 "r" (dot) // 4 : "cc", - "%xmm4", "%xmm5", - "%xmm6", "%xmm7", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/sgemm_direct_performant.c b/kernel/x86_64/sgemm_direct_performant.c new file mode 100644 index 000000000..5a20ce395 --- /dev/null +++ b/kernel/x86_64/sgemm_direct_performant.c @@ -0,0 +1,30 @@ +#include "common.h" +/* helper for the direct sgemm code written by Arjan van der Ven */ + + + + +int CNAME(BLASLONG M, BLASLONG N, BLASLONG K) +{ + unsigned long long mnk = M * N * K; + /* large matrices -> not performant */ + if (mnk >= 28 * 512 * 512) + return 0; + + /* + * if the B matrix is not a nice multiple of 4 we get many unaligned accesses, + * and the regular sgemm copy/realignment of data pays off much quicker + */ + if ((N & 3) != 0 && (mnk >= 8 * 512 * 512)) + return 0; + +#ifdef SMP + /* if we can run multithreaded, the threading changes the base threshold */ + if (mnk > 2 * 350 * 512 && num_cpu_avail(3)> 1) + return 0; +#endif + + return 1; +} + + diff --git a/kernel/x86_64/sgemm_direct_skylakex.c b/kernel/x86_64/sgemm_direct_skylakex.c new file mode 100644 index 000000000..aaadcf151 --- /dev/null +++ b/kernel/x86_64/sgemm_direct_skylakex.c @@ -0,0 +1,475 @@ +/* the direct sgemm code written by Arjan van der Ven */ +#include <immintrin.h> +#include "common.h" + +#if defined(SKYLAKEX) || defined (COOPERLAKE) +/* + * "Direct sgemm" code. This code operates directly on the inputs and outputs + * of the sgemm call, avoiding the copies, memory realignments and threading, + * and only supports alpha = 1 and beta = 0. + * This is a common case and provides value for relatively small matrices. + * For larger matrices the "regular" sgemm code is superior, as there the cost of + * copying/shuffling the B matrix really pays off.
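+ *
+ * The kernel is generated from the small macro families defined below
+ * (DECLARE_RESULT_*, BROADCAST_LOAD_A_*, LOAD_B_*, MATMUL_*, STORE_*),
+ * which tile the output in registers. For one 1x16 output tile the
+ * expansion is equivalent to the following sketch (illustrative only,
+ * variable names invented here):
+ *
+ *   __m512 acc = _mm512_setzero_ps();                                  // DECLARE_RESULT_512
+ *   for (k = 0; k < K; k++) {
+ *     __m512 a = _mm512_broadcastss_ps(_mm_load_ss(&A[k + strideA * i])); // BROADCAST_LOAD_A_512
+ *     __m512 b = _mm512_loadu_ps(&B[strideB * k + j]);                 // LOAD_B_512
+ *     acc = _mm512_fmadd_ps(a, b, acc);                                // MATMUL_512
+ *   }
+ *   _mm512_storeu_ps(&R[i * strideR + j], acc);                        // STORE_512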
+ */ + + + +#define DECLARE_RESULT_512(N,M) __m512 result##N##M = _mm512_setzero_ps() +#define BROADCAST_LOAD_A_512(N,M) __m512 Aval##M = _mm512_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) +#define LOAD_B_512(N,M) __m512 Bval##N = _mm512_loadu_ps(&B[strideB * k + j + (N*16)]) +#define MATMUL_512(N,M) result##N##M = _mm512_fmadd_ps(Aval##M, Bval##N , result##N##M) +#define STORE_512(N,M) _mm512_storeu_ps(&R[(i+M) * strideR + j+(N*16)], result##N##M) + + +#define DECLARE_RESULT_256(N,M) __m256 result##N##M = _mm256_setzero_ps() +#define BROADCAST_LOAD_A_256(N,M) __m256 Aval##M = _mm256_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) +#define LOAD_B_256(N,M) __m256 Bval##N = _mm256_loadu_ps(&B[strideB * k + j + (N*8)]) +#define MATMUL_256(N,M) result##N##M = _mm256_fmadd_ps(Aval##M, Bval##N , result##N##M) +#define STORE_256(N,M) _mm256_storeu_ps(&R[(i+M) * strideR + j+(N*8)], result##N##M) + +#define DECLARE_RESULT_128(N,M) __m128 result##N##M = _mm_setzero_ps() +#define BROADCAST_LOAD_A_128(N,M) __m128 Aval##M = _mm_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) +#define LOAD_B_128(N,M) __m128 Bval##N = _mm_loadu_ps(&B[strideB * k + j + (N*4)]) +#define MATMUL_128(N,M) result##N##M = _mm_fmadd_ps(Aval##M, Bval##N , result##N##M) +#define STORE_128(N,M) _mm_storeu_ps(&R[(i+M) * strideR + j+(N*4)], result##N##M) + +#define DECLARE_RESULT_SCALAR(N,M) float result##N##M = 0; +#define BROADCAST_LOAD_A_SCALAR(N,M) float Aval##M = A[k + strideA * (i + M)]; +#define LOAD_B_SCALAR(N,M) float Bval##N = B[k * strideB + j + N]; +#define MATMUL_SCALAR(N,M) result##N##M += Aval##M * Bval##N; +#define STORE_SCALAR(N,M) R[(i+M) * strideR + j + N] = result##N##M; + +#if 0 +int sgemm_kernel_direct_performant(BLASLONG M, BLASLONG N, BLASLONG K) +{ + unsigned long long mnk = M * N * K; + /* large matrices -> not performant */ + if (mnk >= 28 * 512 * 512) + return 0; + + /* + * if the B matrix is not a nice multiple of 4 we get many unaligned accesses, + * and the regular sgemm copy/realignment of data pays off much quicker + */ + if ((N & 3) != 0 && (mnk >= 8 * 512 * 512)) + return 0; + +#ifdef SMP + /* if we can run multithreaded, the threading changes the base threshold */ + if (mnk > 2 * 350 * 512 && num_cpu_avail(3)> 1) + return 0; +#endif + + return 1; +} + +#endif + +//void sgemm_kernel_direct (BLASLONG M, BLASLONG N, BLASLONG K, float * __restrict A, BLASLONG strideA, float * __restrict B, BLASLONG strideB , float * __restrict R, BLASLONG strideR) +void CNAME (BLASLONG M, BLASLONG N, BLASLONG K, float * __restrict A, BLASLONG strideA, float * __restrict B, BLASLONG strideB , float * __restrict R, BLASLONG strideR) +{ + int i, j, k; + + int m4 = M & ~3; + int m2 = M & ~1; + + int n64 = N & ~63; + int n32 = N & ~31; + int n16 = N & ~15; + int n8 = N & ~7; + int n4 = N & ~3; + int n2 = N & ~1; + + i = 0; + + for (i = 0; i < m4; i+=4) { + + for (j = 0; j < n64; j+= 64) { + k = 0; + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); + DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1); + DECLARE_RESULT_512(0, 2); DECLARE_RESULT_512(1, 2); DECLARE_RESULT_512(2, 2); DECLARE_RESULT_512(3, 2); + DECLARE_RESULT_512(0, 3); DECLARE_RESULT_512(1, 3); DECLARE_RESULT_512(2, 3); DECLARE_RESULT_512(3, 3); + + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + BROADCAST_LOAD_A_512(x, 2); + BROADCAST_LOAD_A_512(x, 3); + + LOAD_B_512(0, x); LOAD_B_512(1, x);
LOAD_B_512(2, x); LOAD_B_512(3, x); + + MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0); + MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1); + MATMUL_512(0, 2); MATMUL_512(1, 2); MATMUL_512(2, 2); MATMUL_512(3, 2); + MATMUL_512(0, 3); MATMUL_512(1, 3); MATMUL_512(2, 3); MATMUL_512(3, 3); + } + STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); + STORE_512(0, 1); STORE_512(1, 1); STORE_512(2, 1); STORE_512(3, 1); + STORE_512(0, 2); STORE_512(1, 2); STORE_512(2, 2); STORE_512(3, 2); + STORE_512(0, 3); STORE_512(1, 3); STORE_512(2, 3); STORE_512(3, 3); + } + + for (; j < n32; j+= 32) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); + DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); + DECLARE_RESULT_512(0, 2); DECLARE_RESULT_512(1, 2); + DECLARE_RESULT_512(0, 3); DECLARE_RESULT_512(1, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + BROADCAST_LOAD_A_512(x, 2); + BROADCAST_LOAD_A_512(x, 3); + + LOAD_B_512(0, x); LOAD_B_512(1, x); + + MATMUL_512(0, 0); MATMUL_512(1, 0); + MATMUL_512(0, 1); MATMUL_512(1, 1); + MATMUL_512(0, 2); MATMUL_512(1, 2); + MATMUL_512(0, 3); MATMUL_512(1, 3); + } + STORE_512(0, 0); STORE_512(1, 0); + STORE_512(0, 1); STORE_512(1, 1); + STORE_512(0, 2); STORE_512(1, 2); + STORE_512(0, 3); STORE_512(1, 3); + } + + for (; j < n16; j+= 16) { + DECLARE_RESULT_512(0, 0); + DECLARE_RESULT_512(0, 1); + DECLARE_RESULT_512(0, 2); + DECLARE_RESULT_512(0, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + BROADCAST_LOAD_A_512(x, 2); + BROADCAST_LOAD_A_512(x, 3); + + LOAD_B_512(0, x); + + MATMUL_512(0, 0); + MATMUL_512(0, 1); + MATMUL_512(0, 2); + MATMUL_512(0, 3); + } + STORE_512(0, 0); + STORE_512(0, 1); + STORE_512(0, 2); + STORE_512(0, 3); + } + + for (; j < n8; j+= 8) { + DECLARE_RESULT_256(0, 0); + DECLARE_RESULT_256(0, 1); + DECLARE_RESULT_256(0, 2); + DECLARE_RESULT_256(0, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_256(x, 0); + BROADCAST_LOAD_A_256(x, 1); + BROADCAST_LOAD_A_256(x, 2); + BROADCAST_LOAD_A_256(x, 3); + + LOAD_B_256(0, x); + + MATMUL_256(0, 0); + MATMUL_256(0, 1); + MATMUL_256(0, 2); + MATMUL_256(0, 3); + } + STORE_256(0, 0); + STORE_256(0, 1); + STORE_256(0, 2); + STORE_256(0, 3); + } + + for (; j < n4; j+= 4) { + DECLARE_RESULT_128(0, 0); + DECLARE_RESULT_128(0, 1); + DECLARE_RESULT_128(0, 2); + DECLARE_RESULT_128(0, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_128(x, 0); + BROADCAST_LOAD_A_128(x, 1); + BROADCAST_LOAD_A_128(x, 2); + BROADCAST_LOAD_A_128(x, 3); + + LOAD_B_128(0, x); + + MATMUL_128(0, 0); + MATMUL_128(0, 1); + MATMUL_128(0, 2); + MATMUL_128(0, 3); + } + STORE_128(0, 0); + STORE_128(0, 1); + STORE_128(0, 2); + STORE_128(0, 3); + } + + for (; j < n2; j+= 2) { + DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); + DECLARE_RESULT_SCALAR(0, 1); DECLARE_RESULT_SCALAR(1, 1); + DECLARE_RESULT_SCALAR(0, 2); DECLARE_RESULT_SCALAR(1, 2); + DECLARE_RESULT_SCALAR(0, 3); DECLARE_RESULT_SCALAR(1, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(x, 0); + BROADCAST_LOAD_A_SCALAR(x, 1); + BROADCAST_LOAD_A_SCALAR(x, 2); + BROADCAST_LOAD_A_SCALAR(x, 3); + + LOAD_B_SCALAR(0, x); LOAD_B_SCALAR(1, x); + + MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); + MATMUL_SCALAR(0, 1); MATMUL_SCALAR(1, 1); + MATMUL_SCALAR(0, 2); MATMUL_SCALAR(1, 2); + MATMUL_SCALAR(0, 3); MATMUL_SCALAR(1, 3); + } + STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); + STORE_SCALAR(0, 1); STORE_SCALAR(1, 1); + 
STORE_SCALAR(0, 2); STORE_SCALAR(1, 2); + STORE_SCALAR(0, 3); STORE_SCALAR(1, 3); + } + + for (; j < N; j++) { + DECLARE_RESULT_SCALAR(0, 0) + DECLARE_RESULT_SCALAR(0, 1) + DECLARE_RESULT_SCALAR(0, 2) + DECLARE_RESULT_SCALAR(0, 3) + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(0, 0); + BROADCAST_LOAD_A_SCALAR(0, 1); + BROADCAST_LOAD_A_SCALAR(0, 2); + BROADCAST_LOAD_A_SCALAR(0, 3); + + LOAD_B_SCALAR(0, 0); + + MATMUL_SCALAR(0, 0); + MATMUL_SCALAR(0, 1); + MATMUL_SCALAR(0, 2); + MATMUL_SCALAR(0, 3); + } + STORE_SCALAR(0, 0); + STORE_SCALAR(0, 1); + STORE_SCALAR(0, 2); + STORE_SCALAR(0, 3); + } + } + + for (; i < m2; i+=2) { + j = 0; + + for (; j < n64; j+= 64) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); + DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1); + + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + + LOAD_B_512(0, x); LOAD_B_512(1, x); LOAD_B_512(2, x); LOAD_B_512(3, x); + + MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0); + MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1); + } + STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); + STORE_512(0, 1); STORE_512(1, 1); STORE_512(2, 1); STORE_512(3, 1); + } + + for (; j < n32; j+= 32) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); + DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + + LOAD_B_512(0, x); LOAD_B_512(1, x); + + MATMUL_512(0, 0); MATMUL_512(1, 0); + MATMUL_512(0, 1); MATMUL_512(1, 1); + } + STORE_512(0, 0); STORE_512(1, 0); + STORE_512(0, 1); STORE_512(1, 1); + } + + + for (; j < n16; j+= 16) { + DECLARE_RESULT_512(0, 0); + DECLARE_RESULT_512(0, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + + LOAD_B_512(0, x); + + MATMUL_512(0, 0); + MATMUL_512(0, 1); + } + STORE_512(0, 0); + STORE_512(0, 1); + } + + for (; j < n8; j+= 8) { + DECLARE_RESULT_256(0, 0); + DECLARE_RESULT_256(0, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_256(x, 0); + BROADCAST_LOAD_A_256(x, 1); + + LOAD_B_256(0, x); + + MATMUL_256(0, 0); + MATMUL_256(0, 1); + } + STORE_256(0, 0); + STORE_256(0, 1); + } + + for (; j < n4; j+= 4) { + DECLARE_RESULT_128(0, 0); + DECLARE_RESULT_128(0, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_128(x, 0); + BROADCAST_LOAD_A_128(x, 1); + + LOAD_B_128(0, x); + + MATMUL_128(0, 0); + MATMUL_128(0, 1); + } + STORE_128(0, 0); + STORE_128(0, 1); + } + for (; j < n2; j+= 2) { + DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); + DECLARE_RESULT_SCALAR(0, 1); DECLARE_RESULT_SCALAR(1, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(x, 0); + BROADCAST_LOAD_A_SCALAR(x, 1); + + LOAD_B_SCALAR(0, x); LOAD_B_SCALAR(1, x); + + MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); + MATMUL_SCALAR(0, 1); MATMUL_SCALAR(1, 1); + } + STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); + STORE_SCALAR(0, 1); STORE_SCALAR(1, 1); + } + + for (; j < N; j++) { + DECLARE_RESULT_SCALAR(0, 0); + DECLARE_RESULT_SCALAR(0, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(0, 0); + BROADCAST_LOAD_A_SCALAR(0, 1); + + LOAD_B_SCALAR(0, 0); + + MATMUL_SCALAR(0, 0); + MATMUL_SCALAR(0, 1); + } + STORE_SCALAR(0, 0); + STORE_SCALAR(0, 1); + } + } + + for (; i < M; i+=1) { + j = 0; + for (; j < n64; j+= 64) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); 
DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + LOAD_B_512(0, x); LOAD_B_512(1, x); LOAD_B_512(2, x); LOAD_B_512(3, x); + MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0); + } + STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); + } + for (; j < n32; j+= 32) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + LOAD_B_512(0, x); LOAD_B_512(1, x); + MATMUL_512(0, 0); MATMUL_512(1, 0); + } + STORE_512(0, 0); STORE_512(1, 0); + } + + + for (; j < n16; j+= 16) { + DECLARE_RESULT_512(0, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + + LOAD_B_512(0, x); + + MATMUL_512(0, 0); + } + STORE_512(0, 0); + } + + for (; j < n8; j+= 8) { + DECLARE_RESULT_256(0, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_256(x, 0); + LOAD_B_256(0, x); + MATMUL_256(0, 0); + } + STORE_256(0, 0); + } + + for (; j < n4; j+= 4) { + DECLARE_RESULT_128(0, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_128(x, 0); + LOAD_B_128(0, x); + MATMUL_128(0, 0); + } + STORE_128(0, 0); + } + + for (; j < n2; j+= 2) { + DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(x, 0); + LOAD_B_SCALAR(0, 0); LOAD_B_SCALAR(1, 0); + MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); + } + STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); + } + + for (; j < N; j++) { + DECLARE_RESULT_SCALAR(0, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(0, 0); + LOAD_B_SCALAR(0, 0); + MATMUL_SCALAR(0, 0); + } + STORE_SCALAR(0, 0); + } + } +} +#else +#include "common.h" +void CNAME (BLASLONG M, BLASLONG N, BLASLONG K, float * __restrict A, BLASLONG strideA, float * __restrict B, BLASLONG strideB , float * __restrict R, BLASLONG strideR) +{} +#endif diff --git a/kernel/x86_64/sgemm_kernel_16x4_skylakex.c b/kernel/x86_64/sgemm_kernel_16x4_skylakex.c index 3246e681f..d174bbcc3 100644 --- a/kernel/x86_64/sgemm_kernel_16x4_skylakex.c +++ b/kernel/x86_64/sgemm_kernel_16x4_skylakex.c @@ -762,7 +762,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. int __attribute__ ((noinline)) CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, float * __restrict A, float * __restrict B, float * __restrict C, BLASLONG ldc) { - unsigned long M = m, N = n, K = k; + unsigned long long M = m, N = n, K = k; if (M == 0) return 0; if (N == 0) @@ -1176,467 +1176,4 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, float * __restrict A, flo return 0; } - -/* - * "Direct sgemm" code. This code operates directly on the inputs and outputs - * of the sgemm call, avoiding the copies, memory realignments and threading, - * and only supports alpha = 1 and beta = 0. - * This is a common case and provides value for relatively small matrixes. - * For larger matrixes the "regular" sgemm code is superior, there the cost of - * copying/shuffling the B matrix really pays off. 
- */ - - - -#define DECLARE_RESULT_512(N,M) __m512 result##N##M = _mm512_setzero_ps() -#define BROADCAST_LOAD_A_512(N,M) __m512 Aval##M = _mm512_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) -#define LOAD_B_512(N,M) __m512 Bval##N = _mm512_loadu_ps(&B[strideB * k + j + (N*16)]) -#define MATMUL_512(N,M) result##N##M = _mm512_fmadd_ps(Aval##M, Bval##N , result##N##M) -#define STORE_512(N,M) _mm512_storeu_ps(&R[(i+M) * strideR + j+(N*16)], result##N##M) - - -#define DECLARE_RESULT_256(N,M) __m256 result##N##M = _mm256_setzero_ps() -#define BROADCAST_LOAD_A_256(N,M) __m256 Aval##M = _mm256_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) -#define LOAD_B_256(N,M) __m256 Bval##N = _mm256_loadu_ps(&B[strideB * k + j + (N*8)]) -#define MATMUL_256(N,M) result##N##M = _mm256_fmadd_ps(Aval##M, Bval##N , result##N##M) -#define STORE_256(N,M) _mm256_storeu_ps(&R[(i+M) * strideR + j+(N*8)], result##N##M) - -#define DECLARE_RESULT_128(N,M) __m128 result##N##M = _mm_setzero_ps() -#define BROADCAST_LOAD_A_128(N,M) __m128 Aval##M = _mm_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) -#define LOAD_B_128(N,M) __m128 Bval##N = _mm_loadu_ps(&B[strideB * k + j + (N*4)]) -#define MATMUL_128(N,M) result##N##M = _mm_fmadd_ps(Aval##M, Bval##N , result##N##M) -#define STORE_128(N,M) _mm_storeu_ps(&R[(i+M) * strideR + j+(N*4)], result##N##M) - -#define DECLARE_RESULT_SCALAR(N,M) float result##N##M = 0; -#define BROADCAST_LOAD_A_SCALAR(N,M) float Aval##M = A[k + strideA * (i + M)]; -#define LOAD_B_SCALAR(N,M) float Bval##N = B[k * strideB + j + N]; -#define MATMUL_SCALAR(N,M) result##N##M += Aval##M * Bval##N; -#define STORE_SCALAR(N,M) R[(i+M) * strideR + j + N] = result##N##M; - -int sgemm_kernel_direct_performant(BLASLONG M, BLASLONG N, BLASLONG K) -{ - int mnk = M * N * K; - /* large matrixes -> not performant */ - if (mnk >= 28 * 512 * 512) - return 0; - - /* - * if the B matrix is not a nice multiple if 4 we get many unaligned accesses, - * and the regular sgemm copy/realignment of data pays off much quicker - */ - if ((N & 3) != 0 && (mnk >= 8 * 512 * 512)) - return 0; - -#ifdef SMP - /* if we can run multithreaded, the threading changes the based threshold */ - if (mnk > 2 * 350 * 512 && num_cpu_avail(3)> 1) - return 0; -#endif - - return 1; -} - - - -void sgemm_kernel_direct (BLASLONG M, BLASLONG N, BLASLONG K, float * __restrict A, BLASLONG strideA, float * __restrict B, BLASLONG strideB , float * __restrict R, BLASLONG strideR) -{ - int i, j, k; - - int m4 = M & ~3; - int m2 = M & ~1; - - int n64 = N & ~63; - int n32 = N & ~31; - int n16 = N & ~15; - int n8 = N & ~7; - int n4 = N & ~3; - int n2 = N & ~1; - - i = 0; - - for (i = 0; i < m4; i+=4) { - - for (j = 0; j < n64; j+= 64) { - k = 0; - DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); - DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1); - DECLARE_RESULT_512(0, 2); DECLARE_RESULT_512(1, 2); DECLARE_RESULT_512(2, 2); DECLARE_RESULT_512(3, 2); - DECLARE_RESULT_512(0, 3); DECLARE_RESULT_512(1, 3); DECLARE_RESULT_512(2, 3); DECLARE_RESULT_512(3, 3); - - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - BROADCAST_LOAD_A_512(x, 1); - BROADCAST_LOAD_A_512(x, 2); - BROADCAST_LOAD_A_512(x, 3); - - LOAD_B_512(0, x); LOAD_B_512(1, x); LOAD_B_512(2, x); LOAD_B_512(3, x); - - MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0); - MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1); - MATMUL_512(0, 
2); MATMUL_512(1, 2); MATMUL_512(2, 2); MATMUL_512(3, 2); - MATMUL_512(0, 3); MATMUL_512(1, 3); MATMUL_512(2, 3); MATMUL_512(3, 3); - } - STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); - STORE_512(0, 1); STORE_512(1, 1); STORE_512(2, 1); STORE_512(3, 1); - STORE_512(0, 2); STORE_512(1, 2); STORE_512(2, 2); STORE_512(3, 2); - STORE_512(0, 3); STORE_512(1, 3); STORE_512(2, 3); STORE_512(3, 3); - } - - for (; j < n32; j+= 32) { - DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); - DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); - DECLARE_RESULT_512(0, 2); DECLARE_RESULT_512(1, 2); - DECLARE_RESULT_512(0, 3); DECLARE_RESULT_512(1, 3); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - BROADCAST_LOAD_A_512(x, 1); - BROADCAST_LOAD_A_512(x, 2); - BROADCAST_LOAD_A_512(x, 3); - - LOAD_B_512(0, x); LOAD_B_512(1, x); - - MATMUL_512(0, 0); MATMUL_512(1, 0); - MATMUL_512(0, 1); MATMUL_512(1, 1); - MATMUL_512(0, 2); MATMUL_512(1, 2); - MATMUL_512(0, 3); MATMUL_512(1, 3); - } - STORE_512(0, 0); STORE_512(1, 0); - STORE_512(0, 1); STORE_512(1, 1); - STORE_512(0, 2); STORE_512(1, 2); - STORE_512(0, 3); STORE_512(1, 3); - } - - for (; j < n16; j+= 16) { - DECLARE_RESULT_512(0, 0); - DECLARE_RESULT_512(0, 1); - DECLARE_RESULT_512(0, 2); - DECLARE_RESULT_512(0, 3); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - BROADCAST_LOAD_A_512(x, 1); - BROADCAST_LOAD_A_512(x, 2); - BROADCAST_LOAD_A_512(x, 3); - - LOAD_B_512(0, x); - - MATMUL_512(0, 0); - MATMUL_512(0, 1); - MATMUL_512(0, 2); - MATMUL_512(0, 3); - } - STORE_512(0, 0); - STORE_512(0, 1); - STORE_512(0, 2); - STORE_512(0, 3); - } - - for (; j < n8; j+= 8) { - DECLARE_RESULT_256(0, 0); - DECLARE_RESULT_256(0, 1); - DECLARE_RESULT_256(0, 2); - DECLARE_RESULT_256(0, 3); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_256(x, 0); - BROADCAST_LOAD_A_256(x, 1); - BROADCAST_LOAD_A_256(x, 2); - BROADCAST_LOAD_A_256(x, 3); - - LOAD_B_256(0, x); - - MATMUL_256(0, 0); - MATMUL_256(0, 1); - MATMUL_256(0, 2); - MATMUL_256(0, 3); - } - STORE_256(0, 0); - STORE_256(0, 1); - STORE_256(0, 2); - STORE_256(0, 3); - } - - for (; j < n4; j+= 4) { - DECLARE_RESULT_128(0, 0); - DECLARE_RESULT_128(0, 1); - DECLARE_RESULT_128(0, 2); - DECLARE_RESULT_128(0, 3); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_128(x, 0); - BROADCAST_LOAD_A_128(x, 1); - BROADCAST_LOAD_A_128(x, 2); - BROADCAST_LOAD_A_128(x, 3); - - LOAD_B_128(0, x); - - MATMUL_128(0, 0); - MATMUL_128(0, 1); - MATMUL_128(0, 2); - MATMUL_128(0, 3); - } - STORE_128(0, 0); - STORE_128(0, 1); - STORE_128(0, 2); - STORE_128(0, 3); - } - - for (; j < n2; j+= 2) { - DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); - DECLARE_RESULT_SCALAR(0, 1); DECLARE_RESULT_SCALAR(1, 1); - DECLARE_RESULT_SCALAR(0, 2); DECLARE_RESULT_SCALAR(1, 2); - DECLARE_RESULT_SCALAR(0, 3); DECLARE_RESULT_SCALAR(1, 3); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_SCALAR(x, 0); - BROADCAST_LOAD_A_SCALAR(x, 1); - BROADCAST_LOAD_A_SCALAR(x, 2); - BROADCAST_LOAD_A_SCALAR(x, 3); - - LOAD_B_SCALAR(0, x); LOAD_B_SCALAR(1, x); - - MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); - MATMUL_SCALAR(0, 1); MATMUL_SCALAR(1, 1); - MATMUL_SCALAR(0, 2); MATMUL_SCALAR(1, 2); - MATMUL_SCALAR(0, 3); MATMUL_SCALAR(1, 3); - } - STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); - STORE_SCALAR(0, 1); STORE_SCALAR(1, 1); - STORE_SCALAR(0, 2); STORE_SCALAR(1, 2); - STORE_SCALAR(0, 3); STORE_SCALAR(1, 3); - } - - for (; j < N; j++) { - DECLARE_RESULT_SCALAR(0, 0) - DECLARE_RESULT_SCALAR(0, 1) - DECLARE_RESULT_SCALAR(0, 2) - 
DECLARE_RESULT_SCALAR(0, 3) - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_SCALAR(0, 0); - BROADCAST_LOAD_A_SCALAR(0, 1); - BROADCAST_LOAD_A_SCALAR(0, 2); - BROADCAST_LOAD_A_SCALAR(0, 3); - - LOAD_B_SCALAR(0, 0); - - MATMUL_SCALAR(0, 0); - MATMUL_SCALAR(0, 1); - MATMUL_SCALAR(0, 2); - MATMUL_SCALAR(0, 3); - } - STORE_SCALAR(0, 0); - STORE_SCALAR(0, 1); - STORE_SCALAR(0, 2); - STORE_SCALAR(0, 3); - } - } - - for (; i < m2; i+=2) { - j = 0; - - for (; j < n64; j+= 64) { - DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); - DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1); - - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - BROADCAST_LOAD_A_512(x, 1); - - LOAD_B_512(0, x); LOAD_B_512(1, x); LOAD_B_512(2, x); LOAD_B_512(3, x); - - MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0); - MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1); - } - STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); - STORE_512(0, 1); STORE_512(1, 1); STORE_512(2, 1); STORE_512(3, 1); - } - - for (; j < n32; j+= 32) { - DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); - DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - BROADCAST_LOAD_A_512(x, 1); - - LOAD_B_512(0, x); LOAD_B_512(1, x); - - MATMUL_512(0, 0); MATMUL_512(1, 0); - MATMUL_512(0, 1); MATMUL_512(1, 1); - } - STORE_512(0, 0); STORE_512(1, 0); - STORE_512(0, 1); STORE_512(1, 1); - } - - - for (; j < n16; j+= 16) { - DECLARE_RESULT_512(0, 0); - DECLARE_RESULT_512(0, 1); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - BROADCAST_LOAD_A_512(x, 1); - - LOAD_B_512(0, x); - - MATMUL_512(0, 0); - MATMUL_512(0, 1); - } - STORE_512(0, 0); - STORE_512(0, 1); - } - - for (; j < n8; j+= 8) { - DECLARE_RESULT_256(0, 0); - DECLARE_RESULT_256(0, 1); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_256(x, 0); - BROADCAST_LOAD_A_256(x, 1); - - LOAD_B_256(0, x); - - MATMUL_256(0, 0); - MATMUL_256(0, 1); - } - STORE_256(0, 0); - STORE_256(0, 1); - } - - for (; j < n4; j+= 4) { - DECLARE_RESULT_128(0, 0); - DECLARE_RESULT_128(0, 1); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_128(x, 0); - BROADCAST_LOAD_A_128(x, 1); - - LOAD_B_128(0, x); - - MATMUL_128(0, 0); - MATMUL_128(0, 1); - } - STORE_128(0, 0); - STORE_128(0, 1); - } - for (; j < n2; j+= 2) { - DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); - DECLARE_RESULT_SCALAR(0, 1); DECLARE_RESULT_SCALAR(1, 1); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_SCALAR(x, 0); - BROADCAST_LOAD_A_SCALAR(x, 1); - - LOAD_B_SCALAR(0, x); LOAD_B_SCALAR(1, x); - - MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); - MATMUL_SCALAR(0, 1); MATMUL_SCALAR(1, 1); - } - STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); - STORE_SCALAR(0, 1); STORE_SCALAR(1, 1); - } - - for (; j < N; j++) { - DECLARE_RESULT_SCALAR(0, 0); - DECLARE_RESULT_SCALAR(0, 1); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_SCALAR(0, 0); - BROADCAST_LOAD_A_SCALAR(0, 1); - - LOAD_B_SCALAR(0, 0); - - MATMUL_SCALAR(0, 0); - MATMUL_SCALAR(0, 1); - } - STORE_SCALAR(0, 0); - STORE_SCALAR(0, 1); - } - } - - for (; i < M; i+=1) { - j = 0; - for (; j < n64; j+= 64) { - DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - LOAD_B_512(0, x); LOAD_B_512(1, x); LOAD_B_512(2, x); LOAD_B_512(3, x); - MATMUL_512(0, 0); MATMUL_512(1, 
0); MATMUL_512(2, 0); MATMUL_512(3, 0); - } - STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); - } - for (; j < n32; j+= 32) { - DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - LOAD_B_512(0, x); LOAD_B_512(1, x); - MATMUL_512(0, 0); MATMUL_512(1, 0); - } - STORE_512(0, 0); STORE_512(1, 0); - } - - - for (; j < n16; j+= 16) { - DECLARE_RESULT_512(0, 0); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_512(x, 0); - - LOAD_B_512(0, x); - - MATMUL_512(0, 0); - } - STORE_512(0, 0); - } - - for (; j < n8; j+= 8) { - DECLARE_RESULT_256(0, 0); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_256(x, 0); - LOAD_B_256(0, x); - MATMUL_256(0, 0); - } - STORE_256(0, 0); - } - - for (; j < n4; j+= 4) { - DECLARE_RESULT_128(0, 0); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_128(x, 0); - LOAD_B_128(0, x); - MATMUL_128(0, 0); - } - STORE_128(0, 0); - } - - for (; j < n2; j+= 2) { - DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_SCALAR(x, 0); - LOAD_B_SCALAR(0, 0); LOAD_B_SCALAR(1, 0); - MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); - } - STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); - } - - for (; j < N; j++) { - DECLARE_RESULT_SCALAR(0, 0); - - for (k = 0; k < K; k++) { - BROADCAST_LOAD_A_SCALAR(0, 0); - LOAD_B_SCALAR(0, 0); - MATMUL_SCALAR(0, 0); - } - STORE_SCALAR(0, 0); - } - } -} \ No newline at end of file +#include "sgemm_direct_skylakex.c" diff --git a/kernel/x86_64/sgemm_kernel_16x4_skylakex_2.c b/kernel/x86_64/sgemm_kernel_16x4_skylakex_2.c new file mode 100644 index 000000000..ec7570179 --- /dev/null +++ b/kernel/x86_64/sgemm_kernel_16x4_skylakex_2.c @@ -0,0 +1,680 @@ +/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store, %6 = b_pref */ +/* r10 to assist prefetch, r11 = m_counter, r12 = k << 4(const), r13 = k_todo, r14 = b_head_pos(const), r15 = %1 + 3r12 */ + +#include "common.h" +#include <stdint.h> +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + #define BACKWARDS 1 +#else + #define BACKWARDS 0 +#endif +#define REC_POINTER_1(ptr) "salq $2,%%r13; subq %%r13,"#ptr"; sarq $2,%%r13;" +#define REC_POINTER_2(ptr) "salq $3,%%r13; subq %%r13,"#ptr"; sarq $3,%%r13;" +#define REC_POINTER_4(ptr) "salq $4,%%r13; subq %%r13,"#ptr"; sarq $4,%%r13;" +#define REC_POINTER_8(ptr) "salq $5,%%r13; subq %%r13,"#ptr"; sarq $5,%%r13;" +#define REC_POINTER_16(ptr) "salq $6,%%r13; subq %%r13,"#ptr"; sarq $6,%%r13;" +#define INC_POINTER_1(ptr) "sarq $2,%%r12; addq %%r12,"#ptr"; salq $2,%%r12;" +#define INC_POINTER_2(ptr) "sarq $1,%%r12; addq %%r12,"#ptr"; salq $1,%%r12;" +#define INC_POINTER_4(ptr) "addq %%r12,"#ptr";" +#define INC_POINTER_8(ptr) "leaq ("#ptr",%%r12,2),"#ptr";" +#define INC_POINTER_16(ptr) "leaq ("#ptr",%%r12,4),"#ptr";" +#define SET_POINTER(ptr,dim) REC_POINTER_##dim(ptr) INC_POINTER_##dim(ptr) +#define SET_PB_1 SET_POINTER(%1,1) +#define SET_PB_2 SET_POINTER(%1,2) +#define SET_PB_4 SET_POINTER(%1,4) +#define SET_PB_8 SET_POINTER(%1,4) +#define SET_PB_12 SET_POINTER(%1,4) +#define SET_PB_16 SET_POINTER(%1,4) +#define SET_PB_20 SET_POINTER(%1,4) +#define SET_PB_24 SET_POINTER(%1,4) +#ifdef TRMMKERNEL + #if BACKWARDS == 1 + #define START_SET_PAPB(mdim,ndim) SET_POINTER(%0,mdim) "movq %%r14,%1;" SET_PB_##ndim "leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;" + #define END_SET_PA(mdim) "" + #else + #define START_SET_PAPB(mdim,ndim) "movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq
%%r12,%%r15;" + #define END_SET_PA(mdim) SET_POINTER(%0,mdim) + #endif +#else + #define START_SET_PAPB(mdim,ndim) "movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;" + #define END_SET_PA(mdim) "" +#endif +#define RECOVER_PA(mdim) REC_POINTER_##mdim(%0) + +#if defined(TRMMKERNEL) && !defined(LEFT) + #if BACKWARDS == 1 + #define KERNEL_HEAD_C_n8(mdim) \ + KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 "subq $4,%4; addq $64,%%r15;" + #define KERNEL_HEAD_C_n12(mdim) KERNEL_HEAD_C_n8(mdim)\ + KERNEL_k1m##mdim##n8 KERNEL_k1m##mdim##n8 KERNEL_k1m##mdim##n8 KERNEL_k1m##mdim##n8 "subq $4,%4; addq $64,%%r15;" + #define KERNEL_HEAD_C_n16(mdim) KERNEL_HEAD_C_n12(mdim)\ + KERNEL_k1m##mdim##n12 KERNEL_k1m##mdim##n12 KERNEL_k1m##mdim##n12 KERNEL_k1m##mdim##n12 "subq $4,%4; addq $64,%%r15;" + #define KERNEL_HEAD_C_n20(mdim) KERNEL_HEAD_C_n16(mdim)\ + KERNEL_k1m##mdim##n16 KERNEL_k1m##mdim##n16 KERNEL_k1m##mdim##n16 KERNEL_k1m##mdim##n16 "subq $4,%4;" + #define KERNEL_HEAD_C_n24(mdim) KERNEL_HEAD_C_n20(mdim)\ + KERNEL_k1m##mdim##n20 KERNEL_k1m##mdim##n20 KERNEL_k1m##mdim##n20 KERNEL_k1m##mdim##n20 "subq $4,%4;" + #define KERNEL_HEAD_R_n4(mdim) "subq $12,%4; addq $64,%%r15; addq $"#mdim"*48,%0;" + #define KERNEL_HEAD_R_n8(mdim) KERNEL_HEAD_R_n4(mdim)\ + kernel_k1m##mdim##n4(%%r15) kernel_k1m##mdim##n4(%%r15) kernel_k1m##mdim##n4(%%r15) kernel_k1m##mdim##n4(%%r15) "subq $4,%4;" + #define KERNEL_HEAD_R_n12(mdim) KERNEL_HEAD_R_n8(mdim)\ + kernel_k1m##mdim##n8(%%r15) kernel_k1m##mdim##n8(%%r15) kernel_k1m##mdim##n8(%%r15) kernel_k1m##mdim##n8(%%r15) "subq $4,%4;" + #define KERNEL_TAIL_C_n8(mdim) "" + #define KERNEL_TAIL_C_n12(mdim) "" + #define KERNEL_TAIL_C_n16(mdim) "" + #define KERNEL_TAIL_C_n20(mdim) "" + #define KERNEL_TAIL_C_n24(mdim) "" + #define KERNEL_TAIL_R_n4(mdim) "" + #define KERNEL_TAIL_R_n8(mdim) "" + #define KERNEL_TAIL_R_n12(mdim) "" + #else + #define KERNEL_HEAD_C_n8(mdim) "" + #define KERNEL_HEAD_C_n12(mdim) "" + #define KERNEL_HEAD_C_n16(mdim) "" + #define KERNEL_HEAD_C_n20(mdim) "" + #define KERNEL_HEAD_C_n24(mdim) "" + #define KERNEL_HEAD_R_n4(mdim) "" + #define KERNEL_HEAD_R_n8(mdim) "" + #define KERNEL_HEAD_R_n12(mdim) "" + #define end_kernel_k4_ncx1(k_0,k_1,k_2,k_3,n1,mdim) \ + end_load_a_k1m##mdim(k_0) end_acc_nc##n1##_k1m##mdim(k_0)\ + end_load_a_k1m##mdim(k_1) end_acc_nc##n1##_k1m##mdim(k_1)\ + end_load_a_k1m##mdim(k_2) end_acc_nc##n1##_k1m##mdim(k_2)\ + end_load_a_k1m##mdim(k_3) end_acc_nc##n1##_k1m##mdim(k_3) + #define end_kernel_k4_ncx2(k_0,k_1,k_2,k_3,n1,n2,mdim) \ + end_load_a_k1m##mdim(k_0) end_acc_nc##n1##_k1m##mdim(k_0) end_acc_nc##n2##_k1m##mdim(k_0)\ + end_load_a_k1m##mdim(k_1) end_acc_nc##n1##_k1m##mdim(k_1) end_acc_nc##n2##_k1m##mdim(k_1)\ + end_load_a_k1m##mdim(k_2) end_acc_nc##n1##_k1m##mdim(k_2) end_acc_nc##n2##_k1m##mdim(k_2)\ + end_load_a_k1m##mdim(k_3) end_acc_nc##n1##_k1m##mdim(k_3) end_acc_nc##n2##_k1m##mdim(k_3) + #define end_kernel_k4_ncx3(k_0,k_1,k_2,k_3,n1,n2,n3,mdim) \ + end_load_a_k1m##mdim(k_0) end_acc_nc##n1##_k1m##mdim(k_0) end_acc_nc##n2##_k1m##mdim(k_0) end_acc_nc##n3##_k1m##mdim(k_0)\ + end_load_a_k1m##mdim(k_1) end_acc_nc##n1##_k1m##mdim(k_1) end_acc_nc##n2##_k1m##mdim(k_1) end_acc_nc##n3##_k1m##mdim(k_1)\ + end_load_a_k1m##mdim(k_2) end_acc_nc##n1##_k1m##mdim(k_2) end_acc_nc##n2##_k1m##mdim(k_2) end_acc_nc##n3##_k1m##mdim(k_2)\ + end_load_a_k1m##mdim(k_3) end_acc_nc##n1##_k1m##mdim(k_3) end_acc_nc##n2##_k1m##mdim(k_3) end_acc_nc##n3##_k1m##mdim(k_3) + #define 
end_kernel_k4_ncx4(k_0,k_1,k_2,k_3,n1,n2,n3,n4,mdim) \ + end_load_a_k1m##mdim(k_0) end_acc_nc##n1##_k1m##mdim(k_0) end_acc_nc##n2##_k1m##mdim(k_0) end_acc_nc##n3##_k1m##mdim(k_0) end_acc_nc##n4##_k1m##mdim(k_0)\ + end_load_a_k1m##mdim(k_1) end_acc_nc##n1##_k1m##mdim(k_1) end_acc_nc##n2##_k1m##mdim(k_1) end_acc_nc##n3##_k1m##mdim(k_1) end_acc_nc##n4##_k1m##mdim(k_1)\ + end_load_a_k1m##mdim(k_2) end_acc_nc##n1##_k1m##mdim(k_2) end_acc_nc##n2##_k1m##mdim(k_2) end_acc_nc##n3##_k1m##mdim(k_2) end_acc_nc##n4##_k1m##mdim(k_2)\ + end_load_a_k1m##mdim(k_3) end_acc_nc##n1##_k1m##mdim(k_3) end_acc_nc##n2##_k1m##mdim(k_3) end_acc_nc##n3##_k1m##mdim(k_3) end_acc_nc##n4##_k1m##mdim(k_3) + #define end_kernel_k4_ncx5(k_0,k_1,k_2,k_3,n1,n2,n3,n4,n5,mdim) \ + end_load_a_k1m##mdim(k_0) end_acc_nc##n1##_k1m##mdim(k_0) end_acc_nc##n2##_k1m##mdim(k_0)\ + end_acc_nc##n3##_k1m##mdim(k_0) end_acc_nc##n4##_k1m##mdim(k_0) end_acc_nc##n5##_k1m##mdim(k_0)\ + end_load_a_k1m##mdim(k_1) end_acc_nc##n1##_k1m##mdim(k_1) end_acc_nc##n2##_k1m##mdim(k_1)\ + end_acc_nc##n3##_k1m##mdim(k_1) end_acc_nc##n4##_k1m##mdim(k_1) end_acc_nc##n5##_k1m##mdim(k_1)\ + end_load_a_k1m##mdim(k_2) end_acc_nc##n1##_k1m##mdim(k_2) end_acc_nc##n2##_k1m##mdim(k_2)\ + end_acc_nc##n3##_k1m##mdim(k_2) end_acc_nc##n4##_k1m##mdim(k_2) end_acc_nc##n5##_k1m##mdim(k_2)\ + end_load_a_k1m##mdim(k_3) end_acc_nc##n1##_k1m##mdim(k_3) end_acc_nc##n2##_k1m##mdim(k_3)\ + end_acc_nc##n3##_k1m##mdim(k_3) end_acc_nc##n4##_k1m##mdim(k_3) end_acc_nc##n5##_k1m##mdim(k_3) + #define KERNEL_TAIL_C_n8(mdim) end_kernel_k4_ncx1(0,1,2,3,2,mdim) + #define KERNEL_TAIL_C_n12(mdim) \ + end_kernel_k4_ncx2(0,1,2,3,2,3,mdim) end_kernel_k4_ncx1(4,5,6,7,3,mdim) + #define KERNEL_TAIL_C_n16(mdim) \ + end_kernel_k4_ncx3(0,1,2,3,2,3,4,mdim) end_kernel_k4_ncx2(4,5,6,7,3,4,mdim) end_kernel_k4_ncx1(8,9,10,11,4,mdim) + #define KERNEL_TAIL_C_n20(mdim) \ + end_kernel_k4_ncx4(0,1,2,3,2,3,4,5,mdim) end_kernel_k4_ncx3(4,5,6,7,3,4,5,mdim)\ + end_kernel_k4_ncx2(8,9,10,11,4,5,mdim) end_kernel_k4_ncx1(12,13,14,15,5,mdim) + #define KERNEL_TAIL_C_n24(mdim) \ + end_kernel_k4_ncx5(0,1,2,3,2,3,4,5,6,mdim) end_kernel_k4_ncx4(4,5,6,7,3,4,5,6,mdim) end_kernel_k4_ncx3(8,9,10,11,4,5,6,mdim)\ + end_kernel_k4_ncx2(12,13,14,15,5,6,mdim) end_kernel_k4_ncx1(16,17,18,19,6,mdim) + #define KERNEL_TAIL_R_n4(mdim) \ + end_kernel_k4_ncx1(0,1,2,3,4,mdim) end_kernel_k4_ncx1(4,5,6,7,4,mdim) end_kernel_k4_ncx1(8,9,10,11,4,mdim) + #define KERNEL_TAIL_R_n8(mdim) \ + end_kernel_k4_ncx2(0,1,2,3,4,5,mdim) end_kernel_k4_ncx2(4,5,6,7,4,5,mdim) end_kernel_k4_ncx2(8,9,10,11,4,5,mdim) end_kernel_k4_ncx1(12,13,14,15,5,mdim) + #define KERNEL_TAIL_R_n12(mdim) \ + end_kernel_k4_ncx3(0,1,2,3,4,5,6,mdim) end_kernel_k4_ncx3(4,5,6,7,4,5,6,mdim) end_kernel_k4_ncx3(8,9,10,11,4,5,6,mdim)\ + end_kernel_k4_ncx2(12,13,14,15,5,6,mdim) end_kernel_k4_ncx1(16,17,18,19,6,mdim) + #endif +#else + #define KERNEL_HEAD_C_n8(mdim) "" + #define KERNEL_HEAD_C_n12(mdim) "" + #define KERNEL_HEAD_C_n16(mdim) "" + #define KERNEL_HEAD_C_n20(mdim) "" + #define KERNEL_HEAD_C_n24(mdim) "" + #define KERNEL_HEAD_R_n4(mdim) "" + #define KERNEL_HEAD_R_n8(mdim) "" + #define KERNEL_HEAD_R_n12(mdim) "" + #define KERNEL_TAIL_C_n8(mdim) "" + #define KERNEL_TAIL_C_n12(mdim) "" + #define KERNEL_TAIL_C_n16(mdim) "" + #define KERNEL_TAIL_C_n20(mdim) "" + #define KERNEL_TAIL_C_n24(mdim) "" + #define KERNEL_TAIL_R_n4(mdim) "" + #define KERNEL_TAIL_R_n8(mdim) "" + #define KERNEL_TAIL_R_n12(mdim) "" +#endif +#define KERNEL_HEAD_C_n1(mdim) "" +#define KERNEL_HEAD_C_n2(mdim) "" +#define 
KERNEL_HEAD_C_n4(mdim) "" +#define KERNEL_TAIL_C_n1(mdim) "" +#define KERNEL_TAIL_C_n2(mdim) "" +#define KERNEL_TAIL_C_n4(mdim) "" + +/* m = 16 */ /* zmm8-zmm31 for accumulators, zmm1-zmm7 for temporary use, zmm0 for alpha */ +#define KERNEL_k1m16n1 \ + "vmovups (%0),%%zmm4; addq $64,%0;"\ + "vbroadcastss (%1),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,%%zmm8;"\ + "addq $4,%1;" +#define KERNEL_h_k1m16n2 \ + "vmovsldup (%0),%%zmm4; vmovshdup (%0),%%zmm5; prefetcht0 512(%0); addq $64,%0;"\ + "vbroadcastsd (%1),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,%%zmm8; vfmadd231ps %%zmm5,%%zmm6,%%zmm9;" +#define KERNEL_k1m16n2 KERNEL_h_k1m16n2 "addq $8,%1;" +#define KERNEL_h_k1m16n4 KERNEL_h_k1m16n2 "vbroadcastsd 8(%1),%%zmm7; vfmadd231ps %%zmm4,%%zmm7,%%zmm10; vfmadd231ps %%zmm5,%%zmm7,%%zmm11;" +#define KERNEL_k1m16n4 KERNEL_h_k1m16n4 "addq $16,%1;" +#define unit_gen_kernel_k1m16n4(c1,c2,c3,c4,k_no,...) \ + "vbroadcastsd "#k_no"*16 ("#__VA_ARGS__"),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,"#c1"; vfmadd231ps %%zmm5,%%zmm6,"#c2";"\ + "vbroadcastsd "#k_no"*16+8("#__VA_ARGS__"),%%zmm7; vfmadd231ps %%zmm4,%%zmm7,"#c3"; vfmadd231ps %%zmm5,%%zmm7,"#c4";" +#define unit_kernel_k1m16n4(c1,c2,c3,c4, ...) unit_gen_kernel_k1m16n4(c1,c2,c3,c4,0,__VA_ARGS__) +#define KERNEL_h_k1m16n8 KERNEL_h_k1m16n4 unit_kernel_k1m16n4(%%zmm12,%%zmm13,%%zmm14,%%zmm15,%1,%%r12,1) +#define KERNEL_k1m16n8 KERNEL_h_k1m16n8 "addq $16,%1;" +#define KERNEL_h_k1m16n12 KERNEL_h_k1m16n8 unit_kernel_k1m16n4(%%zmm16,%%zmm17,%%zmm18,%%zmm19,%1,%%r12,2) +#define KERNEL_k1m16n12 KERNEL_h_k1m16n12 "addq $16,%1;" +#define KERNEL_h_k1m16n16 KERNEL_k1m16n12 unit_kernel_k1m16n4(%%zmm20,%%zmm21,%%zmm22,%%zmm23,%%r15) +#define KERNEL_k1m16n16 KERNEL_h_k1m16n16 "addq $16,%%r15;" +#define KERNEL_h_k1m16n20 KERNEL_h_k1m16n16 unit_kernel_k1m16n4(%%zmm24,%%zmm25,%%zmm26,%%zmm27,%%r15,%%r12,1) +#define KERNEL_k1m16n20 KERNEL_h_k1m16n20 "addq $16,%%r15;" +#define KERNEL_h_k1m16n24 KERNEL_h_k1m16n20 unit_kernel_k1m16n4(%%zmm28,%%zmm29,%%zmm30,%%zmm31,%%r15,%%r12,2) +#define KERNEL_k1m16n24 KERNEL_h_k1m16n24 "addq $16,%%r15;" +#define end_load_a_k1m16(k_no) "vmovsldup "#k_no"*64(%0),%%zmm4; vmovshdup "#k_no"*64(%0),%%zmm5;" +#define end_acc_nc2_k1m16(k_no) unit_gen_kernel_k1m16n4(%%zmm12,%%zmm13,%%zmm14,%%zmm15,k_no,%1,%%r12,1) +#define end_acc_nc3_k1m16(k_no) unit_gen_kernel_k1m16n4(%%zmm16,%%zmm17,%%zmm18,%%zmm19,k_no,%1,%%r12,2) +#define end_acc_nc4_k1m16(k_no) unit_gen_kernel_k1m16n4(%%zmm20,%%zmm21,%%zmm22,%%zmm23,k_no,%%r15) +#define end_acc_nc5_k1m16(k_no) unit_gen_kernel_k1m16n4(%%zmm24,%%zmm25,%%zmm26,%%zmm27,k_no,%%r15,%%r12,1) +#define end_acc_nc6_k1m16(k_no) unit_gen_kernel_k1m16n4(%%zmm28,%%zmm29,%%zmm30,%%zmm31,k_no,%%r15,%%r12,2) +#define INIT_m16n1 "vpxorq %%zmm8,%%zmm8,%%zmm8;" +#define INIT_m16n2 INIT_m16n1 "vpxorq %%zmm9,%%zmm9,%%zmm9;" +#define INIT_m16n4 INIT_m16n2 "vpxorq %%zmm10,%%zmm10,%%zmm10;vpxorq %%zmm11,%%zmm11,%%zmm11;" +#define unit_init_m16n4(c1,c2,c3,c4) \ + "vpxorq "#c1","#c1","#c1";vpxorq "#c2","#c2","#c2";vpxorq "#c3","#c3","#c3";vpxorq "#c4","#c4","#c4";" +#define INIT_m16n8 INIT_m16n4 unit_init_m16n4(%%zmm12,%%zmm13,%%zmm14,%%zmm15) +#define INIT_m16n12 INIT_m16n8 unit_init_m16n4(%%zmm16,%%zmm17,%%zmm18,%%zmm19) +#define INIT_m16n16 INIT_m16n12 unit_init_m16n4(%%zmm20,%%zmm21,%%zmm22,%%zmm23) +#define INIT_m16n20 INIT_m16n16 unit_init_m16n4(%%zmm24,%%zmm25,%%zmm26,%%zmm27) +#define INIT_m16n24 INIT_m16n20 unit_init_m16n4(%%zmm28,%%zmm29,%%zmm30,%%zmm31) +#ifdef TRMMKERNEL + #define SAVE_h_m16n1 "vmulps %%zmm8,%%zmm0,%%zmm8; vmovups %%zmm8,(%2);" 
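+ /* TRMM kernels overwrite C with alpha * acc (vmulps against the alpha held in
+    zmm0); the non-TRMM variants below use vfmadd213ps instead, computing
+    C = alpha * acc + C. unit_save_m16n2 also undoes the vmovsldup/vmovshdup
+    interleave used while accumulating: the unpcklps/unpckhps + unpcklpd/unpckhpd
+    sequence recombines the two accumulators into the two consecutive result
+    columns written at (%5) and (%5,%3,1). */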
+ #define unit_save_m16n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%zmm6; vunpckhps "#c2","#c1",%%zmm7; vunpcklpd %%zmm7,%%zmm6,%%zmm4; vunpckhpd %%zmm7,%%zmm6,%%zmm5;"\ + "vmulps %%zmm4,%%zmm0,%%zmm4; vmulps %%zmm5,%%zmm0,%%zmm5;"\ + "vmovups %%zmm4,(%5); vmovups %%zmm5,(%5,%3,1); leaq (%5,%3,2),%5;" +#else + #define SAVE_h_m16n1 "vfmadd213ps (%2),%%zmm0,%%zmm8; vmovups %%zmm8,(%2);" + #define unit_save_m16n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%zmm6; vunpckhps "#c2","#c1",%%zmm7; vunpcklpd %%zmm7,%%zmm6,%%zmm4; vunpckhpd %%zmm7,%%zmm6,%%zmm5;"\ + "vfmadd213ps (%5),%%zmm0,%%zmm4; vfmadd213ps (%5,%3,1),%%zmm0,%%zmm5;"\ + "vmovups %%zmm4,(%5); vmovups %%zmm5,(%5,%3,1); leaq (%5,%3,2),%5;" +#endif +#define SAVE_h_m16n2 "movq %2,%5;" unit_save_m16n2(%%zmm8,%%zmm9) +#define SAVE_h_m16n4 SAVE_h_m16n2 unit_save_m16n2(%%zmm10,%%zmm11) +#define SAVE_h_m16n8 SAVE_h_m16n4 unit_save_m16n2(%%zmm12,%%zmm13) unit_save_m16n2(%%zmm14,%%zmm15) +#define SAVE_h_m16n12 SAVE_h_m16n8 unit_save_m16n2(%%zmm16,%%zmm17) unit_save_m16n2(%%zmm18,%%zmm19) +#define SAVE_h_m16n16 SAVE_h_m16n12 unit_save_m16n2(%%zmm20,%%zmm21) unit_save_m16n2(%%zmm22,%%zmm23) +#define SAVE_h_m16n20 SAVE_h_m16n16 unit_save_m16n2(%%zmm24,%%zmm25) unit_save_m16n2(%%zmm26,%%zmm27) +#define SAVE_h_m16n24 SAVE_h_m16n20 unit_save_m16n2(%%zmm28,%%zmm29) unit_save_m16n2(%%zmm30,%%zmm31) +#define SAVE_m16(ndim) SAVE_h_m16n##ndim "addq $64,%2;" +#define COMPUTE_m16(ndim) \ + INIT_m16n##ndim START_SET_PAPB(16,ndim)\ + "movq %%r13,%4; movq %2,%5; xorq %%r10,%%r10;"\ + KERNEL_HEAD_C_n##ndim(16)\ + "cmpq $16,%4; jb "#ndim"016162f;"\ + #ndim"016161:\n\t"\ + "cmpq $126,%%r10; movq $126,%%r10; cmoveq %3,%%r10;"\ + KERNEL_k1m16n##ndim\ + KERNEL_k1m16n##ndim\ + "prefetcht1 (%5); subq $63,%5; addq %%r10,%5;"\ + KERNEL_k1m16n##ndim\ + KERNEL_k1m16n##ndim\ + "prefetcht1 (%6); addq $32,%6;"\ + "subq $4,%4; cmpq $16,%4; jnb "#ndim"016161b;"\ + "movq %2,%5;"\ + #ndim"016162:\n\t"\ + "testq %4,%4; jz "#ndim"016164f;"\ + #ndim"016163:\n\t"\ + "prefetcht0 (%5); prefetcht0 63(%5); prefetcht0 (%5,%3,1); prefetcht0 63(%5,%3,1);"\ + KERNEL_k1m16n##ndim\ + "leaq (%5,%3,2),%5; decq %4; jnz "#ndim"016163b;"\ + #ndim"016164:\n\t"\ + KERNEL_TAIL_C_n##ndim(16)\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14);"\ + SAVE_m16(ndim) END_SET_PA(16) + +/* m = 8 *//* ymm0 for alpha, ymm1-ymm3 for temporary use, ymm4-ymm15 for accumulators */ +#define kernel_k1m8n1(b_addr) \ + "vmovups (%0),%%ymm1; addq $32,%0;"\ + "vbroadcastss ("#b_addr"),%%ymm2; vfmadd231ps %%ymm1,%%ymm2,%%ymm4;"\ + "addq $4,"#b_addr";" +#define kernel_h_k1m8n2(b_addr) \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; addq $32,%0;"\ + "vbroadcastsd ("#b_addr"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;" +#define kernel_k1m8n2(b_addr) kernel_h_k1m8n2(b_addr) "addq $8,"#b_addr";" +#define kernel_h_k1m8n4(b_addr) \ + kernel_h_k1m8n2(b_addr) "vbroadcastsd 8("#b_addr"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;" +#define kernel_k1m8n4(b_addr) kernel_h_k1m8n4(b_addr) "addq $16,"#b_addr";" +#define unit_gen_kernel_k1m8n4(c1,c2,c3,c4,k_no,...) \ + "vbroadcastsd "#k_no"*16 ("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c1"; vfmadd231ps %%ymm2,%%ymm3,"#c2";"\ + "vbroadcastsd "#k_no"*16+8("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c3"; vfmadd231ps %%ymm2,%%ymm3,"#c4";" +#define unit_kernel_k1m8n4(c1,c2,c3,c4,...) 
unit_gen_kernel_k1m8n4(c1,c2,c3,c4,0,__VA_ARGS__) +#define kernel_h_k1m8n8(b_addr) kernel_h_k1m8n4(b_addr) unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,b_addr,%%r12,1) +#define kernel_k1m8n8(b_addr) kernel_h_k1m8n8(b_addr) "addq $16,"#b_addr";" +#define kernel_h_k1m8n12(b_addr) kernel_h_k1m8n8(b_addr) unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,b_addr,%%r12,2) +#define kernel_k1m8n12(b_addr) kernel_h_k1m8n12(b_addr) "addq $16,"#b_addr";" +#define KERNEL_k1m8n1 kernel_k1m8n1(%1) +#define KERNEL_k1m8n2 kernel_k1m8n2(%1) +#define KERNEL_k1m8n4 kernel_k1m8n4(%1) +#define KERNEL_k1m8n8 kernel_k1m8n8(%1) +#define KERNEL_k1m8n12 kernel_k1m8n12(%1) +#define end_load_a_k1m8(k_no) "vmovsldup "#k_no"*32(%0),%%ymm1; vmovshdup "#k_no"*32(%0),%%ymm2;" +#define end_acc_nc2_k1m8(k_no) unit_gen_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,k_no,%1,%%r12,1) +#define end_acc_nc3_k1m8(k_no) unit_gen_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,k_no,%1,%%r12,2) +#define end_acc_nc4_k1m8(k_no) unit_gen_kernel_k1m8n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7,k_no,%%r15) +#define end_acc_nc5_k1m8(k_no) unit_gen_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,k_no,%%r15,%%r12,1) +#define end_acc_nc6_k1m8(k_no) unit_gen_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,k_no,%%r15,%%r12,2) +#define INIT_m8n1 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define INIT_m8n2 INIT_m8n1 "vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m8n4 INIT_m8n2 "vpxor %%ymm6,%%ymm6,%%ymm6;vpxor %%ymm7,%%ymm7,%%ymm7;" +#define unit_init_m8n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m8n8 INIT_m8n4 unit_init_m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11) +#define INIT_m8n12 INIT_m8n8 unit_init_m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15) +#ifdef TRMMKERNEL + #define SAVE_L_m8n1 "vmulps %%ymm4,%%ymm0,%%ymm4; vmovups %%ymm4,(%2);" + #define unit_save_m8n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%ymm2; vunpckhps "#c2","#c1",%%ymm3;"\ + "vunpcklpd %%ymm3,%%ymm2,%%ymm1; vmulps %%ymm1,%%ymm0,%%ymm1; vmovups %%ymm1,(%5);"\ + "vunpckhpd %%ymm3,%%ymm2,%%ymm1; vmulps %%ymm1,%%ymm0,%%ymm1; vmovups %%ymm1,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#else + #define SAVE_L_m8n1 "vfmadd213ps (%2),%%ymm0,%%ymm4; vmovups %%ymm4,(%2);" + #define unit_save_m8n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%ymm2; vunpckhps "#c2","#c1",%%ymm3;"\ + "vunpcklpd %%ymm3,%%ymm2,%%ymm1; vfmadd213ps (%5), %%ymm0,%%ymm1; vmovups %%ymm1,(%5);"\ + "vunpckhpd %%ymm3,%%ymm2,%%ymm1; vfmadd213ps (%5,%3,1),%%ymm0,%%ymm1; vmovups %%ymm1,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#endif +#define SAVE_L_m8n2 "movq %2,%5;" unit_save_m8n2(%%ymm4,%%ymm5) +#define SAVE_L_m8n4 SAVE_L_m8n2 unit_save_m8n2(%%ymm6,%%ymm7) +#define SAVE_L_m8n8 SAVE_L_m8n4 unit_save_m8n2(%%ymm8,%%ymm9) unit_save_m8n2(%%ymm10,%%ymm11) +#define SAVE_L_m8n12 SAVE_L_m8n8 unit_save_m8n2(%%ymm12,%%ymm13) unit_save_m8n2(%%ymm14,%%ymm15) +#define SAVE_R_m8n4 unit_save_m8n2(%%ymm4,%%ymm5) unit_save_m8n2(%%ymm6,%%ymm7) +#define SAVE_R_m8n8 SAVE_R_m8n4 unit_save_m8n2(%%ymm8,%%ymm9) unit_save_m8n2(%%ymm10,%%ymm11) +#define SAVE_R_m8n12 SAVE_R_m8n8 unit_save_m8n2(%%ymm12,%%ymm13) unit_save_m8n2(%%ymm14,%%ymm15) +#define COMPUTE_L_m8(ndim,sim) \ + INIT_m8n##ndim START_SET_PAPB(8,ndim)\ + "movq %%r13,%4;"\ + KERNEL_HEAD_C_n##ndim(8)\ + "testq %4,%4; jz "#ndim""#sim"883f;"\ + #ndim""#sim"882:\n\t"\ + kernel_k1m8n##ndim(%1)\ + "decq %4; jnz "#ndim""#sim"882b;"\ + #ndim""#sim"883:\n\t"\ + KERNEL_TAIL_C_n##ndim(8)\ + SAVE_L_m8n##ndim "addq $32,%2;" +#define COMPUTE_R_m8(ndim,sim) \ + INIT_m8n##ndim 
RECOVER_PA(8)\ + "movq %%r13,%4;"\ + KERNEL_HEAD_R_n##ndim(8)\ + "testq %4,%4; jz "#ndim""#sim"883f;"\ + #ndim""#sim"882:\n\t"\ + kernel_k1m8n##ndim(%%r15)\ + "decq %4; jnz "#ndim""#sim"882b;"\ + #ndim""#sim"883:\n\t"\ + KERNEL_TAIL_R_n##ndim(8)\ + SAVE_R_m8n##ndim END_SET_PA(8) +#define COMPUTE_m8_n1 COMPUTE_L_m8(1,33833) END_SET_PA(8) +#define COMPUTE_m8_n2 COMPUTE_L_m8(2,33833) END_SET_PA(8) +#define COMPUTE_m8_n4 COMPUTE_L_m8(4,33833) END_SET_PA(8) +#define COMPUTE_m8_n8 COMPUTE_L_m8(8,33833) END_SET_PA(8) +#define COMPUTE_m8_n12 COMPUTE_L_m8(12,33833) END_SET_PA(8) +#define COMPUTE_m8_n16 COMPUTE_L_m8(12,33733) COMPUTE_R_m8(4,33933) +#define COMPUTE_m8_n20 COMPUTE_L_m8(12,33633) COMPUTE_R_m8(8,33933) +#define COMPUTE_m8_n24 COMPUTE_L_m8(12,33533) COMPUTE_R_m8(12,33933) +#define COMPUTE_m8(ndim) COMPUTE_m8_n##ndim + +/* m = 4 *//* xmm0 for alpha, xmm1-xmm3 for temporary use, xmm4-xmm15 for accumulators */ +#define kernel_k1m4n1(b_addr) \ + "vmovups (%0),%%xmm1; addq $16,%0;"\ + "vbroadcastss ("#b_addr"),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,"#b_addr";" +#define kernel_h_k1m4n2(b_addr) \ + "vmovsldup (%0),%%xmm1; vmovshdup (%0),%%xmm2; addq $16,%0;"\ + "vmovddup ("#b_addr"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm4; vfmadd231ps %%xmm2,%%xmm3,%%xmm5;" +#define kernel_k1m4n2(b_addr) kernel_h_k1m4n2(b_addr) "addq $8,"#b_addr";" +#define kernel_h_k1m4n4(b_addr) \ + kernel_h_k1m4n2(b_addr) "vmovddup 8("#b_addr"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm6; vfmadd231ps %%xmm2,%%xmm3,%%xmm7;" +#define kernel_k1m4n4(b_addr) kernel_h_k1m4n4(b_addr) "addq $16,"#b_addr";" +#define unit_gen_kernel_k1m4n4(c1,c2,c3,c4,k_no,...) \ + "vmovddup "#k_no"*16 ("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c1"; vfmadd231ps %%xmm2,%%xmm3,"#c2";"\ + "vmovddup "#k_no"*16+8("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c3"; vfmadd231ps %%xmm2,%%xmm3,"#c4";" +#define unit_kernel_k1m4n4(c1,c2,c3,c4,...) 
unit_gen_kernel_k1m4n4(c1,c2,c3,c4,0,__VA_ARGS__) +#define kernel_h_k1m4n8(b_addr) kernel_h_k1m4n4(b_addr) unit_kernel_k1m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11,b_addr,%%r12,1) +#define kernel_k1m4n8(b_addr) kernel_h_k1m4n8(b_addr) "addq $16,"#b_addr";" +#define kernel_h_k1m4n12(b_addr) kernel_h_k1m4n8(b_addr) unit_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,b_addr,%%r12,2) +#define kernel_k1m4n12(b_addr) kernel_h_k1m4n12(b_addr) "addq $16,"#b_addr";" +#define KERNEL_k1m4n1 kernel_k1m4n1(%1) +#define KERNEL_k1m4n2 kernel_k1m4n2(%1) +#define KERNEL_k1m4n4 kernel_k1m4n4(%1) +#define KERNEL_k1m4n8 kernel_k1m4n8(%1) +#define KERNEL_k1m4n12 kernel_k1m4n12(%1) +#define end_load_a_k1m4(k_no) "vmovsldup "#k_no"*16(%0),%%xmm1; vmovshdup "#k_no"*16(%0),%%xmm2;" +#define end_acc_nc2_k1m4(k_no) unit_gen_kernel_k1m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11,k_no,%1,%%r12,1) +#define end_acc_nc3_k1m4(k_no) unit_gen_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,k_no,%1,%%r12,2) +#define end_acc_nc4_k1m4(k_no) unit_gen_kernel_k1m4n4(%%xmm4,%%xmm5,%%xmm6,%%xmm7,k_no,%%r15) +#define end_acc_nc5_k1m4(k_no) unit_gen_kernel_k1m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11,k_no,%%r15,%%r12,1) +#define end_acc_nc6_k1m4(k_no) unit_gen_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,k_no,%%r15,%%r12,2) +#define INIT_m4n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define INIT_m4n2 INIT_m4n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m4n4 INIT_m4n2 "vpxor %%xmm6,%%xmm6,%%xmm6;vpxor %%xmm7,%%xmm7,%%xmm7;" +#define unit_init_m4n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m4n8 INIT_m4n4 unit_init_m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11) +#define INIT_m4n12 INIT_m4n8 unit_init_m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15) +#ifdef TRMMKERNEL + #define SAVE_L_m4n1 "vmulps %%xmm4,%%xmm0,%%xmm4; vmovups %%xmm4,(%2);" + #define unit_save_m4n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm2; vunpckhps "#c2","#c1",%%xmm3;"\ + "vunpcklpd %%xmm3,%%xmm2,%%xmm1;vmulps %%xmm1,%%xmm0,%%xmm1;vmovups %%xmm1,(%5);"\ + "vunpckhpd %%xmm3,%%xmm2,%%xmm1;vmulps %%xmm1,%%xmm0,%%xmm1;vmovups %%xmm1,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#else + #define SAVE_L_m4n1 "vfmadd213ps (%2),%%xmm0,%%xmm4; vmovups %%xmm4,(%2);" + #define unit_save_m4n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm2; vunpckhps "#c2","#c1",%%xmm3;"\ + "vunpcklpd %%xmm3,%%xmm2,%%xmm1;vfmadd213ps (%5), %%xmm0,%%xmm1;vmovups %%xmm1,(%5);"\ + "vunpckhpd %%xmm3,%%xmm2,%%xmm1;vfmadd213ps (%5,%3,1),%%xmm0,%%xmm1;vmovups %%xmm1,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#endif +#define SAVE_L_m4n2 "movq %2,%5;" unit_save_m4n2(%%xmm4,%%xmm5) +#define SAVE_L_m4n4 SAVE_L_m4n2 unit_save_m4n2(%%xmm6,%%xmm7) +#define SAVE_L_m4n8 SAVE_L_m4n4 unit_save_m4n2(%%xmm8,%%xmm9) unit_save_m4n2(%%xmm10,%%xmm11) +#define SAVE_L_m4n12 SAVE_L_m4n8 unit_save_m4n2(%%xmm12,%%xmm13) unit_save_m4n2(%%xmm14,%%xmm15) +#define SAVE_R_m4n4 unit_save_m4n2(%%xmm4,%%xmm5) unit_save_m4n2(%%xmm6,%%xmm7) +#define SAVE_R_m4n8 SAVE_R_m4n4 unit_save_m4n2(%%xmm8,%%xmm9) unit_save_m4n2(%%xmm10,%%xmm11) +#define SAVE_R_m4n12 SAVE_R_m4n8 unit_save_m4n2(%%xmm12,%%xmm13) unit_save_m4n2(%%xmm14,%%xmm15) +#define COMPUTE_L_m4(ndim,sim) \ + INIT_m4n##ndim START_SET_PAPB(4,ndim)\ + "movq %%r13,%4;"\ + KERNEL_HEAD_C_n##ndim(4)\ + "testq %4,%4; jz "#ndim""#sim"443f;"\ + #ndim""#sim"442:\n\t"\ + kernel_k1m4n##ndim(%1)\ + "decq %4; jnz "#ndim""#sim"442b;"\ + #ndim""#sim"443:\n\t"\ + KERNEL_TAIL_C_n##ndim(4)\ + SAVE_L_m4n##ndim "addq $16,%2;" +#define COMPUTE_R_m4(ndim,sim) \ + INIT_m4n##ndim RECOVER_PA(4)\ + 
"movq %%r13,%4;"\ + KERNEL_HEAD_R_n##ndim(4)\ + "testq %4,%4; jz "#ndim""#sim"443f;"\ + #ndim""#sim"442:\n\t"\ + kernel_k1m4n##ndim(%%r15)\ + "decq %4; jnz "#ndim""#sim"442b;"\ + #ndim""#sim"443:\n\t"\ + KERNEL_TAIL_R_n##ndim(4)\ + SAVE_R_m4n##ndim END_SET_PA(4) +#define COMPUTE_m4_n1 COMPUTE_L_m4(1,55855) END_SET_PA(4) +#define COMPUTE_m4_n2 COMPUTE_L_m4(2,55855) END_SET_PA(4) +#define COMPUTE_m4_n4 COMPUTE_L_m4(4,55855) END_SET_PA(4) +#define COMPUTE_m4_n8 COMPUTE_L_m4(8,55855) END_SET_PA(4) +#define COMPUTE_m4_n12 COMPUTE_L_m4(12,55855) END_SET_PA(4) +#define COMPUTE_m4_n16 COMPUTE_L_m4(12,55755) COMPUTE_R_m4(4,55955) +#define COMPUTE_m4_n20 COMPUTE_L_m4(12,55655) COMPUTE_R_m4(8,55955) +#define COMPUTE_m4_n24 COMPUTE_L_m4(12,55555) COMPUTE_R_m4(12,55955) +#define COMPUTE_m4(ndim) COMPUTE_m4_n##ndim + +/* m = 2 *//* xmm0 for alpha, xmm1-xmm3 for temporary use, xmm4-xmm15 for accumulators */ +#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m2n1 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,%1;" +#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define KERNEL_k1m2n2 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "vbroadcastss 4(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm5;"\ + "addq $8,%1;" +#ifdef TRMMKERNEL + #define SAVE_h_m2n1 "vmulps %%xmm4,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);" + #define SAVE_h_m2n2 SAVE_h_m2n1 "vmulps %%xmm5,%%xmm0,%%xmm5; vmovsd %%xmm5,(%2,%3,1);" +#else + #define SAVE_h_m2n1 "vmovsd (%2),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);" + #define SAVE_h_m2n2 SAVE_h_m2n1 "vmovsd (%2,%3,1),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm5; vmovsd %%xmm5,(%2,%3,1);" +#endif +#define INIT_m2n4 INIT_m2n2 +#define INIT_m2n8 INIT_m2n4 "vpxor %%xmm6,%%xmm6,%%xmm6; vpxor %%xmm7,%%xmm7,%%xmm7;" +#define INIT_m2n12 INIT_m2n8 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;" +#define INIT_m2n16 INIT_m2n12 "vpxor %%xmm10,%%xmm10,%%xmm10; vpxor %%xmm11,%%xmm11,%%xmm11;" +#define INIT_m2n20 INIT_m2n16 "vpxor %%xmm12,%%xmm12,%%xmm12; vpxor %%xmm13,%%xmm13,%%xmm13;" +#define INIT_m2n24 INIT_m2n20 "vpxor %%xmm14,%%xmm14,%%xmm14; vpxor %%xmm15,%%xmm15,%%xmm15;" +#define unit_gen_kernel_k1m2n4(c1,c2,k_no,...) 
\ + "vmovups "#k_no"*16("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c1"; vfmadd231ps %%xmm2,%%xmm3,"#c2";" +#define KERNEL_h_k1m2n4 \ + "vbroadcastss (%0),%%xmm1; vbroadcastss 4(%0),%%xmm2; addq $8,%0;" unit_gen_kernel_k1m2n4(%%xmm4,%%xmm5,0,%1) +#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $16,%1;" +#define KERNEL_h_k1m2n8 KERNEL_h_k1m2n4 unit_gen_kernel_k1m2n4(%%xmm6,%%xmm7,0,%1,%%r12,1) +#define KERNEL_k1m2n8 KERNEL_h_k1m2n8 "addq $16,%1;" +#define KERNEL_k1m2n12 KERNEL_h_k1m2n8 unit_gen_kernel_k1m2n4(%%xmm8,%%xmm9,0,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m2n16 KERNEL_k1m2n12 unit_gen_kernel_k1m2n4(%%xmm10,%%xmm11,0,%%r15) +#define KERNEL_k1m2n16 KERNEL_h_k1m2n16 "addq $16,%%r15;" +#define KERNEL_h_k1m2n20 KERNEL_h_k1m2n16 unit_gen_kernel_k1m2n4(%%xmm12,%%xmm13,0,%%r15,%%r12,1) +#define KERNEL_k1m2n20 KERNEL_h_k1m2n20 "addq $16,%%r15;" +#define KERNEL_h_k1m2n24 KERNEL_h_k1m2n20 unit_gen_kernel_k1m2n4(%%xmm14,%%xmm15,0,%%r15,%%r12,2) +#define KERNEL_k1m2n24 KERNEL_h_k1m2n24 "addq $16,%%r15;" +#define end_load_a_k1m2(k_no) "vbroadcastss "#k_no"*8(%0),%%xmm1; vbroadcastss "#k_no"*8+4(%0),%%xmm2;" +#define end_acc_nc2_k1m2(k_no) unit_gen_kernel_k1m2n4(%%xmm6,%%xmm7,k_no,%1,%%r12,1) +#define end_acc_nc3_k1m2(k_no) unit_gen_kernel_k1m2n4(%%xmm8,%%xmm9,k_no,%1,%%r12,2) +#define end_acc_nc4_k1m2(k_no) unit_gen_kernel_k1m2n4(%%xmm10,%%xmm11,k_no,%%r15) +#define end_acc_nc5_k1m2(k_no) unit_gen_kernel_k1m2n4(%%xmm12,%%xmm13,k_no,%%r15,%%r12,1) +#define end_acc_nc6_k1m2(k_no) unit_gen_kernel_k1m2n4(%%xmm14,%%xmm15,k_no,%%r15,%%r12,2) +#ifdef TRMMKERNEL + #define unit_save_m2n4(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm1; vunpckhps "#c2","#c1",%%xmm2;"\ + "vmulps %%xmm1,%%xmm0,%%xmm1; vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;"\ + "vmulps %%xmm2,%%xmm0,%%xmm2; vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#else + #define unit_save_m2n4(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm1; vunpckhps "#c2","#c1",%%xmm2;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1; vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2; vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#endif +#define SAVE_h_m2n4 "movq %2,%5;" unit_save_m2n4(%%xmm4,%%xmm5) +#define SAVE_h_m2n8 SAVE_h_m2n4 unit_save_m2n4(%%xmm6,%%xmm7) +#define SAVE_h_m2n12 SAVE_h_m2n8 unit_save_m2n4(%%xmm8,%%xmm9) +#define SAVE_h_m2n16 SAVE_h_m2n12 unit_save_m2n4(%%xmm10,%%xmm11) +#define SAVE_h_m2n20 SAVE_h_m2n16 unit_save_m2n4(%%xmm12,%%xmm13) +#define SAVE_h_m2n24 SAVE_h_m2n20 unit_save_m2n4(%%xmm14,%%xmm15) +#define SAVE_m2(ndim) SAVE_h_m2n##ndim "addq $8,%2;" +#define COMPUTE_m2(ndim) \ + INIT_m2n##ndim START_SET_PAPB(2,ndim)\ + "movq %%r13,%4;"\ + KERNEL_HEAD_C_n##ndim(2)\ + "testq %4,%4; jz "#ndim"002022f;"\ + #ndim"002021:\n\t"\ + KERNEL_k1m2n##ndim "decq %4; jnz "#ndim"002021b;"\ + #ndim"002022:\n\t"\ + KERNEL_TAIL_C_n##ndim(2)\ + SAVE_m2(ndim) END_SET_PA(2) + +/* m = 1 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm9 for accumulators */ +#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m1n1 \ + "vmovss (%1),%%xmm3; addq $4,%1;"\ + "vmovss (%0),%%xmm1; vfmadd231ss %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#ifdef TRMMKERNEL + #define SAVE_h_m1n1 "vmulss %%xmm4,%%xmm0,%%xmm4; vmovss %%xmm4,(%2);" +#else + #define SAVE_h_m1n1 "vfmadd213ss (%2),%%xmm0,%%xmm4; vmovss %%xmm4,(%2);" +#endif 
+#define INIT_m1n2 INIT_m1n1 +#define KERNEL_k1m1n2 \ + "vmovsd (%1),%%xmm3; addq $8,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#ifdef TRMMKERNEL + #define SAVE_h_m1n2 \ + "vmulps %%xmm4,%%xmm0,%%xmm4;"\ + "vmovss %%xmm4,(%2); vextractps $1,%%xmm4,(%2,%3,1);" +#else + #define SAVE_h_m1n2 \ + "vmovss (%2),%%xmm3; vinsertps $16,(%2,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm4;"\ + "vmovss %%xmm4,(%2); vextractps $1,%%xmm4,(%2,%3,1);" +#endif +#define INIT_m1n4 INIT_m1n2 +#define INIT_m1n8 INIT_m1n4 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m1n12 INIT_m1n8 "vpxor %%xmm6,%%xmm6,%%xmm6;" +#define INIT_m1n16 INIT_m1n12 "vpxor %%xmm7,%%xmm7,%%xmm7;" +#define INIT_m1n20 INIT_m1n16 "vpxor %%xmm8,%%xmm8,%%xmm8;" +#define INIT_m1n24 INIT_m1n20 "vpxor %%xmm9,%%xmm9,%%xmm9;" +#define KERNEL_h_k1m1n4 \ + "vbroadcastss (%0),%%xmm1; addq $4,%0; vfmadd231ps (%1),%%xmm1,%%xmm4;" +#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $16,%1;" +#define KERNEL_h_k1m1n8 KERNEL_h_k1m1n4 "vfmadd231ps (%1,%%r12,1),%%xmm1,%%xmm5;" +#define KERNEL_k1m1n8 KERNEL_h_k1m1n8 "addq $16,%1;" +#define KERNEL_k1m1n12 KERNEL_h_k1m1n8 "vfmadd231ps (%1,%%r12,2),%%xmm1,%%xmm6; addq $16,%1;" +#define KERNEL_h_k1m1n16 KERNEL_k1m1n12 "vfmadd231ps (%%r15),%%xmm1,%%xmm7;" +#define KERNEL_k1m1n16 KERNEL_h_k1m1n16 "addq $16,%%r15;" +#define KERNEL_h_k1m1n20 KERNEL_h_k1m1n16 "vfmadd231ps (%%r15,%%r12,1),%%xmm1,%%xmm8;" +#define KERNEL_k1m1n20 KERNEL_h_k1m1n20 "addq $16,%%r15;" +#define KERNEL_h_k1m1n24 KERNEL_h_k1m1n20 "vfmadd231ps (%%r15,%%r12,2),%%xmm1,%%xmm9;" +#define KERNEL_k1m1n24 KERNEL_h_k1m1n24 "addq $16,%%r15;" +#define end_load_a_k1m1(k_no) "vbroadcastss "#k_no"*4(%0),%%xmm1;" +#define end_acc_nc2_k1m1(k_no) "vfmadd231ps "#k_no"*16(%1,%%r12,1),%%xmm1,%%xmm5;" +#define end_acc_nc3_k1m1(k_no) "vfmadd231ps "#k_no"*16(%1,%%r12,2),%%xmm1,%%xmm6;" +#define end_acc_nc4_k1m1(k_no) "vfmadd231ps "#k_no"*16(%%r15),%%xmm1,%%xmm7;" +#define end_acc_nc5_k1m1(k_no) "vfmadd231ps "#k_no"*16(%%r15,%%r12,1),%%xmm1,%%xmm8;" +#define end_acc_nc6_k1m1(k_no) "vfmadd231ps "#k_no"*16(%%r15,%%r12,2),%%xmm1,%%xmm9;" +#ifdef TRMMKERNEL + #define unit_save_m1n4(c1) \ + "vmulps "#c1",%%xmm0,"#c1"; vpxor %%xmm10,%%xmm10,%%xmm10; vmovsd "#c1",%%xmm10,%%xmm2; vmovhlps "#c1",%%xmm10,%%xmm1;"\ + "vmovss %%xmm2,(%5); vextractps $1,%%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovss %%xmm1,(%5); vextractps $1,%%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;" +#else + #define unit_save_m1n4(c1) \ + "vpxor %%xmm10,%%xmm10,%%xmm10; vmovsd "#c1",%%xmm10,%%xmm2; vmovhlps "#c1",%%xmm10,%%xmm1;"\ + "vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\ + "vmovss %%xmm2,(%5); vextractps $1,%%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\ + "vmovss %%xmm1,(%5); vextractps $1,%%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;" +#endif +#define SAVE_h_m1n4 "movq %2,%5;" unit_save_m1n4(%%xmm4) +#define SAVE_h_m1n8 SAVE_h_m1n4 unit_save_m1n4(%%xmm5) +#define SAVE_h_m1n12 SAVE_h_m1n8 unit_save_m1n4(%%xmm6) +#define SAVE_h_m1n16 SAVE_h_m1n12 unit_save_m1n4(%%xmm7) +#define SAVE_h_m1n20 SAVE_h_m1n16 unit_save_m1n4(%%xmm8) +#define SAVE_h_m1n24 SAVE_h_m1n20 unit_save_m1n4(%%xmm9) +#define SAVE_m1(ndim) SAVE_h_m1n##ndim "addq $4,%2;" +#define COMPUTE_m1(ndim) \ + INIT_m1n##ndim START_SET_PAPB(1,ndim)\ + "movq %%r13,%4;"\ + KERNEL_HEAD_C_n##ndim(1)\ + "testq %4,%4; jz "#ndim"001012f;"\ + #ndim"001011:\n\t"\ + KERNEL_k1m1n##ndim "decq %4; 
jnz "#ndim"001011b;"\ + #ndim"001012:\n\t"\ + KERNEL_TAIL_C_n##ndim(1)\ + SAVE_m1(ndim) END_SET_PA(1) + +/* %7 = "m"(ALPHA), %8 = "m"(M), %9 = "m"(K), %10 = "m"(off) */ + +#ifdef TRMMKERNEL + #if BACKWARDS == 1 + #define OFFSET_TO_K "movq %9,%%r13; subq %10,%%r13;" + #else + #define OFFSET_TO_K "movq %10,%%r13;" + #endif +#else + #define OFFSET_TO_K "movq %9,%%r13;" +#endif +#if defined(TRMMKERNEL) && !defined(LEFT) + #if BACKWARDS == 1 + #define START_UPDATE_OFFSET(ndim) {} + #define END_UPDATE_OFFSET(ndim) {off += (ndim);} + #else + #define START_UPDATE_OFFSET(ndim) {off += (ndim)>4 ? 4:(ndim);} + #define END_UPDATE_OFFSET(ndim) {off += (ndim)>4 ? ((ndim)-4):0;} + #endif +#else + #define START_UPDATE_OFFSET(ndim) {} + #define END_UPDATE_OFFSET(ndim) {} +#endif +#if defined(TRMMKERNEL) && defined(LEFT) + #if BACKWARDS == 1 + #define START_UPDATE_K(mdim) "" + #define END_UPDATE_K(mdim) "subq $"#mdim",%%r13;" + #else + #define START_UPDATE_K(mdim) "addq $"#mdim",%%r13;" + #define END_UPDATE_K(mdim) "" + #endif +#else + #define START_UPDATE_K(mdim) "" + #define END_UPDATE_K(mdim) "" +#endif +#define COMPUTE(ndim) {\ + next_b = b_pointer + ndim * K; START_UPDATE_OFFSET(ndim)\ + __asm__ __volatile__(\ + "vbroadcastss %7,%%zmm0;"\ + OFFSET_TO_K "movq %9,%%r12; salq $4,%%r12; movq %1,%%r14; movq %8,%%r11;"\ + "cmpq $16,%%r11;jb 33101"#ndim"f;"\ + "33109"#ndim":\n\t"\ + START_UPDATE_K(16) COMPUTE_m16(ndim) END_UPDATE_K(16)\ + "subq $16,%%r11;cmpq $16,%%r11;jnb 33109"#ndim"b;"\ + "33101"#ndim":\n\t"\ + "cmpq $8,%%r11;jb 33102"#ndim"f;"\ + START_UPDATE_K(8) COMPUTE_m8(ndim) END_UPDATE_K(8)\ + "subq $8,%%r11;"\ + "33102"#ndim":\n\t"\ + "cmpq $4,%%r11;jb 33103"#ndim"f;"\ + START_UPDATE_K(4) COMPUTE_m4(ndim) END_UPDATE_K(4)\ + "subq $4,%%r11;"\ + "33103"#ndim":\n\t"\ + "cmpq $2,%%r11;jb 33104"#ndim"f;"\ + START_UPDATE_K(2) COMPUTE_m2(ndim) END_UPDATE_K(2)\ + "subq $2,%%r11;"\ + "33104"#ndim":\n\t"\ + "testq %%r11,%%r11;jz 33105"#ndim"f;"\ + START_UPDATE_K(1) COMPUTE_m1(ndim) END_UPDATE_K(1)\ + "33105"#ndim":\n\t"\ + "movq %%r14,%1; vzeroupper;"\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(k_counter),"+r"(ctemp),"+r"(next_b)\ + :"m"(ALPHA),"m"(M),"m"(K),"m"(off):"r10","r11","r12","r13","r14","r15","cc","memory",\ + "zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15",\ + "zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31");\ + a_pointer -= M * K; b_pointer += ndim * K; c_pointer += LDC * ndim - M; END_UPDATE_OFFSET(ndim)\ +} +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC +#ifdef TRMMKERNEL +,BLASLONG offset +#endif +) +{ + if(m==0||n==0) return 0; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(float);float ALPHA = alpha; + int64_t M = (int64_t)m, K = (int64_t)k, k_counter = K, off = 0; + BLASLONG n_count = n; + float *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B; +#ifdef TRMMKERNEL + #ifdef LEFT + off = offset; + #else + off = -offset; + #endif +#endif + for(;n_count>23;n_count-=24) COMPUTE(24) + for(;n_count>19;n_count-=20) COMPUTE(20) + for(;n_count>15;n_count-=16) COMPUTE(16) + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) COMPUTE(2) + if(n_count>0) COMPUTE(1) + return 0; +} +#ifndef 
TRMMKERNEL
+ #include <immintrin.h>
+ #include "sgemm_direct_skylakex.c"
+#endif
diff --git a/kernel/x86_64/sgemm_kernel_16x4_skylakex_3.c b/kernel/x86_64/sgemm_kernel_16x4_skylakex_3.c
new file mode 100644
index 000000000..f3d614242
--- /dev/null
+++ b/kernel/x86_64/sgemm_kernel_16x4_skylakex_3.c
@@ -0,0 +1,515 @@
+/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store */
+/* r10 to assist prefetch, r12 = k << 4(const), r13 = k(const), r14 = b_head_pos(const), r15 = %1 + 3*r12 */
+
+#include "common.h"
+#include <stdint.h>
+
+/* m = 16 */ /* zmm8-zmm31 for accumulators, zmm4-zmm7 for temporary use, zmm0 for alpha */
+#define KERNEL_k1m16n1 \
+ "vmovups (%0),%%zmm4; addq $64,%0;"\
+ "vbroadcastss (%1),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,%%zmm8;"\
+ "addq $4,%1;"
+#define KERNEL_h_k1m16n2 \
+ "vmovsldup (%0),%%zmm4; vmovshdup (%0),%%zmm5; prefetcht0 512(%0); addq $64,%0;"\
+ "vbroadcastsd (%1),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,%%zmm8; vfmadd231ps %%zmm5,%%zmm6,%%zmm9;"
+#define KERNEL_k1m16n2 KERNEL_h_k1m16n2 "addq $8,%1;"
+#define KERNEL_h_k1m16n4 KERNEL_h_k1m16n2 "vbroadcastsd 8(%1),%%zmm7; vfmadd231ps %%zmm4,%%zmm7,%%zmm10; vfmadd231ps %%zmm5,%%zmm7,%%zmm11;"
+#define KERNEL_k1m16n4 KERNEL_h_k1m16n4 "addq $16,%1;"
+#define unit_kernel_k1m16n4(c1,c2,c3,c4, ...) \
+ "vbroadcastsd ("#__VA_ARGS__"),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,"#c1"; vfmadd231ps %%zmm5,%%zmm6,"#c2";"\
+ "vbroadcastsd 8("#__VA_ARGS__"),%%zmm7; vfmadd231ps %%zmm4,%%zmm7,"#c3"; vfmadd231ps %%zmm5,%%zmm7,"#c4";"
+#define KERNEL_h_k1m16n8 KERNEL_h_k1m16n4 unit_kernel_k1m16n4(%%zmm12,%%zmm13,%%zmm14,%%zmm15,%1,%%r12,1)
+#define KERNEL_k1m16n8 KERNEL_h_k1m16n8 "addq $16,%1;"
+#define KERNEL_h_k1m16n12 KERNEL_h_k1m16n8 unit_kernel_k1m16n4(%%zmm16,%%zmm17,%%zmm18,%%zmm19,%1,%%r12,2)
+#define KERNEL_k1m16n12 KERNEL_h_k1m16n12 "addq $16,%1;"
+#define KERNEL_h_k1m16n16 KERNEL_k1m16n12 unit_kernel_k1m16n4(%%zmm20,%%zmm21,%%zmm22,%%zmm23,%%r15)
+#define KERNEL_k1m16n16 KERNEL_h_k1m16n16 "addq $16,%%r15;"
+#define KERNEL_h_k1m16n20 KERNEL_h_k1m16n16 unit_kernel_k1m16n4(%%zmm24,%%zmm25,%%zmm26,%%zmm27,%%r15,%%r12,1)
+#define KERNEL_k1m16n20 KERNEL_h_k1m16n20 "addq $16,%%r15;"
+#define KERNEL_h_k1m16n24 KERNEL_h_k1m16n20 unit_kernel_k1m16n4(%%zmm28,%%zmm29,%%zmm30,%%zmm31,%%r15,%%r12,2)
+#define KERNEL_k1m16n24 KERNEL_h_k1m16n24 "addq $16,%%r15;"
+#define INIT_m16n1 "vpxorq %%zmm8,%%zmm8,%%zmm8;"
+#define INIT_m16n2 INIT_m16n1 "vpxorq %%zmm9,%%zmm9,%%zmm9;"
+#define INIT_m16n4 INIT_m16n2 "vpxorq %%zmm10,%%zmm10,%%zmm10;vpxorq %%zmm11,%%zmm11,%%zmm11;"
+#define unit_init_m16n4(c1,c2,c3,c4) \
+ "vpxorq "#c1","#c1","#c1";vpxorq "#c2","#c2","#c2";vpxorq "#c3","#c3","#c3";vpxorq "#c4","#c4","#c4";"
+#define INIT_m16n8 INIT_m16n4 unit_init_m16n4(%%zmm12,%%zmm13,%%zmm14,%%zmm15)
+#define INIT_m16n12 INIT_m16n8 unit_init_m16n4(%%zmm16,%%zmm17,%%zmm18,%%zmm19)
+#define INIT_m16n16 INIT_m16n12 unit_init_m16n4(%%zmm20,%%zmm21,%%zmm22,%%zmm23)
+#define INIT_m16n20 INIT_m16n16 unit_init_m16n4(%%zmm24,%%zmm25,%%zmm26,%%zmm27)
+#define INIT_m16n24 INIT_m16n20 unit_init_m16n4(%%zmm28,%%zmm29,%%zmm30,%%zmm31)
+#define SAVE_h_m16n1 "vfmadd213ps (%2),%%zmm0,%%zmm8; vmovups %%zmm8,(%2);"
+#define unit_save_m16n2(c1,c2) \
+ "vunpcklps "#c2","#c1",%%zmm6; vunpckhps "#c2","#c1",%%zmm7; vunpcklpd %%zmm7,%%zmm6,%%zmm4; vunpckhpd %%zmm7,%%zmm6,%%zmm5;"\
+ "vfmadd213ps (%5),%%zmm0,%%zmm4; vfmadd213ps (%5,%3,1),%%zmm0,%%zmm5;"\
+ "vmovups %%zmm4,(%5); vmovups %%zmm5,(%5,%3,1); leaq (%5,%3,2),%5;"
+#define SAVE_h_m16n2
"movq %2,%5;" unit_save_m16n2(%%zmm8,%%zmm9) +#define SAVE_h_m16n4 SAVE_h_m16n2 unit_save_m16n2(%%zmm10,%%zmm11) +#define SAVE_h_m16n8 SAVE_h_m16n4 unit_save_m16n2(%%zmm12,%%zmm13) unit_save_m16n2(%%zmm14,%%zmm15) +#define SAVE_h_m16n12 SAVE_h_m16n8 unit_save_m16n2(%%zmm16,%%zmm17) unit_save_m16n2(%%zmm18,%%zmm19) +#define SAVE_h_m16n16 SAVE_h_m16n12 unit_save_m16n2(%%zmm20,%%zmm21) unit_save_m16n2(%%zmm22,%%zmm23) +#define SAVE_h_m16n20 SAVE_h_m16n16 unit_save_m16n2(%%zmm24,%%zmm25) unit_save_m16n2(%%zmm26,%%zmm27) +#define SAVE_h_m16n24 SAVE_h_m16n20 unit_save_m16n2(%%zmm28,%%zmm29) unit_save_m16n2(%%zmm30,%%zmm31) +#define SAVE_m16(ndim) SAVE_h_m16n##ndim "addq $64,%2;" +#define COMPUTE_m16(ndim) \ + INIT_m16n##ndim\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5; xorq %%r10,%%r10;"\ + "cmpq $16,%4; jb "#ndim"016162f;"\ + #ndim"016161:\n\t"\ + "cmpq $126,%%r10; movq $126,%%r10; cmoveq %3,%%r10;"\ + KERNEL_k1m16n##ndim\ + KERNEL_k1m16n##ndim\ + "prefetcht1 (%5); subq $63,%5; addq %%r10,%5;"\ + KERNEL_k1m16n##ndim\ + KERNEL_k1m16n##ndim\ + "prefetcht1 (%6); addq $32,%6;"\ + "subq $4,%4; cmpq $16,%4; jnb "#ndim"016161b;"\ + "movq %2,%5;"\ + #ndim"016162:\n\t"\ + "testq %4,%4; jz "#ndim"016164f;"\ + #ndim"016163:\n\t"\ + "prefetcht0 (%5); prefetcht0 63(%5); prefetcht0 (%5,%3,1); prefetcht0 63(%5,%3,1);"\ + KERNEL_k1m16n##ndim\ + "leaq (%5,%3,2),%5; decq %4; jnz "#ndim"016163b;"\ + #ndim"016164:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14);"\ + SAVE_m16(ndim) +#define unit_save_m16n2_rscr(c1,c2,scr_off) \ + "vunpcklps "#c2","#c1",%%zmm6; vunpckhps "#c2","#c1",%%zmm7; vunpcklpd %%zmm7,%%zmm6,%%zmm4; vunpckhpd %%zmm7,%%zmm6,%%zmm5;"\ + "vmovups "#scr_off"(%7),%%zmm6; vfmadd213ps -64(%5),%%zmm0,%%zmm6; vfmadd213ps (%5),%%zmm0,%%zmm4;"\ + "vmovups %%zmm6,-64(%5); vmovups %%zmm4,(%5);"\ + "vmovups "#scr_off"+64(%7),%%zmm6; vfmadd213ps -64(%5,%3,1),%%zmm0,%%zmm6; vfmadd213ps (%5,%3,1),%%zmm0,%%zmm5;"\ + "vmovups %%zmm6,-64(%5,%3,1); vmovups %%zmm5,(%5,%3,1); leaq (%5,%3,2),%5;" +#define unit_save_m16n2_wscr(c1,c2,scr_off) \ + "vunpcklps "#c2","#c1",%%zmm6; vunpckhps "#c2","#c1",%%zmm7; vunpcklpd %%zmm7,%%zmm6,%%zmm4; vunpckhpd %%zmm7,%%zmm6,%%zmm5;"\ + "vmovups %%zmm4,"#scr_off"(%7); vmovups %%zmm5,"#scr_off"+64(%7);" +#define COMPUTE_m16n24_LSAVE \ + INIT_m16n24\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\ + "cmpq $16,%4; jb 24716162f; movq $16,%4;"\ + "24716161:\n\t"\ + KERNEL_k1m16n24 "addq $4,%4; testq $12,%4; movq $172,%%r10; cmovz %3,%%r10;"\ + KERNEL_k1m16n24 "prefetcht1 -64(%5); leaq -129(%5,%%r10,1),%5;"\ + KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\ + KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24716161b;"\ + "movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\ + "24716162:\n\t"\ + "testq %4,%4; jz 24716164f; movq %7,%%r10;"\ + "24716163:\n\t"\ + "prefetcht0 -64(%5); prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5;"\ + KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24716163b;"\ + "24716164:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\ + unit_save_m16n2_rscr(%%zmm8,%%zmm9,0) unit_save_m16n2_rscr(%%zmm10,%%zmm11,128) unit_save_m16n2_rscr(%%zmm12,%%zmm13,256)\ + unit_save_m16n2_rscr(%%zmm14,%%zmm15,384) unit_save_m16n2_rscr(%%zmm16,%%zmm17,512) unit_save_m16n2_rscr(%%zmm18,%%zmm19,640)\ + unit_save_m16n2_wscr(%%zmm20,%%zmm21,0) unit_save_m16n2_wscr(%%zmm22,%%zmm23,128) unit_save_m16n2_wscr(%%zmm24,%%zmm25,256)\ + 
unit_save_m16n2_wscr(%%zmm26,%%zmm27,384) unit_save_m16n2_wscr(%%zmm28,%%zmm29,512) unit_save_m16n2_wscr(%%zmm30,%%zmm31,640) +#define COMPUTE_m16n24_RSAVE \ + INIT_m16n24 "leaq (%2,%3,8),%2; leaq (%2,%3,4),%2;"\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\ + "cmpq $16,%4; jb 24616162f; movq $16,%4;"\ + "24616161:\n\t"\ + KERNEL_k1m16n24 "addq $4,%4; testq $12,%4; movq $172,%%r10; cmovz %3,%%r10;"\ + KERNEL_k1m16n24 "prefetcht1 -64(%5); leaq -129(%5,%%r10,1),%5;"\ + KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\ + KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24616161b;"\ + "movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\ + "24616162:\n\t"\ + "testq %4,%4; jz 24616164f; movq %7,%%r10;"\ + "24616163:\n\t"\ + "prefetcht0 -64(%5); prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5;"\ + KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24616163b;"\ + "24616164:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\ + unit_save_m16n2_rscr(%%zmm20,%%zmm21,0) unit_save_m16n2_rscr(%%zmm22,%%zmm23,128) unit_save_m16n2_rscr(%%zmm24,%%zmm25,256)\ + unit_save_m16n2_rscr(%%zmm26,%%zmm27,384) unit_save_m16n2_rscr(%%zmm28,%%zmm29,512) unit_save_m16n2_rscr(%%zmm30,%%zmm31,640)\ + unit_save_m16n2_wscr(%%zmm8,%%zmm9,0) unit_save_m16n2_wscr(%%zmm10,%%zmm11,128) unit_save_m16n2_wscr(%%zmm12,%%zmm13,256)\ + unit_save_m16n2_wscr(%%zmm14,%%zmm15,384) unit_save_m16n2_wscr(%%zmm16,%%zmm17,512) unit_save_m16n2_wscr(%%zmm18,%%zmm19,640)\ + "negq %3; leaq (%2,%3,8),%2; leaq (%2,%3,4),%2; negq %3;" +#define COMPUTE_m16n24_LINIT \ + INIT_m16n24\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\ + "cmpq $16,%4; jb 24516162f; movq $16,%4;"\ + "24516161:\n\t"\ + KERNEL_k1m16n24 "addq $4,%4; testq $12,%4; movq $84,%%r10; cmovz %3,%%r10;"\ + KERNEL_k1m16n24 "prefetcht1 (%5); leaq -63(%5,%%r10,1),%5;"\ + KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\ + KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24516161b;"\ + "movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\ + "24516162:\n\t"\ + "testq %4,%4; jz 24516164f; movq %7,%%r10;"\ + "24516163:\n\t"\ + "prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5;"\ + KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24516163b;"\ + "24516164:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\ + unit_save_m16n2(%%zmm8,%%zmm9) unit_save_m16n2(%%zmm10,%%zmm11) unit_save_m16n2(%%zmm12,%%zmm13)\ + unit_save_m16n2(%%zmm14,%%zmm15) unit_save_m16n2(%%zmm16,%%zmm17) unit_save_m16n2(%%zmm18,%%zmm19)\ + unit_save_m16n2_wscr(%%zmm20,%%zmm21,0) unit_save_m16n2_wscr(%%zmm22,%%zmm23,128) unit_save_m16n2_wscr(%%zmm24,%%zmm25,256)\ + unit_save_m16n2_wscr(%%zmm26,%%zmm27,384) unit_save_m16n2_wscr(%%zmm28,%%zmm29,512) unit_save_m16n2_wscr(%%zmm30,%%zmm31,640) +#define COMPUTE_m16n24_LTAIL \ + INIT_m16n24\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\ + "cmpq $16,%4; jb 24416162f; movq $16,%4;"\ + "24416161:\n\t"\ + KERNEL_k1m16n24 "addq $4,%4; testq $4,%4; movq $126,%%r10; cmovz %3,%%r10;"\ + KERNEL_k1m16n24 "prefetcht1 -64(%5); prefetcht1 (%5); leaq -63(%5,%%r10,1),%5;"\ + KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\ + KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24416161b;"\ + "movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\ + "24416162:\n\t"\ + "testq %4,%4; jz 24416164f; movq %7,%%r10;"\ + "24416163:\n\t"\ + "prefetcht0 -64(%5); prefetcht0 (%5); 
prefetcht0 63(%5); prefetcht0 -64(%5,%3,1); prefetcht0 (%5,%3,1); prefetcht0 63(%5,%3,1); leaq (%5,%3,2),%5;"\ + KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24416163b;"\ + "24416164:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\ + unit_save_m16n2_rscr(%%zmm8,%%zmm9,0) unit_save_m16n2_rscr(%%zmm10,%%zmm11,128) unit_save_m16n2_rscr(%%zmm12,%%zmm13,256)\ + unit_save_m16n2_rscr(%%zmm14,%%zmm15,384) unit_save_m16n2_rscr(%%zmm16,%%zmm17,512) unit_save_m16n2_rscr(%%zmm18,%%zmm19,640)\ + unit_save_m16n2(%%zmm20,%%zmm21) unit_save_m16n2(%%zmm22,%%zmm23) unit_save_m16n2(%%zmm24,%%zmm25)\ + unit_save_m16n2(%%zmm26,%%zmm27) unit_save_m16n2(%%zmm28,%%zmm29) unit_save_m16n2(%%zmm30,%%zmm31) +#define COMPUTE_m16n24_RTAIL \ + INIT_m16n24\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\ + "cmpq $16,%4; jb 24416162f; movq $16,%4;"\ + "24416161:\n\t"\ + KERNEL_k1m16n24 "addq $4,%4; testq $4,%4; movq $126,%%r10; cmovz %3,%%r10;"\ + KERNEL_k1m16n24 "prefetcht1 -64(%5); prefetcht1 (%5); leaq -63(%5,%%r10,1),%5;"\ + KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\ + KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24416161b;"\ + "movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\ + "24416162:\n\t"\ + "testq %4,%4; jz 24416164f; movq %7,%%r10;"\ + "24416163:\n\t"\ + "prefetcht0 -64(%5); prefetcht0 (%5); prefetcht0 63(%5); prefetcht0 -64(%5,%3,1); prefetcht0 (%5,%3,1); prefetcht0 63(%5,%3,1); leaq (%5,%3,2),%5;"\ + KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24416163b;"\ + "24416164:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\ + unit_save_m16n2(%%zmm8,%%zmm9) unit_save_m16n2(%%zmm10,%%zmm11) unit_save_m16n2(%%zmm12,%%zmm13)\ + unit_save_m16n2(%%zmm14,%%zmm15) unit_save_m16n2(%%zmm16,%%zmm17) unit_save_m16n2(%%zmm18,%%zmm19)\ + unit_save_m16n2_rscr(%%zmm20,%%zmm21,0) unit_save_m16n2_rscr(%%zmm22,%%zmm23,128) unit_save_m16n2_rscr(%%zmm24,%%zmm25,256)\ + unit_save_m16n2_rscr(%%zmm26,%%zmm27,384) unit_save_m16n2_rscr(%%zmm28,%%zmm29,512) unit_save_m16n2_rscr(%%zmm30,%%zmm31,640) + +/* m = 8 *//* zmm0 for alpha, zmm1-2 for perm words, zmm4-7 for temporary use, zmm8-19 for accumulators */ +#define KERNEL_k1m8n1 \ + "vbroadcastss (%1),%%ymm4; addq $4,%1; vfmadd231ps (%0),%%ymm4,%%ymm8; addq $32,%0;" +#define KERNEL_k1m8n2 \ + "vmovups (%0),%%ymm4; addq $32,%0;"\ + "vbroadcastss (%1),%%ymm5; vfmadd231ps %%ymm5,%%ymm4,%%ymm8;"\ + "vbroadcastss 4(%1),%%ymm6; vfmadd231ps %%ymm6,%%ymm4,%%ymm9; addq $8,%1;" +#define unit_kernel_k1m8n4(c1,c2,...)\ + "vbroadcastf32x4 ("#__VA_ARGS__"),%%zmm7; vfmadd231ps %%zmm7,%%zmm4,"#c1"; vfmadd231ps %%zmm7,%%zmm5,"#c2";" +#define KERNEL_h_k1m8n4 \ + "vbroadcastf32x4 (%0),%%zmm4; vpermilps %%zmm2,%%zmm4,%%zmm4; vbroadcastf32x4 16(%0),%%zmm5; vpermilps %%zmm2,%%zmm5,%%zmm5; addq $32,%0;"\ + unit_kernel_k1m8n4(%%zmm8,%%zmm9,%1) +#define KERNEL_k1m8n4 KERNEL_h_k1m8n4 "addq $16,%1;" +#define KERNEL_h_k1m8n8 KERNEL_h_k1m8n4 unit_kernel_k1m8n4(%%zmm10,%%zmm11,%1,%%r12,1) +#define KERNEL_k1m8n8 KERNEL_h_k1m8n8 "addq $16,%1;" +#define KERNEL_k1m8n12 KERNEL_h_k1m8n8 unit_kernel_k1m8n4(%%zmm12,%%zmm13,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m8n16 KERNEL_k1m8n12 unit_kernel_k1m8n4(%%zmm14,%%zmm15,%%r15) +#define KERNEL_k1m8n16 KERNEL_h_k1m8n16 "addq $16,%%r15;" +#define KERNEL_h_k1m8n20 KERNEL_h_k1m8n16 unit_kernel_k1m8n4(%%zmm16,%%zmm17,%%r15,%%r12,1) +#define KERNEL_k1m8n20 KERNEL_h_k1m8n20 "addq $16,%%r15;" +#define KERNEL_k1m8n24 
KERNEL_h_k1m8n20 unit_kernel_k1m8n4(%%zmm18,%%zmm19,%%r15,%%r12,2) "addq $16,%%r15;" +#define INIT_m8n1 "vpxor %%ymm8,%%ymm8,%%ymm8;" +#define INIT_m8n2 "vpxor %%ymm8,%%ymm8,%%ymm8; vpxor %%ymm9,%%ymm9,%%ymm9;" +#define unit_init_m8n4(c1,c2) "vpxorq "#c1","#c1","#c1";vpxorq "#c2","#c2","#c2";" +#define INIT_m8n4 unit_init_m8n4(%%zmm8,%%zmm9) +#define INIT_m8n8 INIT_m8n4 unit_init_m8n4(%%zmm10,%%zmm11) +#define INIT_m8n12 INIT_m8n8 unit_init_m8n4(%%zmm12,%%zmm13) +#define INIT_m8n16 INIT_m8n12 unit_init_m8n4(%%zmm14,%%zmm15) +#define INIT_m8n20 INIT_m8n16 unit_init_m8n4(%%zmm16,%%zmm17) +#define INIT_m8n24 INIT_m8n20 unit_init_m8n4(%%zmm18,%%zmm19) +#define SAVE_h_m8n1 "vfmadd213ps (%2),%%ymm0,%%ymm8; vmovups %%ymm8,(%2);" +#define SAVE_h_m8n2 \ + "vfmadd213ps (%2),%%ymm0,%%ymm8; vmovups %%ymm8,(%2);"\ + "vfmadd213ps (%2,%3,1),%%ymm0,%%ymm9; vmovups %%ymm9,(%2,%3,1);" +#define unit_save_m8n4(c1_no,c2_no)\ + "vpermps %%zmm"#c1_no",%%zmm1,%%zmm"#c1_no"; vpermps %%zmm"#c2_no",%%zmm1,%%zmm"#c2_no";"\ + "vextractf64x4 $1,%%zmm"#c1_no",%%ymm5; vextractf64x4 $1,%%zmm"#c2_no",%%ymm6;"\ + "vmovups (%5),%%xmm4; vinsertf128 $1,(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm"#c1_no",%%ymm0,%%ymm4;"\ + "vmovups %%xmm4,(%5); vextractf128 $1,%%ymm4,(%5,%3,1);"\ + "vmovups 16(%5),%%xmm4; vinsertf128 $1,16(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm"#c2_no",%%ymm0,%%ymm4;"\ + "vmovups %%xmm4,16(%5); vextractf128 $1,%%ymm4,16(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovups (%5),%%xmm4; vinsertf128 $1,(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm5,%%ymm0,%%ymm4;"\ + "vmovups %%xmm4,(%5); vextractf128 $1,%%ymm4,(%5,%3,1);"\ + "vmovups 16(%5),%%xmm4; vinsertf128 $1,16(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm6,%%ymm0,%%ymm4;"\ + "vmovups %%xmm4,16(%5); vextractf128 $1,%%ymm4,16(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_h_m8n4 "movq %2,%5;" unit_save_m8n4(8,9) +#define SAVE_h_m8n8 SAVE_h_m8n4 unit_save_m8n4(10,11) +#define SAVE_h_m8n12 SAVE_h_m8n8 unit_save_m8n4(12,13) +#define SAVE_h_m8n16 SAVE_h_m8n12 unit_save_m8n4(14,15) +#define SAVE_h_m8n20 SAVE_h_m8n16 unit_save_m8n4(16,17) +#define SAVE_h_m8n24 SAVE_h_m8n20 unit_save_m8n4(18,19) +#define SAVE_m8(ndim) SAVE_h_m8n##ndim "addq $32,%2;" +#define COMPUTE_m8(ndim) \ + INIT_m8n##ndim\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;"\ + "testq %4,%4; jz "#ndim"008082f;"\ + #ndim"008081:\n\t"\ + KERNEL_k1m8n##ndim "decq %4; jnz "#ndim"008081b;"\ + #ndim"008082:\n\t"\ + SAVE_m8(ndim) + +/* m = 4 *//* zmm0 for alpha, zmm1-2 for perm words, zmm4-7 for temporary use, zmm8-15 for accumulators */ +#define KERNEL_k1m4n1 "vbroadcastss (%1),%%xmm4; addq $4,%1; vfmadd231ps (%0),%%xmm4,%%xmm8; addq $16,%0;" +#define KERNEL_k1m4n2 "vmovups (%0),%%xmm4; addq $16,%0;"\ + "vbroadcastss (%1),%%xmm5; vfmadd231ps %%xmm5,%%xmm4,%%xmm8;"\ + "vbroadcastss 4(%1),%%xmm5; vfmadd231ps %%xmm5,%%xmm4,%%xmm9; addq $8,%1;" +#define unit_kernel_k1m4n4(c1,...) 
"vbroadcastf32x4 ("#__VA_ARGS__"),%%zmm7; vfmadd231ps %%zmm7,%%zmm4,"#c1";" +#define KERNEL_h_k1m4n4 "vbroadcastf32x4 (%0),%%zmm4; vpermilps %%zmm2,%%zmm4,%%zmm4; addq $16,%0;" unit_kernel_k1m4n4(%%zmm8,%1) +#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;" +#define KERNEL_h_k1m4n8 KERNEL_h_k1m4n4 unit_kernel_k1m4n4(%%zmm9,%1,%%r12,1) +#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $16,%1;" +#define KERNEL_k1m4n12 KERNEL_h_k1m4n8 unit_kernel_k1m4n4(%%zmm10,%1,%%r12,2) "addq $16,%1;" +#define KERNEL_h_k1m4n16 KERNEL_k1m4n12 unit_kernel_k1m4n4(%%zmm11,%%r15) +#define KERNEL_k1m4n16 KERNEL_h_k1m4n16 "addq $16,%%r15;" +#define KERNEL_h_k1m4n20 KERNEL_h_k1m4n16 unit_kernel_k1m4n4(%%zmm12,%%r15,%%r12,1) +#define KERNEL_k1m4n20 KERNEL_h_k1m4n20 "addq $16,%%r15;" +#define KERNEL_h_k1m4n24 KERNEL_h_k1m4n20 unit_kernel_k1m4n4(%%zmm13,%%r15,%%r12,2) +#define KERNEL_k1m4n24 KERNEL_h_k1m4n24 "addq $16,%%r15;" +#define INIT_m4n1 "vpxor %%xmm8,%%xmm8,%%xmm8;" +#define INIT_m4n2 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;" +#define INIT_m4n4 "vpxorq %%zmm8,%%zmm8,%%zmm8;" +#define INIT_m4n8 INIT_m4n4 "vpxorq %%zmm9,%%zmm9,%%zmm9;" +#define INIT_m4n12 INIT_m4n8 "vpxorq %%zmm10,%%zmm10,%%zmm10;" +#define INIT_m4n16 INIT_m4n12 "vpxorq %%zmm11,%%zmm11,%%zmm11;" +#define INIT_m4n20 INIT_m4n16 "vpxorq %%zmm12,%%zmm12,%%zmm12;" +#define INIT_m4n24 INIT_m4n20 "vpxorq %%zmm13,%%zmm13,%%zmm13;" +#define SAVE_h_m4n1 "vfmadd213ps (%2),%%xmm0,%%xmm8; vmovups %%xmm8,(%2);" +#define SAVE_h_m4n2 "vfmadd213ps (%2),%%xmm0,%%xmm8; vmovups %%xmm8,(%2); vfmadd213ps (%2,%3,1),%%xmm0,%%xmm9; vmovups %%xmm9,(%2,%3,1);" +#define unit_save_m4n4(c1_no)\ + "vpermps %%zmm"#c1_no",%%zmm1,%%zmm"#c1_no"; vextractf64x4 $1,%%zmm"#c1_no",%%ymm5;"\ + "vmovups (%5),%%xmm4; vinsertf128 $1,(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm0,%%ymm"#c1_no",%%ymm4;"\ + "vmovups %%xmm4,(%5); vextractf128 $1,%%ymm4,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovups (%5),%%xmm4; vinsertf128 $1,(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm0,%%ymm5,%%ymm4;"\ + "vmovups %%xmm4,(%5); vextractf128 $1,%%ymm4,(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_h_m4n4 "movq %2,%5;" unit_save_m4n4(8) +#define SAVE_h_m4n8 SAVE_h_m4n4 unit_save_m4n4(9) +#define SAVE_h_m4n12 SAVE_h_m4n8 unit_save_m4n4(10) +#define SAVE_h_m4n16 SAVE_h_m4n12 unit_save_m4n4(11) +#define SAVE_h_m4n20 SAVE_h_m4n16 unit_save_m4n4(12) +#define SAVE_h_m4n24 SAVE_h_m4n20 unit_save_m4n4(13) +#define SAVE_m4(ndim) SAVE_h_m4n##ndim "addq $16,%2;" +#define COMPUTE_m4(ndim) \ + INIT_m4n##ndim\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;"\ + "testq %4,%4; jz "#ndim"004042f;"\ + #ndim"004041:\n\t"\ + KERNEL_k1m4n##ndim "decq %4; jnz "#ndim"004041b;"\ + #ndim"004042:\n\t"\ + SAVE_m4(ndim) + +/* m = 2 *//* xmm0 for alpha, xmm1-xmm3 for temporary use, xmm4-xmm15 for accumulators */ +#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m2n1 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,%1;" +#define SAVE_h_m2n1 "vmovsd (%2),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);" +#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define KERNEL_k1m2n2 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "vbroadcastss 4(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm5;"\ + "addq $8,%1;" +#define SAVE_h_m2n2 SAVE_h_m2n1 "vmovsd (%2,%3,1),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm5; vmovsd %%xmm5,(%2,%3,1);" +#define INIT_m2n4 INIT_m2n2 
+#define INIT_m2n8 INIT_m2n4 "vpxor %%xmm6,%%xmm6,%%xmm6; vpxor %%xmm7,%%xmm7,%%xmm7;" +#define INIT_m2n12 INIT_m2n8 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;" +#define INIT_m2n16 INIT_m2n12 "vpxor %%xmm10,%%xmm10,%%xmm10; vpxor %%xmm11,%%xmm11,%%xmm11;" +#define INIT_m2n20 INIT_m2n16 "vpxor %%xmm12,%%xmm12,%%xmm12; vpxor %%xmm13,%%xmm13,%%xmm13;" +#define INIT_m2n24 INIT_m2n20 "vpxor %%xmm14,%%xmm14,%%xmm14; vpxor %%xmm15,%%xmm15,%%xmm15;" +#define KERNEL_h_k1m2n4 \ + "vbroadcastss (%0),%%xmm1; vbroadcastss 4(%0),%%xmm2; addq $8,%0;"\ + "vmovups (%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm4; vfmadd231ps %%xmm2,%%xmm3,%%xmm5;" +#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $16,%1;" +#define KERNEL_h_k1m2n8 KERNEL_h_k1m2n4 "vmovups (%1,%%r12,1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm6; vfmadd231ps %%xmm2,%%xmm3,%%xmm7;" +#define KERNEL_k1m2n8 KERNEL_h_k1m2n8 "addq $16,%1;" +#define KERNEL_k1m2n12 KERNEL_h_k1m2n8 \ + "vmovups (%1,%%r12,2),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm8; vfmadd231ps %%xmm2,%%xmm3,%%xmm9; addq $16,%1;" +#define KERNEL_h_k1m2n16 KERNEL_k1m2n12 "vmovups (%%r15),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm10; vfmadd231ps %%xmm2,%%xmm3,%%xmm11;" +#define KERNEL_k1m2n16 KERNEL_h_k1m2n16 "addq $16,%%r15;" +#define KERNEL_h_k1m2n20 KERNEL_h_k1m2n16 "vmovups (%%r15,%%r12,1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm12; vfmadd231ps %%xmm2,%%xmm3,%%xmm13;" +#define KERNEL_k1m2n20 KERNEL_h_k1m2n20 "addq $16,%%r15;" +#define KERNEL_h_k1m2n24 KERNEL_h_k1m2n20 "vmovups (%%r15,%%r12,2),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm14; vfmadd231ps %%xmm2,%%xmm3,%%xmm15;" +#define KERNEL_k1m2n24 KERNEL_h_k1m2n24 "addq $16,%%r15;" +#define unit_save_m2n4(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm1; vunpckhps "#c2","#c1",%%xmm2;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1; vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2; vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#define SAVE_h_m2n4 "movq %2,%5;" unit_save_m2n4(%%xmm4,%%xmm5) +#define SAVE_h_m2n8 SAVE_h_m2n4 unit_save_m2n4(%%xmm6,%%xmm7) +#define SAVE_h_m2n12 SAVE_h_m2n8 unit_save_m2n4(%%xmm8,%%xmm9) +#define SAVE_h_m2n16 SAVE_h_m2n12 unit_save_m2n4(%%xmm10,%%xmm11) +#define SAVE_h_m2n20 SAVE_h_m2n16 unit_save_m2n4(%%xmm12,%%xmm13) +#define SAVE_h_m2n24 SAVE_h_m2n20 unit_save_m2n4(%%xmm14,%%xmm15) +#define SAVE_m2(ndim) SAVE_h_m2n##ndim "addq $8,%2;" +#define COMPUTE_m2(ndim) \ + INIT_m2n##ndim\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;"\ + "testq %4,%4; jz "#ndim"002022f;"\ + #ndim"002021:\n\t"\ + KERNEL_k1m2n##ndim "decq %4; jnz "#ndim"002021b;"\ + #ndim"002022:\n\t"\ + SAVE_m2(ndim) + +/* m = 1 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm9 for accumulators */ +#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m1n1 \ + "vmovss (%1),%%xmm3; addq $4,%1;"\ + "vmovss (%0),%%xmm1; vfmadd231ss %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define SAVE_h_m1n1 "vfmadd213ss (%2),%%xmm0,%%xmm4; vmovss %%xmm4,(%2);" +#define INIT_m1n2 INIT_m1n1 +#define KERNEL_k1m1n2 \ + "vmovsd (%1),%%xmm3; addq $8,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define SAVE_h_m1n2 \ + "vmovss (%2),%%xmm3; vinsertps $16,(%2,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm4;"\ + "vmovss %%xmm4,(%2); vextractps $1,%%xmm4,(%2,%3,1);" +#define INIT_m1n4 INIT_m1n2 +#define 
INIT_m1n8 INIT_m1n4 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m1n12 INIT_m1n8 "vpxor %%xmm6,%%xmm6,%%xmm6;" +#define INIT_m1n16 INIT_m1n12 "vpxor %%xmm7,%%xmm7,%%xmm7;" +#define INIT_m1n20 INIT_m1n16 "vpxor %%xmm8,%%xmm8,%%xmm8;" +#define INIT_m1n24 INIT_m1n20 "vpxor %%xmm9,%%xmm9,%%xmm9;" +#define KERNEL_h_k1m1n4 \ + "vbroadcastss (%0),%%xmm1; addq $4,%0; vfmadd231ps (%1),%%xmm1,%%xmm4;" +#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $16,%1;" +#define KERNEL_h_k1m1n8 KERNEL_h_k1m1n4 "vfmadd231ps (%1,%%r12,1),%%xmm1,%%xmm5;" +#define KERNEL_k1m1n8 KERNEL_h_k1m1n8 "addq $16,%1;" +#define KERNEL_k1m1n12 KERNEL_h_k1m1n8 "vfmadd231ps (%1,%%r12,2),%%xmm1,%%xmm6; addq $16,%1;" +#define KERNEL_h_k1m1n16 KERNEL_k1m1n12 "vfmadd231ps (%%r15),%%xmm1,%%xmm7;" +#define KERNEL_k1m1n16 KERNEL_h_k1m1n16 "addq $16,%%r15;" +#define KERNEL_h_k1m1n20 KERNEL_h_k1m1n16 "vfmadd231ps (%%r15,%%r12,1),%%xmm1,%%xmm8;" +#define KERNEL_k1m1n20 KERNEL_h_k1m1n20 "addq $16,%%r15;" +#define KERNEL_h_k1m1n24 KERNEL_h_k1m1n20 "vfmadd231ps (%%r15,%%r12,2),%%xmm1,%%xmm9;" +#define KERNEL_k1m1n24 KERNEL_h_k1m1n24 "addq $16,%%r15;" +#define unit_save_m1n4(c1) \ + "vpxor %%xmm10,%%xmm10,%%xmm10; vmovsd "#c1",%%xmm10,%%xmm2; vmovhlps "#c1",%%xmm10,%%xmm1;"\ + "vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\ + "vmovss %%xmm2,(%5); vextractps $1,%%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\ + "vmovss %%xmm1,(%5); vextractps $1,%%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_h_m1n4 "movq %2,%5;" unit_save_m1n4(%%xmm4) +#define SAVE_h_m1n8 SAVE_h_m1n4 unit_save_m1n4(%%xmm5) +#define SAVE_h_m1n12 SAVE_h_m1n8 unit_save_m1n4(%%xmm6) +#define SAVE_h_m1n16 SAVE_h_m1n12 unit_save_m1n4(%%xmm7) +#define SAVE_h_m1n20 SAVE_h_m1n16 unit_save_m1n4(%%xmm8) +#define SAVE_h_m1n24 SAVE_h_m1n20 unit_save_m1n4(%%xmm9) +#define SAVE_m1(ndim) SAVE_h_m1n##ndim "addq $4,%2;" +#define COMPUTE_m1(ndim) \ + INIT_m1n##ndim\ + "movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;"\ + "testq %4,%4; jz "#ndim"001012f;"\ + #ndim"001011:\n\t"\ + KERNEL_k1m1n##ndim "decq %4; jnz "#ndim"001011b;"\ + #ndim"001012:\n\t"\ + SAVE_m1(ndim) + +/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 = "+r"(K), %5 = "+r"(ctemp) */ +/* %6 = "+r"(next_b), %7 = "m"(ALPHA), %8 = "m"(M) */ +/* r11 = m_counter, r12 = k << 4(const), r13 = k(const), r14 = b_head_pos(const), r15 = %1 + 3r12 */ + +#define COMPUTE(ndim) {\ + next_b = b_pointer + ndim * K;\ + __asm__ __volatile__(\ + "vbroadcastss %7,%%zmm0; vmovups %9,%%zmm1; vmovups %10,%%zmm2;"\ + "movq %4,%%r13; movq %4,%%r12; salq $4,%%r12; movq %1,%%r14; movq %8,%%r11;"\ + "cmpq $16,%%r11;jb 33101"#ndim"f;"\ + "33109"#ndim":\n\t"\ + COMPUTE_m16(ndim)\ + "subq $16,%%r11;cmpq $16,%%r11;jnb 33109"#ndim"b;"\ + "33101"#ndim":\n\t"\ + "cmpq $8,%%r11;jb 33102"#ndim"f;"\ + COMPUTE_m8(ndim)\ + "subq $8,%%r11;"\ + "33102"#ndim":\n\t"\ + "cmpq $4,%%r11;jb 33103"#ndim"f;"\ + COMPUTE_m4(ndim)\ + "subq $4,%%r11;"\ + "33103"#ndim":\n\t"\ + "cmpq $2,%%r11;jb 33104"#ndim"f;"\ + COMPUTE_m2(ndim)\ + "subq $2,%%r11;"\ + "33104"#ndim":\n\t"\ + "testq %%r11,%%r11;jz 33105"#ndim"f;"\ + COMPUTE_m1(ndim)\ + "33105"#ndim":\n\t"\ + "movq %%r13,%4; movq %%r14,%1; vzeroupper;"\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(next_b):"m"(ALPHA),"m"(M),"m"(perm[0]),"m"(permil[0])\ + 
:"r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14",\ + "zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31",\ + "cc","memory");\ + a_pointer -= M * K; b_pointer += ndim * K; c_pointer += LDC * ndim - M;\ +} + +#define COMPUTE_n24 {\ + next_b = b_pointer + 24 * K;\ + __asm__ __volatile__(\ + "vbroadcastss %8,%%zmm0; vmovups %10,%%zmm1; vmovups %11,%%zmm2;"\ + "movq %4,%%r13; movq %4,%%r12; salq $4,%%r12; movq %1,%%r14; movq %9,%%r11;"\ + "cmpq $32,%%r11;jb 3310024f;"\ + COMPUTE_m16n24_LINIT "subq $16,%%r11; cmpq $32,%%r11;jb 3310724f;"\ + "3310924:\n\t"\ + COMPUTE_m16n24_RSAVE "subq $16,%%r11; cmpq $32,%%r11;jb 3310824f;"\ + COMPUTE_m16n24_LSAVE "subq $16,%%r11; cmpq $32,%%r11;jnb 3310924b;"\ + "3310724:\n\t"\ + COMPUTE_m16n24_RTAIL "subq $16,%%r11; jmp 3310124f;"\ + "3310824:\n\t"\ + COMPUTE_m16n24_LTAIL "subq $16,%%r11; jmp 3310124f;"\ + "3310024:\n\t"\ + "cmpq $16,%%r11;jb 3310124f;"\ + COMPUTE_m16(24)\ + "subq $16,%%r11;"\ + "3310124:\n\t"\ + "cmpq $8,%%r11;jb 3310224f;"\ + COMPUTE_m8(24)\ + "subq $8,%%r11;"\ + "3310224:\n\t"\ + "cmpq $4,%%r11;jb 3310324f;"\ + COMPUTE_m4(24)\ + "subq $4,%%r11;"\ + "3310324:\n\t"\ + "cmpq $2,%%r11;jb 3310424f;"\ + COMPUTE_m2(24)\ + "subq $2,%%r11;"\ + "3310424:\n\t"\ + "testq %%r11,%%r11;jz 3310524f;"\ + COMPUTE_m1(24)\ + "3310524:\n\t"\ + "movq %%r13,%4; movq %%r14,%1; vzeroupper;"\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(next_b),"+r"(wscr):"m"(ALPHA),"m"(M),"m"(perm[0]),"m"(permil[0])\ + :"r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14",\ + "zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31",\ + "cc","memory");\ + a_pointer -= M * K; b_pointer += 24 * K; c_pointer += LDC * 24 - M;\ +} + +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC) +{ + if(m==0||n==0||k==0||alpha==(float)0.0) return 0; + float scr[192]; float *wscr = scr; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(float);float ALPHA = alpha; + int64_t M = (int64_t)m, K = (int64_t)k; + int32_t perm[16] = {0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15}; + int32_t permil[16] = {0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3}; + BLASLONG n_count = n; + float *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B; + for(;n_count>23;n_count-=24) COMPUTE_n24 + for(;n_count>19;n_count-=20) COMPUTE(20) + for(;n_count>15;n_count-=16) COMPUTE(16) + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) COMPUTE(2) + if(n_count>0) COMPUTE(1) + return 0; +} +#include +//#include "sgemm_direct_skylakex.c" diff --git a/kernel/x86_64/sgemm_kernel_8x4_haswell.c b/kernel/x86_64/sgemm_kernel_8x4_haswell.c new file mode 100644 index 000000000..2250e4b97 --- /dev/null +++ b/kernel/x86_64/sgemm_kernel_8x4_haswell.c @@ -0,0 +1,490 @@ +/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store, %6 = &alpha, %7 = b_pref */ +/* r11 = m_counter, r12 = k << 2(const), r13 = k_skip << 2, r14 = b_head_pos(const), r15 for assisting prefetch */ + 
+//recommended settings: GEMM_P = 320, GEMM_Q = 320. + +#ifdef TRMMKERNEL + #define mult_alpha(acc,alpha,...) "vmulps "#acc","#alpha","#acc";" +#else + #define mult_alpha(acc,alpha,...) "vfmadd213ps ("#__VA_ARGS__"),"#alpha","#acc";" +#endif + +#if defined(TRMMKERNEL) && !defined(LEFT) + #ifdef TRANSA + #define HEAD_SET_OFFSET(ndim) {} + #define TAIL_SET_OFFSET(ndim) {off+=ndim;} + #else + #define HEAD_SET_OFFSET(ndim) {off+=(ndim>4?4:ndim);} + #define TAIL_SET_OFFSET(ndim) {off+=(ndim>4?(ndim-4):0);} + #endif +#else + #define HEAD_SET_OFFSET(ndim) {} + #define TAIL_SET_OFFSET(ndim) {} +#endif + +#if defined(TRMMKERNEL) && defined(LEFT) + #ifdef TRANSA + #define init_update_kskip(val) "subq $"#val",%%r13;" + #define save_update_kskip(val) "" + #else + #define init_update_kskip(val) "" + #define save_update_kskip(val) "addq $"#val",%%r13;" + #endif +#else + #define init_update_kskip(val) "" + #define save_update_kskip(val) "" +#endif + +#ifdef TRMMKERNEL + #define init_set_k "movq %%r12,%4; subq %%r13,%4;" + #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + #define INIT_SET_KSKIP "movq %9,%%r13; salq $2,%%r13;" + #define init_set_pointers(a_copy,b_copy) "leaq (%0,%%r13,"#a_copy"),%0; leaq (%1,%%r13,"#b_copy"),%1;" + #define save_set_pointers(a_copy,b_copy) "" + #else + #define INIT_SET_KSKIP "movq %4,%%r13; subq %9,%%r13; salq $2,%%r13;" + #define init_set_pointers(a_copy,b_copy) "" + #define save_set_pointers(a_copy,b_copy) "leaq (%0,%%r13,"#a_copy"),%0; leaq (%1,%%r13,"#b_copy"),%1;" + #endif +#else + #define INIT_SET_KSKIP "xorq %%r13,%%r13;" + #define init_set_k "movq %%r12,%4;" + #define init_set_pointers(a_copy,b_copy) "" + #define save_set_pointers(a_copy,b_copy) "" +#endif +#define init_set_pa_pb_n12(mdim) init_set_pointers(mdim,4) +#define init_set_pa_pb_n8(mdim) init_set_pointers(mdim,4) +#define init_set_pa_pb_n4(mdim) init_set_pointers(mdim,4) +#define init_set_pa_pb_n2(mdim) init_set_pointers(mdim,2) +#define init_set_pa_pb_n1(mdim) init_set_pointers(mdim,1) +#define save_set_pa_pb_n12(mdim) save_set_pointers(mdim,4) +#define save_set_pa_pb_n8(mdim) save_set_pointers(mdim,4) +#define save_set_pa_pb_n4(mdim) save_set_pointers(mdim,4) +#define save_set_pa_pb_n2(mdim) save_set_pointers(mdim,2) +#define save_set_pa_pb_n1(mdim) save_set_pointers(mdim,1) + +#if defined(TRMMKERNEL) && !defined(LEFT) && defined(TRANSA) + #define kernel_kstart_n8(mdim) \ + KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 "subq $16,%4;" + #define kernel_kstart_n12(mdim) \ + KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4 KERNEL_k1m##mdim##n4\ + KERNEL_k1m##mdim##n8 KERNEL_k1m##mdim##n8 KERNEL_k1m##mdim##n8 KERNEL_k1m##mdim##n8 "subq $32,%4;" +#else + #define kernel_kstart_n8(mdim) "" + #define kernel_kstart_n12(mdim) "" +#endif +#define kernel_kstart_n4(mdim) "" +#define kernel_kstart_n2(mdim) "" +#define kernel_kstart_n1(mdim) "" + +/* m = 8 *//* ymm0 for alpha, ymm1-ymm3 for temporary use, ymm4-ymm15 for accumulators */ +#define KERNEL_k1m8n1 \ + "vmovups (%0),%%ymm1; addq $32,%0;"\ + "vbroadcastss (%1),%%ymm2; vfmadd231ps %%ymm1,%%ymm2,%%ymm4;"\ + "addq $4,%1;" +#define KERNEL_h_k1m8n2 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; addq $32,%0;"\ + "vbroadcastsd (%1),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;" +#define KERNEL_k1m8n2 KERNEL_h_k1m8n2 "addq $8,%1;" +#define KERNEL_h_k1m8n4 \ + KERNEL_h_k1m8n2 "vbroadcastsd 8(%1),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps 
%%ymm2,%%ymm3,%%ymm7;" +#define KERNEL_k1m8n4 KERNEL_h_k1m8n4 "addq $16,%1;" +#define unit_kernel_k1m8n4(c1,c2,c3,c4,boff1,boff2,...) \ + "vbroadcastsd "#boff1"("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c1"; vfmadd231ps %%ymm2,%%ymm3,"#c2";"\ + "vbroadcastsd "#boff2"("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c3"; vfmadd231ps %%ymm2,%%ymm3,"#c4";" +#define KERNEL_h_k1m8n8 KERNEL_h_k1m8n4 unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,0,8,%1,%%r12,4) +#define KERNEL_k1m8n8 KERNEL_h_k1m8n8 "addq $16,%1;" +#define KERNEL_h_k1m8n12 KERNEL_h_k1m8n8 unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,0,8,%1,%%r12,8) +#define KERNEL_k1m8n12 KERNEL_h_k1m8n12 "addq $16,%1;" +#define KERNEL_k2m8n1 KERNEL_k1m8n1 KERNEL_k1m8n1 +#define KERNEL_k2m8n2 KERNEL_k1m8n2 KERNEL_k1m8n2 +#define KERNEL_k2m8n4 KERNEL_k1m8n4 KERNEL_k1m8n4 +#define KERNEL_k2m8n8 KERNEL_k1m8n8 KERNEL_k1m8n8 +#define KERNEL_k2m8n12 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2;"\ + unit_kernel_k1m8n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7,0,8,%1)\ + unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,0,8,%1,%%r12,4)\ + unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,0,8,%1,%%r12,8)\ + "vmovsldup 32(%0),%%ymm1; vmovshdup 32(%0),%%ymm2; prefetcht0 512(%0); addq $64,%0;"\ + unit_kernel_k1m8n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7,16,24,%1)\ + unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,16,24,%1,%%r12,4)\ + unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,16,24,%1,%%r12,8) "addq $32,%1;" +#if defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA) + #define unit_kernel_endn4_k1m8n8(offa1,offb1,offb2) \ + "vmovsldup "#offa1"(%0),%%ymm1; vmovshdup "#offa1"(%0),%%ymm2;"\ + unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,offb1,offb2,%1,%%r12,4) + #define unit_kernel_endn4_k1m8n12(offa1,offb1,offb2) \ + "vmovsldup "#offa1"(%0),%%ymm1; vmovshdup "#offa1"(%0),%%ymm2;"\ + unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,offb1,offb2,%1,%%r12,8) + #define unit_kernel_endn8_k1m8n12(offa1,offb1,offb2) unit_kernel_endn4_k1m8n8(offa1,offb1,offb2)\ + unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,offb1,offb2,%1,%%r12,8) + #define kernel_kend_m8n8 \ + unit_kernel_endn4_k1m8n8(0,0,8) unit_kernel_endn4_k1m8n8(32,16,24)\ + unit_kernel_endn4_k1m8n8(64,32,40) unit_kernel_endn4_k1m8n8(96,48,56) + #define kernel_kend_m8n12 \ + unit_kernel_endn8_k1m8n12(0,0,8) unit_kernel_endn8_k1m8n12(32,16,24)\ + unit_kernel_endn8_k1m8n12(64,32,40) unit_kernel_endn8_k1m8n12(96,48,56)\ + unit_kernel_endn4_k1m8n12(128,64,72) unit_kernel_endn4_k1m8n12(160,80,88)\ + unit_kernel_endn4_k1m8n12(192,96,104) unit_kernel_endn4_k1m8n12(224,112,120) +#else + #define kernel_kend_m8n8 "" + #define kernel_kend_m8n12 "" +#endif +#define kernel_kend_m8n4 "" +#define kernel_kend_m8n2 "" +#define kernel_kend_m8n1 "" +#define INIT_m8n1 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define INIT_m8n2 INIT_m8n1 "vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m8n4 INIT_m8n2 "vpxor %%ymm6,%%ymm6,%%ymm6;vpxor %%ymm7,%%ymm7,%%ymm7;" +#define unit_init_m8n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m8n8 INIT_m8n4 unit_init_m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11) +#define INIT_m8n12 INIT_m8n8 unit_init_m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15) +#define SAVE_m8n1 mult_alpha(%%ymm4,%%ymm0,%2) "vmovups %%ymm4,(%2);" +#define unit_save_m8n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%ymm2; vunpckhps "#c2","#c1",%%ymm3; vunpcklpd %%ymm3,%%ymm2,"#c1"; vunpckhpd %%ymm3,%%ymm2,"#c2";"\ + mult_alpha(c1,%%ymm0,%5) "vmovups "#c1",(%5);"\ + 
mult_alpha(c2,%%ymm0,%5,%3,1) "vmovups "#c2",(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#define SAVE_m8n2 "movq %2,%5;" unit_save_m8n2(%%ymm4,%%ymm5) +#define SAVE_m8n4 SAVE_m8n2 unit_save_m8n2(%%ymm6,%%ymm7) +#define SAVE_m8n8 SAVE_m8n4 unit_save_m8n2(%%ymm8,%%ymm9) unit_save_m8n2(%%ymm10,%%ymm11) +#define SAVE_m8n12 SAVE_m8n8 unit_save_m8n2(%%ymm12,%%ymm13) unit_save_m8n2(%%ymm14,%%ymm15) +#define COMPUTE_m8(ndim) \ + init_update_kskip(32) INIT_m8n##ndim\ + init_set_k "movq %%r14,%1;" init_set_pa_pb_n##ndim(8) "movq %2,%5; movq $0,%%r15;"\ + kernel_kstart_n##ndim(8)\ + "cmpq $64,%4; jb "#ndim"882f;"\ + #ndim"881:\n\t"\ + "cmpq $62,%%r15; movq $62,%%r15; cmoveq %3,%%r15;"\ + KERNEL_k2m8n##ndim KERNEL_k2m8n##ndim\ + "prefetcht1 (%5); subq $31,%5;"\ + KERNEL_k2m8n##ndim KERNEL_k2m8n##ndim\ + "addq %%r15,%5; prefetcht1 (%7); addq $16,%7;"\ + "subq $32,%4; cmpq $64,%4; jnb "#ndim"881b;"\ + "movq %2,%5;"\ + #ndim"882:\n\t"\ + "testq %4,%4; jz "#ndim"883f;"\ + "prefetcht0 (%5); prefetcht0 31(%5);"\ + KERNEL_k1m8n##ndim\ + "prefetcht0 (%5,%3,4); prefetcht0 31(%5,%3,4); addq %3,%5;"\ + "subq $4,%4; jmp "#ndim"882b;"\ + #ndim"883:\n\t"\ + kernel_kend_m8n##ndim "prefetcht0 (%%r14); prefetcht0 64(%%r14);"\ + save_set_pa_pb_n##ndim(8) SAVE_m8n##ndim "addq $32,%2;" save_update_kskip(32) + +/* m = 4 *//* xmm0 for alpha, xmm1-xmm3 for temporary use, xmm4-xmm15 for accumulators */ +#define KERNEL_k1m4n1 \ + "vmovups (%0),%%xmm1; addq $16,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,%1;" +#define KERNEL_h_k1m4n2 \ + "vmovsldup (%0),%%xmm1; vmovshdup (%0),%%xmm2; addq $16,%0;"\ + "vmovddup (%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm4; vfmadd231ps %%xmm2,%%xmm3,%%xmm5;" +#define KERNEL_k1m4n2 KERNEL_h_k1m4n2 "addq $8,%1;" +#define KERNEL_h_k1m4n4 \ + KERNEL_h_k1m4n2 "vmovddup 8(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm6; vfmadd231ps %%xmm2,%%xmm3,%%xmm7;" +#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;" +#define unit_kernel_k1m4n4(c1,c2,c3,c4,offb1,offb2,...) 
\ + "vmovddup "#offb1"("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c1"; vfmadd231ps %%xmm2,%%xmm3,"#c2";"\ + "vmovddup "#offb2"("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c3"; vfmadd231ps %%xmm2,%%xmm3,"#c4";" +#define KERNEL_h_k1m4n8 KERNEL_h_k1m4n4 unit_kernel_k1m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11,0,8,%1,%%r12,4) +#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $16,%1;" +#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n8 unit_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,0,8,%1,%%r12,8) +#define KERNEL_k1m4n12 KERNEL_h_k1m4n12 "addq $16,%1;" +#if defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA) + #define unit_kernel_endn4_k1m4n8(offa1,offb1,offb2) \ + "vmovsldup "#offa1"(%0),%%xmm1; vmovshdup "#offa1"(%0),%%xmm2;"\ + unit_kernel_k1m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11,offb1,offb2,%1,%%r12,4) + #define unit_kernel_endn4_k1m4n12(offa1,offb1,offb2) \ + "vmovsldup "#offa1"(%0),%%xmm1; vmovshdup "#offa1"(%0),%%xmm2;"\ + unit_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,offb1,offb2,%1,%%r12,8) + #define unit_kernel_endn8_k1m4n12(offa1,offb1,offb2) unit_kernel_endn4_k1m4n8(offa1,offb1,offb2)\ + unit_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,offb1,offb2,%1,%%r12,8) + #define kernel_kend_m4n8 \ + unit_kernel_endn4_k1m4n8(0,0,8) unit_kernel_endn4_k1m4n8(16,16,24)\ + unit_kernel_endn4_k1m4n8(32,32,40) unit_kernel_endn4_k1m4n8(48,48,56) + #define kernel_kend_m4n12 \ + unit_kernel_endn8_k1m4n12(0,0,8) unit_kernel_endn8_k1m4n12(16,16,24)\ + unit_kernel_endn8_k1m4n12(32,32,40) unit_kernel_endn8_k1m4n12(48,48,56)\ + unit_kernel_endn4_k1m4n12(64,64,72) unit_kernel_endn4_k1m4n12(80,80,88)\ + unit_kernel_endn4_k1m4n12(96,96,104) unit_kernel_endn4_k1m4n12(112,112,120) +#else + #define kernel_kend_m4n8 "" + #define kernel_kend_m4n12 "" +#endif +#define kernel_kend_m4n4 "" +#define kernel_kend_m4n2 "" +#define kernel_kend_m4n1 "" +#define INIT_m4n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define INIT_m4n2 INIT_m4n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m4n4 INIT_m4n2 "vpxor %%xmm6,%%xmm6,%%xmm6;vpxor %%xmm7,%%xmm7,%%xmm7;" +#define unit_init_m4n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m4n8 INIT_m4n4 unit_init_m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11) +#define INIT_m4n12 INIT_m4n8 unit_init_m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15) +#define SAVE_m4n1 \ + mult_alpha(%%xmm4,%%xmm0,%2) "vmovups %%xmm4,(%2);" +#define unit_save_m4n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm2; vunpckhps "#c2","#c1",%%xmm3; vunpcklpd %%xmm3,%%xmm2,"#c1"; vunpckhpd %%xmm3,%%xmm2,"#c2";"\ + mult_alpha(c1,%%xmm0,%5) "vmovups "#c1",(%5);"\ + mult_alpha(c2,%%xmm0,%5,%3,1) "vmovups "#c2",(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#define SAVE_m4n2 "movq %2,%5;" unit_save_m4n2(%%xmm4,%%xmm5) +#define SAVE_m4n4 SAVE_m4n2 unit_save_m4n2(%%xmm6,%%xmm7) +#define SAVE_m4n8 SAVE_m4n4 unit_save_m4n2(%%xmm8,%%xmm9) unit_save_m4n2(%%xmm10,%%xmm11) +#define SAVE_m4n12 SAVE_m4n8 unit_save_m4n2(%%xmm12,%%xmm13) unit_save_m4n2(%%xmm14,%%xmm15) +#define COMPUTE_m4(ndim) \ + init_update_kskip(16) INIT_m4n##ndim\ + init_set_k "movq %%r14,%1;" init_set_pa_pb_n##ndim(4)\ + kernel_kstart_n##ndim(4)\ + #ndim"442:\n\t"\ + "testq %4,%4; jz "#ndim"443f;"\ + KERNEL_k1m4n##ndim\ + "subq $4,%4; jmp "#ndim"442b;"\ + #ndim"443:\n\t"\ + kernel_kend_m4n##ndim save_set_pa_pb_n##ndim(4) SAVE_m4n##ndim "addq $16,%2;" save_update_kskip(16) + +/* m = 2 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm9 for accumulators */ +#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" 
+#define KERNEL_k1m2n1 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,%1;" +#ifdef TRMMKERNEL + #define SAVE_m2n1 "vmulps %%xmm4,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);" +#else + #define SAVE_m2n1 "vmovsd (%2),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);" +#endif +#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define KERNEL_k1m2n2 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "vbroadcastss 4(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm5;"\ + "addq $8,%1;" +#ifdef TRMMKERNEL + #define SAVE_m2n2 SAVE_m2n1 "vmulps %%xmm5,%%xmm0,%%xmm5; vmovsd %%xmm5,(%2,%3,1);" +#else + #define SAVE_m2n2 SAVE_m2n1 "vmovsd (%2,%3,1),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm5; vmovsd %%xmm5,(%2,%3,1);" +#endif +#define INIT_m2n4 INIT_m2n2 +#define INIT_m2n8 INIT_m2n4 "vpxor %%xmm6,%%xmm6,%%xmm6; vpxor %%xmm7,%%xmm7,%%xmm7;" +#define INIT_m2n12 INIT_m2n8 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;" +#define KERNEL_k1m2n4 \ + "vmovups (%1),%%xmm3; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "vbroadcastss 4(%0),%%xmm2; vfmadd231ps %%xmm3,%%xmm2,%%xmm5;"\ + "addq $8,%0;" +#define KERNEL_k1m2n8 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,4),%%xmm2; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4; vfmadd231ps %%xmm2,%%xmm1,%%xmm6;"\ + "vbroadcastss 4(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm5; vfmadd231ps %%xmm2,%%xmm1,%%xmm7;"\ + "addq $8,%0;" +#define KERNEL_k1m2n12 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,4),%%xmm2; vmovups (%1,%%r12,8),%%xmm1; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm4; vfmadd231ps %%xmm2,%%xmm10,%%xmm6; vfmadd231ps %%xmm1,%%xmm10,%%xmm8;"\ + "vbroadcastss 4(%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm5; vfmadd231ps %%xmm2,%%xmm10,%%xmm7; vfmadd231ps %%xmm1,%%xmm10,%%xmm9;"\ + "addq $8,%0;" +#if defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA) + #define unit_kernel_endn4_k1m2n8(aoff1,aoff2,boff) \ + "vmovups "#boff"(%1,%%r12,4),%%xmm3;"\ + "vbroadcastss "#aoff1"(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm6;"\ + "vbroadcastss "#aoff2"(%0),%%xmm2; vfmadd231ps %%xmm3,%%xmm2,%%xmm7;" + #define unit_kernel_endn4_k1m2n12(aoff1,aoff2,boff) \ + "vmovups "#boff"(%1,%%r12,8),%%xmm3;"\ + "vbroadcastss "#aoff1"(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm8;"\ + "vbroadcastss "#aoff2"(%0),%%xmm2; vfmadd231ps %%xmm3,%%xmm2,%%xmm9;" + #define unit_kernel_endn8_k1m2n12(aoff1,aoff2,boff) \ + "vmovups "#boff"(%1,%%r12,4),%%xmm3; vmovups "#boff"(%1,%%r12,8),%%xmm2;"\ + "vbroadcastss "#aoff1"(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm6; vfmadd231ps %%xmm2,%%xmm1,%%xmm8;"\ + "vbroadcastss "#aoff2"(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm7; vfmadd231ps %%xmm2,%%xmm1,%%xmm9;" + #define kernel_kend_m2n8 \ + unit_kernel_endn4_k1m2n8(0,4,0) unit_kernel_endn4_k1m2n8(8,12,16)\ + unit_kernel_endn4_k1m2n8(16,20,32) unit_kernel_endn4_k1m2n8(24,28,48) + #define kernel_kend_m2n12 \ + unit_kernel_endn8_k1m2n12(0,4,0) unit_kernel_endn8_k1m2n12(8,12,16)\ + unit_kernel_endn8_k1m2n12(16,20,32) unit_kernel_endn8_k1m2n12(24,28,48)\ + unit_kernel_endn4_k1m2n12(32,36,64) unit_kernel_endn4_k1m2n12(40,44,80)\ + unit_kernel_endn4_k1m2n12(48,52,96) unit_kernel_endn4_k1m2n12(56,60,112) +#else + #define kernel_kend_m2n8 "" + #define kernel_kend_m2n12 "" +#endif +#define kernel_kend_m2n4 "" +#define kernel_kend_m2n2 "" +#define kernel_kend_m2n1 "" +#ifdef TRMMKERNEL 
+ #define unit_save_m2n4(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm1; vunpckhps "#c2","#c1",%%xmm2;"\ + "vmulps %%xmm1,%%xmm0,%%xmm1; vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmulps %%xmm2,%%xmm0,%%xmm2; vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;" +#else + #define unit_save_m2n4(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm1; vunpckhps "#c2","#c1",%%xmm2;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\ + "vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\ + "vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;" +#endif +#define SAVE_m2n4 "movq %2,%5;" unit_save_m2n4(%%xmm4,%%xmm5) +#define SAVE_m2n8 SAVE_m2n4 unit_save_m2n4(%%xmm6,%%xmm7) +#define SAVE_m2n12 SAVE_m2n8 unit_save_m2n4(%%xmm8,%%xmm9) +#define COMPUTE_m2(ndim) \ + init_update_kskip(8) INIT_m2n##ndim\ + init_set_k "movq %%r14,%1;" init_set_pa_pb_n##ndim(2)\ + kernel_kstart_n##ndim(2)\ + #ndim"222:\n\t"\ + "testq %4,%4; jz "#ndim"223f;"\ + KERNEL_k1m2n##ndim\ + "subq $4,%4; jmp "#ndim"222b;"\ + #ndim"223:\n\t"\ + kernel_kend_m2n##ndim save_set_pa_pb_n##ndim(2) SAVE_m2n##ndim "addq $8,%2;" save_update_kskip(8) + +/* m = 1 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm6 for accumulators */ +#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m1n1 \ + "vmovss (%1),%%xmm3; addq $4,%1;"\ + "vmovss (%0),%%xmm1; vfmadd231ss %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#ifdef TRMMKERNEL + #define SAVE_m1n1 "vmulss %%xmm4,%%xmm0,%%xmm4; vmovss %%xmm4,(%2);" +#else + #define SAVE_m1n1 "vfmadd213ss (%2),%%xmm0,%%xmm4; vmovss %%xmm4,(%2);" +#endif +#define INIT_m1n2 INIT_m1n1 +#define KERNEL_k1m1n2 \ + "vmovsd (%1),%%xmm3; addq $8,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#ifdef TRMMKERNEL + #define SAVE_m1n2 \ + "vmulps %%xmm4,%%xmm0,%%xmm4; vmovss %%xmm4,(%2); vextractps $1,%%xmm4,(%2,%3,1);" +#else + #define SAVE_m1n2 \ + "vmovss (%2),%%xmm3; vinsertps $16,(%2,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm4;"\ + "vmovss %%xmm4,(%2); vextractps $1,%%xmm4,(%2,%3,1);" +#endif +#define INIT_m1n4 INIT_m1n2 +#define INIT_m1n8 INIT_m1n4 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m1n12 INIT_m1n8 "vpxor %%xmm6,%%xmm6,%%xmm6;" +#define KERNEL_k1m1n4 \ + "vmovups (%1),%%xmm3; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define KERNEL_k1m1n8 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,4),%%xmm2; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4; vfmadd231ps %%xmm2,%%xmm1,%%xmm5;"\ + "addq $4,%0;" +#define KERNEL_k1m1n12 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,4),%%xmm2; vmovups (%1,%%r12,8),%%xmm1; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm4; vfmadd231ps %%xmm2,%%xmm10,%%xmm5; vfmadd231ps %%xmm1,%%xmm10,%%xmm6;"\ + "addq $4,%0;" +#if defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA) + #define unit_kernel_endn4_k1m1n8(aoff,boff) \ + "vmovups "#boff"(%1,%%r12,4),%%xmm3;"\ + "vbroadcastss "#aoff"(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm5;" + #define unit_kernel_endn4_k1m1n12(aoff,boff) \ + "vmovups "#boff"(%1,%%r12,8),%%xmm3;"\ + "vbroadcastss "#aoff"(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm6;" + #define unit_kernel_endn8_k1m1n12(aoff,boff) \ + "vmovups "#boff"(%1,%%r12,4),%%xmm3; vmovups "#boff"(%1,%%r12,8),%%xmm2;"\ + "vbroadcastss 
"#aoff"(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm5; vfmadd231ps %%xmm2,%%xmm1,%%xmm6;" + #define kernel_kend_m1n8 \ + unit_kernel_endn4_k1m1n8(0,0) unit_kernel_endn4_k1m1n8(4,16)\ + unit_kernel_endn4_k1m1n8(8,32) unit_kernel_endn4_k1m1n8(12,48) + #define kernel_kend_m1n12 \ + unit_kernel_endn8_k1m1n12(0,0) unit_kernel_endn8_k1m1n12(4,16)\ + unit_kernel_endn8_k1m1n12(8,32) unit_kernel_endn8_k1m1n12(12,48)\ + unit_kernel_endn4_k1m1n12(16,64) unit_kernel_endn4_k1m1n12(20,80)\ + unit_kernel_endn4_k1m1n12(24,96) unit_kernel_endn4_k1m1n12(28,112) +#else + #define kernel_kend_m1n8 "" + #define kernel_kend_m1n12 "" +#endif +#define kernel_kend_m1n4 "" +#define kernel_kend_m1n2 "" +#define kernel_kend_m1n1 "" +#ifdef TRMMKERNEL + #define unit_save_m1n4(c1) \ + "vpxor %%xmm10,%%xmm10,%%xmm10; vmovsd "#c1",%%xmm10,%%xmm2; vmovhlps "#c1",%%xmm10,%%xmm1;"\ + "vmulps %%xmm2,%%xmm0,%%xmm2; vmovss %%xmm2,(%5); vextractps $1,%%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmulps %%xmm1,%%xmm0,%%xmm1; vmovss %%xmm1,(%5); vextractps $1,%%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;" +#else + #define unit_save_m1n4(c1) \ + "vpxor %%xmm10,%%xmm10,%%xmm10; vmovsd "#c1",%%xmm10,%%xmm2; vmovhlps "#c1",%%xmm10,%%xmm1;"\ + "vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\ + "vmovss %%xmm2,(%5); vextractps $1,%%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\ + "vmovss %%xmm1,(%5); vextractps $1,%%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;" +#endif +#define SAVE_m1n4 "movq %2,%5;" unit_save_m1n4(%%xmm4) +#define SAVE_m1n8 SAVE_m1n4 unit_save_m1n4(%%xmm5) +#define SAVE_m1n12 SAVE_m1n8 unit_save_m1n4(%%xmm6) +#define COMPUTE_m1(ndim) \ + init_update_kskip(4) INIT_m1n##ndim\ + init_set_k "movq %%r14,%1;" init_set_pa_pb_n##ndim(1)\ + kernel_kstart_n##ndim(1)\ + #ndim"112:\n\t"\ + "testq %4,%4; jz "#ndim"113f;"\ + KERNEL_k1m1n##ndim\ + "subq $4,%4; jmp "#ndim"112b;"\ + #ndim"113:\n\t"\ + kernel_kend_m1n##ndim save_set_pa_pb_n##ndim(1) SAVE_m1n##ndim "addq $4,%2;" save_update_kskip(4) + +#define COMPUTE(ndim) {\ + HEAD_SET_OFFSET(ndim) next_b = b_pointer + ndim * K;\ + __asm__ __volatile__(\ + "vbroadcastss (%6),%%ymm0;"\ + "movq %4,%%r12; salq $2,%%r12; movq %1,%%r14; movq %8,%%r11;" INIT_SET_KSKIP\ + "cmpq $8,%%r11;jb 33101"#ndim"f;"\ + "33109"#ndim":\n\t"\ + COMPUTE_m8(ndim)\ + "subq $8,%%r11;cmpq $8,%%r11;jnb 33109"#ndim"b;"\ + "33101"#ndim":\n\t"\ + "cmpq $4,%%r11;jb 33103"#ndim"f;"\ + COMPUTE_m4(ndim)\ + "subq $4,%%r11;"\ + "33103"#ndim":\n\t"\ + "cmpq $2,%%r11;jb 33104"#ndim"f;"\ + COMPUTE_m2(ndim)\ + "subq $2,%%r11;"\ + "33104"#ndim":\n\t"\ + "testq %%r11,%%r11;jz 33105"#ndim"f;"\ + COMPUTE_m1(ndim)\ + "33105"#ndim":\n\t"\ + "movq %%r12,%4; sarq $2,%4; movq %%r14,%1; vzeroupper;"\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(const_val),"+r"(next_b)\ + :"m"(M),"m"(off):"r11","r12","r13","r14","r15",\ + "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15","cc","memory");\ + TAIL_SET_OFFSET(ndim) a_pointer -= M * K; b_pointer += ndim * K; c_pointer += (LDC * ndim - M);\ +} + +#include "common.h" +#include +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC +#ifdef TRMMKERNEL +,BLASLONG offset +#endif +){ + if(m==0||n==0) return 0; + int64_t ldc_in_bytes = (int64_t)LDC * 
sizeof(float); + float constval = alpha; + float *const_val=&constval; + int64_t M = (int64_t)m, K = (int64_t)k, off = 0; +#ifdef TRMMKERNEL + #ifdef LEFT + off = offset; + #else + off = -offset; + #endif +#endif + BLASLONG n_count = n; + float *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B; + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) COMPUTE(2) + if(n_count>0) COMPUTE(1) + return 0; +} diff --git a/kernel/x86_64/sgemm_kernel_8x4_haswell_2.c b/kernel/x86_64/sgemm_kernel_8x4_haswell_2.c new file mode 100644 index 000000000..a2e78c58d --- /dev/null +++ b/kernel/x86_64/sgemm_kernel_8x4_haswell_2.c @@ -0,0 +1,424 @@ +/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store, %6 = b_pref */ +/* r10 = tmp, r11 = m_counter, r12 = k << 2(const), r13 = tmp, r14 = b_head_pos(const), r15 = tmp */ + +/* m = 8 *//* ymm0 for alpha, ymm1-ymm3 for temporary use, ymm4-ymm15 for accumulators */ +#define KERNEL_k1m8n1 \ + "vmovups (%0),%%ymm1; addq $32,%0;"\ + "vbroadcastss (%1),%%ymm2; vfmadd231ps %%ymm1,%%ymm2,%%ymm4;"\ + "addq $4,%1;" +#define KERNEL_h_k1m8n2 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; addq $32,%0;"\ + "vbroadcastsd (%1),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;" +#define KERNEL_k1m8n2 KERNEL_h_k1m8n2 "addq $8,%1;" +#define KERNEL_h_k1m8n4 \ + KERNEL_h_k1m8n2 "vbroadcastsd 8(%1),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;" +#define KERNEL_k1m8n4 KERNEL_h_k1m8n4 "addq $16,%1;" +#define unit_kernel_k1m8n4(c1,c2,c3,c4,boff,...) \ + "vbroadcastsd "#boff"("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c1"; vfmadd231ps %%ymm2,%%ymm3,"#c2";"\ + "vbroadcastsd "#boff"+8("#__VA_ARGS__"),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,"#c3"; vfmadd231ps %%ymm2,%%ymm3,"#c4";" +#define KERNEL_h_k1m8n8 KERNEL_h_k1m8n4 unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,0,%1,%%r12,4) +#define KERNEL_k1m8n8 KERNEL_h_k1m8n8 "addq $16,%1;" +#define KERNEL_h_k1m8n12 KERNEL_h_k1m8n8 unit_kernel_k1m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,0,%1,%%r12,8) +#define KERNEL_k1m8n12 KERNEL_h_k1m8n12 "addq $16,%1;" +#define KERNEL_k2m8n4 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; prefetcht0 512(%0);"\ + unit_kernel_k1m8n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7,0,%1)\ + "vmovsldup 32(%0),%%ymm1; vmovshdup 32(%0),%%ymm2; addq $64,%0;"\ + unit_kernel_k1m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,16,%1)\ + "addq $32,%1;" +#define KERNEL_L_k1m8n6 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; prefetcht0 512(%0); addq $32,%0;"\ + "vbroadcastsd (%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastsd 8(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastsd (%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "addq $16,%1;" +#define KERNEL_L_k2m8n6 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; prefetcht0 512(%0);"\ + "vbroadcastsd (%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastsd 8(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastsd (%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "vmovsldup 32(%0),%%ymm1; vmovshdup 32(%0),%%ymm2; addq $64,%0;"\ + "vbroadcastsd 16(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm10; 
vfmadd231ps %%ymm2,%%ymm3,%%ymm11;"\ + "vbroadcastsd 24(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm12; vfmadd231ps %%ymm2,%%ymm3,%%ymm13;"\ + "vbroadcastsd 16(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm14; vfmadd231ps %%ymm2,%%ymm3,%%ymm15;"\ + "addq $32,%1;" +#define KERNEL_L_k1m16n6 \ + "vmovups (%0),%%ymm1; vmovups (%0,%%r12,8),%%ymm2; prefetcht0 512(%0,%%r12,8); addq $32,%0;"\ + "vbroadcastss (%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastss 4(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastss 8(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "vbroadcastss 12(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm10; vfmadd231ps %%ymm2,%%ymm3,%%ymm11;"\ + "vbroadcastss (%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm12; vfmadd231ps %%ymm2,%%ymm3,%%ymm13;"\ + "vbroadcastss 4(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm14; vfmadd231ps %%ymm2,%%ymm3,%%ymm15;"\ + "addq $16,%1;" +#define KERNEL_L_k2m16n6 \ + "vmovups (%0),%%ymm1; vmovups (%0,%%r12,8),%%ymm2; prefetcht0 512(%0,%%r12,8);"\ + "vbroadcastss (%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastss 4(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastss 8(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "vbroadcastss 12(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm10; vfmadd231ps %%ymm2,%%ymm3,%%ymm11;"\ + "vbroadcastss (%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm12; vfmadd231ps %%ymm2,%%ymm3,%%ymm13;"\ + "vbroadcastss 4(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm14; vfmadd231ps %%ymm2,%%ymm3,%%ymm15;"\ + "vmovups 32(%0),%%ymm1; vmovups 32(%0,%%r12,8),%%ymm2; addq $64,%0;"\ + "vbroadcastss 16(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastss 20(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastss 24(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "vbroadcastss 28(%1) ,%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm10; vfmadd231ps %%ymm2,%%ymm3,%%ymm11;"\ + "vbroadcastss 16(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm12; vfmadd231ps %%ymm2,%%ymm3,%%ymm13;"\ + "vbroadcastss 20(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm14; vfmadd231ps %%ymm2,%%ymm3,%%ymm15;"\ + "addq $32,%1;" +#define KERNEL_R_k1m16n6 \ + "vmovups (%0),%%ymm1; vmovups (%0,%%r12,8),%%ymm2; prefetcht0 512(%0,%%r12,8); addq $32,%0;"\ + "vbroadcastss 8(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastss 12(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastss (%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "vbroadcastss 4(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm10; vfmadd231ps %%ymm2,%%ymm3,%%ymm11;"\ + "vbroadcastss 8(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm12; vfmadd231ps %%ymm2,%%ymm3,%%ymm13;"\ + "vbroadcastss 12(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm14; vfmadd231ps %%ymm2,%%ymm3,%%ymm15;"\ + "addq $16,%1;" +#define KERNEL_R_k2m16n6 \ + "vmovups (%0),%%ymm1; vmovups (%0,%%r12,8),%%ymm2; prefetcht0 512(%0,%%r12,8);"\ + "vbroadcastss 8(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastss 12(%1,%%r12,4),%%ymm3; 
vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastss (%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "vbroadcastss 4(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm10; vfmadd231ps %%ymm2,%%ymm3,%%ymm11;"\ + "vbroadcastss 8(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm12; vfmadd231ps %%ymm2,%%ymm3,%%ymm13;"\ + "vbroadcastss 12(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm14; vfmadd231ps %%ymm2,%%ymm3,%%ymm15;"\ + "vmovups 32(%0),%%ymm1; vmovups 32(%0,%%r12,8),%%ymm2; addq $64,%0;"\ + "vbroadcastss 24(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastss 28(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastss 16(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "vbroadcastss 20(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm10; vfmadd231ps %%ymm2,%%ymm3,%%ymm11;"\ + "vbroadcastss 24(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm12; vfmadd231ps %%ymm2,%%ymm3,%%ymm13;"\ + "vbroadcastss 28(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm14; vfmadd231ps %%ymm2,%%ymm3,%%ymm15;"\ + "addq $32,%1;" +#define KERNEL_R_k1m8n6 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; prefetcht0 512(%0); addq $32,%0;"\ + "vbroadcastsd 8(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastsd (%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastsd 8(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "addq $16,%1;" +#define KERNEL_R_k2m8n6 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2; prefetcht0 512(%0);"\ + "vbroadcastsd 8(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm4; vfmadd231ps %%ymm2,%%ymm3,%%ymm5;"\ + "vbroadcastsd (%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm6; vfmadd231ps %%ymm2,%%ymm3,%%ymm7;"\ + "vbroadcastsd 8(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm8; vfmadd231ps %%ymm2,%%ymm3,%%ymm9;"\ + "vmovsldup 32(%0),%%ymm1; vmovshdup 32(%0),%%ymm2; addq $64,%0;"\ + "vbroadcastsd 24(%1,%%r12,4),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm10; vfmadd231ps %%ymm2,%%ymm3,%%ymm11;"\ + "vbroadcastsd 16(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm12; vfmadd231ps %%ymm2,%%ymm3,%%ymm13;"\ + "vbroadcastsd 24(%1,%%r12,8),%%ymm3; vfmadd231ps %%ymm1,%%ymm3,%%ymm14; vfmadd231ps %%ymm2,%%ymm3,%%ymm15;"\ + "addq $32,%1;" +#define INIT_m8n1 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define INIT_m8n2 INIT_m8n1 "vpxor %%ymm5,%%ymm5,%%ymm5;" +#define unit_init_m8n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m8n8 unit_init_m8n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7) unit_init_m8n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11) +#define INIT_m8n4 INIT_m8n8 +#define INIT_m8n12 INIT_m8n8 unit_init_m8n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15) +#define INIT_m8n6 INIT_m8n12 +#define INIT_m16n6 INIT_m8n12 +#define SAVE_m8n1 "vfmadd213ps (%2),%%ymm0,%%ymm4; vmovups %%ymm4,(%2);" +#define unit_save_m8n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%ymm2; vunpckhps "#c2","#c1",%%ymm3; vunpcklpd %%ymm3,%%ymm2,"#c1"; vunpckhpd %%ymm3,%%ymm2,"#c2";"\ + "vfmadd213ps (%5),%%ymm0,"#c1"; vfmadd213ps (%5,%3,1),%%ymm0,"#c2"; vmovups "#c1",(%5); vmovups "#c2",(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_m8n2 "movq %2,%5;" unit_save_m8n2(%%ymm4,%%ymm5) +#define SAVE_m8n4 "movq %2,%5;"\ + "vaddps 
%%ymm4,%%ymm8,%%ymm4; vaddps %%ymm5,%%ymm9,%%ymm5; vaddps %%ymm6,%%ymm10,%%ymm6; vaddps %%ymm7,%%ymm11,%%ymm7;"\ + unit_save_m8n2(%%ymm4,%%ymm5) unit_save_m8n2(%%ymm6,%%ymm7) +#define SAVE_m8n8 "movq %2,%5;"\ + unit_save_m8n2(%%ymm4,%%ymm5) unit_save_m8n2(%%ymm6,%%ymm7) unit_save_m8n2(%%ymm8,%%ymm9) unit_save_m8n2(%%ymm10,%%ymm11) +#define SAVE_m8n12 SAVE_m8n8 unit_save_m8n2(%%ymm12,%%ymm13) unit_save_m8n2(%%ymm14,%%ymm15) +#define unit_save_m16n2(c1,c2,c3,c4) \ + "vfmadd213ps (%5),%%ymm0,"#c1"; vfmadd213ps 32(%5),%%ymm0,"#c2"; vmovups "#c1",(%5); vmovups "#c2",32(%5);"\ + "vfmadd213ps (%5,%3,1),%%ymm0,"#c3"; vfmadd213ps 32(%5,%3,1),%%ymm0,"#c4"; vmovups "#c3",(%5,%3,1); vmovups "#c4",32(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_L_m16n6 "movq %2,%5;"\ + unit_save_m16n2(%%ymm4,%%ymm5,%%ymm6,%%ymm7) unit_save_m16n2(%%ymm8,%%ymm9,%%ymm10,%%ymm11) unit_save_m16n2(%%ymm12,%%ymm13,%%ymm14,%%ymm15) +#define SAVE_R_m16n6 "leaq (%2,%3,4),%5; leaq (%5,%3,2),%5;"\ + unit_save_m16n2(%%ymm4,%%ymm5,%%ymm6,%%ymm7) unit_save_m16n2(%%ymm8,%%ymm9,%%ymm10,%%ymm11) unit_save_m16n2(%%ymm12,%%ymm13,%%ymm14,%%ymm15) +#define SAVE_L_m8n6 "movq %2,%5;"\ + "vaddps %%ymm4,%%ymm10,%%ymm4; vaddps %%ymm5,%%ymm11,%%ymm5; vaddps %%ymm6,%%ymm12,%%ymm6;"\ + "vaddps %%ymm7,%%ymm13,%%ymm7; vaddps %%ymm8,%%ymm14,%%ymm8; vaddps %%ymm9,%%ymm15,%%ymm9;"\ + unit_save_m8n2(%%ymm4,%%ymm5) unit_save_m8n2(%%ymm6,%%ymm7) unit_save_m8n2(%%ymm8,%%ymm9) +#define SAVE_R_m8n6 "leaq (%2,%3,4),%5; leaq (%5,%3,2),%5;"\ + "vaddps %%ymm4,%%ymm10,%%ymm4; vaddps %%ymm5,%%ymm11,%%ymm5; vaddps %%ymm6,%%ymm12,%%ymm6;"\ + "vaddps %%ymm7,%%ymm13,%%ymm7; vaddps %%ymm8,%%ymm14,%%ymm8; vaddps %%ymm9,%%ymm15,%%ymm9;"\ + unit_save_m8n2(%%ymm4,%%ymm5) unit_save_m8n2(%%ymm6,%%ymm7) unit_save_m8n2(%%ymm8,%%ymm9) + +/* m = 4 *//* xmm0 for alpha, xmm1-xmm3 for temporary use, xmm4-xmm15 for accumulators */ +#define KERNEL_k1m4n1 \ + "vmovups (%0),%%xmm1; addq $16,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,%1;" +#define KERNEL_h_k1m4n2 \ + "vmovsldup (%0),%%xmm1; vmovshdup (%0),%%xmm2; addq $16,%0;"\ + "vmovddup (%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm4; vfmadd231ps %%xmm2,%%xmm3,%%xmm5;" +#define KERNEL_k1m4n2 KERNEL_h_k1m4n2 "addq $8,%1;" +#define KERNEL_h_k1m4n4 \ + KERNEL_h_k1m4n2 "vmovddup 8(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm6; vfmadd231ps %%xmm2,%%xmm3,%%xmm7;" +#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;" +#define unit_kernel_k1m4n4(c1,c2,c3,c4,...) 
\ + "vmovddup ("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c1"; vfmadd231ps %%xmm2,%%xmm3,"#c2";"\ + "vmovddup 8("#__VA_ARGS__"),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,"#c3"; vfmadd231ps %%xmm2,%%xmm3,"#c4";" +#define KERNEL_h_k1m4n8 KERNEL_h_k1m4n4 unit_kernel_k1m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11,%1,%%r12,4) +#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $16,%1;" +#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n8 unit_kernel_k1m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15,%1,%%r12,8) +#define KERNEL_k1m4n12 KERNEL_h_k1m4n12 "addq $16,%1;" +#define INIT_m4n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define INIT_m4n2 INIT_m4n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m4n4 INIT_m4n2 "vpxor %%xmm6,%%xmm6,%%xmm6;vpxor %%xmm7,%%xmm7,%%xmm7;" +#define unit_init_m4n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m4n8 INIT_m4n4 unit_init_m4n4(%%xmm8,%%xmm9,%%xmm10,%%xmm11) +#define INIT_m4n12 INIT_m4n8 unit_init_m4n4(%%xmm12,%%xmm13,%%xmm14,%%xmm15) +#define SAVE_m4n1 "vfmadd213ps (%2),%%xmm0,%%xmm4; vmovups %%xmm4,(%2);" +#define unit_save_m4n2(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm2; vunpckhps "#c2","#c1",%%xmm3; vunpcklpd %%xmm3,%%xmm2,"#c1"; vunpckhpd %%xmm3,%%xmm2,"#c2";"\ + "vfmadd213ps (%5),%%xmm0,"#c1"; vmovups "#c1",(%5);"\ + "vfmadd213ps (%5,%3,1),%%xmm0,"#c2"; vmovups "#c2",(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#define SAVE_m4n2 "movq %2,%5;" unit_save_m4n2(%%xmm4,%%xmm5) +#define SAVE_m4n4 SAVE_m4n2 unit_save_m4n2(%%xmm6,%%xmm7) +#define SAVE_m4n8 SAVE_m4n4 unit_save_m4n2(%%xmm8,%%xmm9) unit_save_m4n2(%%xmm10,%%xmm11) +#define SAVE_m4n12 SAVE_m4n8 unit_save_m4n2(%%xmm12,%%xmm13) unit_save_m4n2(%%xmm14,%%xmm15) + +/* m = 2 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm9 for accumulators */ +#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m2n1 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "addq $4,%1;" +#define SAVE_m2n1 "vmovsd (%2),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);" +#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define KERNEL_k1m2n2 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\ + "vbroadcastss 4(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm5;"\ + "addq $8,%1;" +#define SAVE_m2n2 SAVE_m2n1 "vmovsd (%2,%3,1),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm5; vmovsd %%xmm5,(%2,%3,1);" +#define INIT_m2n4 INIT_m2n2 +#define INIT_m2n8 INIT_m2n4 "vpxor %%xmm6,%%xmm6,%%xmm6; vpxor %%xmm7,%%xmm7,%%xmm7;" +#define INIT_m2n12 INIT_m2n8 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;" +#define KERNEL_k1m2n4 \ + "vmovups (%1),%%xmm3; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "vbroadcastss 4(%0),%%xmm2; vfmadd231ps %%xmm3,%%xmm2,%%xmm5;"\ + "addq $8,%0;" +#define KERNEL_k1m2n8 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,4),%%xmm2; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4; vfmadd231ps %%xmm2,%%xmm1,%%xmm6;"\ + "vbroadcastss 4(%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm5; vfmadd231ps %%xmm2,%%xmm1,%%xmm7;"\ + "addq $8,%0;" +#define KERNEL_k1m2n12 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,4),%%xmm2; vmovups (%1,%%r12,8),%%xmm1; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm4; vfmadd231ps %%xmm2,%%xmm10,%%xmm6; vfmadd231ps %%xmm1,%%xmm10,%%xmm8;"\ + "vbroadcastss 4(%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm5; vfmadd231ps 
%%xmm2,%%xmm10,%%xmm7; vfmadd231ps %%xmm1,%%xmm10,%%xmm9;"\ + "addq $8,%0;" +#define unit_save_m2n4(c1,c2) \ + "vunpcklps "#c2","#c1",%%xmm1; vunpckhps "#c2","#c1",%%xmm2;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\ + "vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\ + "vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_m2n4 "movq %2,%5;" unit_save_m2n4(%%xmm4,%%xmm5) +#define SAVE_m2n8 SAVE_m2n4 unit_save_m2n4(%%xmm6,%%xmm7) +#define SAVE_m2n12 SAVE_m2n8 unit_save_m2n4(%%xmm8,%%xmm9) + +/* m = 1 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm6 for accumulators */ +#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define KERNEL_k1m1n1 \ + "vmovss (%1),%%xmm3; addq $4,%1;"\ + "vmovss (%0),%%xmm1; vfmadd231ss %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define SAVE_m1n1 "vfmadd213ss (%2),%%xmm0,%%xmm4; vmovss %%xmm4,(%2);" +#define INIT_m1n2 INIT_m1n1 +#define KERNEL_k1m1n2 \ + "vmovsd (%1),%%xmm3; addq $8,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define SAVE_m1n2 \ + "vmovss (%2),%%xmm3; vinsertps $16,(%2,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm4;"\ + "vmovss %%xmm4,(%2); vextractps $1,%%xmm4,(%2,%3,1);" +#define INIT_m1n4 INIT_m1n2 +#define INIT_m1n8 INIT_m1n4 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m1n12 INIT_m1n8 "vpxor %%xmm6,%%xmm6,%%xmm6;" +#define KERNEL_k1m1n4 \ + "vmovups (%1),%%xmm3; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\ + "addq $4,%0;" +#define KERNEL_k1m1n8 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,4),%%xmm2; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4; vfmadd231ps %%xmm2,%%xmm1,%%xmm5;"\ + "addq $4,%0;" +#define KERNEL_k1m1n12 \ + "vmovups (%1),%%xmm3; vmovups (%1,%%r12,4),%%xmm2; vmovups (%1,%%r12,8),%%xmm1; addq $16,%1;"\ + "vbroadcastss (%0),%%xmm10; vfmadd231ps %%xmm3,%%xmm10,%%xmm4; vfmadd231ps %%xmm2,%%xmm10,%%xmm5; vfmadd231ps %%xmm1,%%xmm10,%%xmm6;"\ + "addq $4,%0;" +#define unit_save_m1n4(c1) \ + "vpxor %%xmm10,%%xmm10,%%xmm10; vmovsd "#c1",%%xmm10,%%xmm2; vmovhlps "#c1",%%xmm10,%%xmm1;"\ + "vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\ + "vmovss %%xmm2,(%5); vextractps $1,%%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\ + "vmovss %%xmm1,(%5); vextractps $1,%%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_m1n4 "movq %2,%5;" unit_save_m1n4(%%xmm4) +#define SAVE_m1n8 SAVE_m1n4 unit_save_m1n4(%%xmm5) +#define SAVE_m1n12 SAVE_m1n8 unit_save_m1n4(%%xmm6) + +/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store, %6 = b_pref */ +/* r10 = tmp, r11 = m_counter, r12 = k << 2(const), r13 = tmp, r14 = b_head_pos(const), r15 = tmp */ + +#define COMPUTE_SIMPLE(mdim,ndim) \ + "movq %%r12,%4; sarq $2,%4; movq %%r14,%1;" INIT_m##mdim##n##ndim\ + "testq %4,%4; jz 7"#mdim"7"#ndim"2f;"\ + "7"#mdim"7"#ndim"1:\n\t"\ + KERNEL_k1m##mdim##n##ndim "decq %4; jnz 7"#mdim"7"#ndim"1b;"\ + "7"#mdim"7"#ndim"2:\n\t"\ + SAVE_m##mdim##n##ndim "addq $"#mdim"*4,%2;" +#define COMPUTE_m8n1 COMPUTE_SIMPLE(8,1) +#define COMPUTE_m8n2 COMPUTE_SIMPLE(8,2) +#define COMPUTE_m8n8 COMPUTE_SIMPLE(8,8) +#define COMPUTE_m8n12 COMPUTE_SIMPLE(8,12) +#define COMPUTE_m8n4 \ + 
"movq %%r12,%4; sarq $2,%4; movq %%r14,%1;" INIT_m8n4\ + "cmpq $8,%4; jb 78740f;"\ + "78749:\n\t"\ + KERNEL_k2m8n4 KERNEL_k2m8n4 KERNEL_k2m8n4 KERNEL_k2m8n4\ + "subq $8,%4; cmpq $8,%4; jnb 78749b;"\ + "78740:\n\t"\ + "testq %4,%4; jz 78742f;"\ + "78741:\n\t"\ + KERNEL_k1m8n4 "decq %4; jnz 78741b;"\ + "78742:\n\t"\ + SAVE_m8n4 "addq $32,%2;" +#define COMPUTE_L_m16n6 \ + "movq %%r12,%%r13; sarq $2,%%r13; movq %%r14,%1;" INIT_m16n6\ + "movq %%r13,%4; movq %2,%5; cmpq $16,%%r13; jb 7116762f; movq $14,%4;"\ + "7116761:\n\t"\ + KERNEL_L_k2m16n6 "prefetcht0 128(%1); testq $24,%4; movq $84,%%r15; cmovz %3,%%r15;"\ + KERNEL_L_k2m16n6 "prefetcht1 (%5); subq $63,%5; addq %%r15,%5;"\ + KERNEL_L_k2m16n6 "prefetcht0 128(%1); prefetcht1 (%6); cmpq $198,%4; cmoveq %2,%5;"\ + KERNEL_L_k2m16n6 "addq $16,%6; addq $8,%4; cmpq %4,%%r13; jnb 7116761b;"\ + "movq %2,%5; negq %4; leaq 14(%%r13,%4,1),%4;"\ + "7116762:\n\t"\ + "xorq %%r15,%%r15; testq %4,%4; jz 7116764f;"\ + "7116763:\n\t"\ + "prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5; incq %%r15;"\ + KERNEL_L_k1m16n6 "cmpq $6,%%r15; cmoveq %2,%5; decq %4; jnz 7116763b;"\ + "7116764:\n\t"\ + SAVE_L_m16n6 "addq $32,%2;" +#define COMPUTE_R_m16n6 \ + "movq %%r12,%%r13; sarq $2,%%r13; movq %%r14,%1;" INIT_m16n6\ + "movq %%r13,%4; leaq (%2,%3,4),%5; leaq (%5,%3,2),%5; movq %5,%%r10; cmpq $16,%%r13; jb 7216762f; movq $14,%4;"\ + "7216761:\n\t"\ + KERNEL_R_k2m16n6 "prefetcht0 128(%1,%%r12,8); testq $24,%4; movq $84,%%r15; cmovz %3,%%r15;"\ + KERNEL_R_k2m16n6 "prefetcht1 (%5); subq $63,%5; addq %%r15,%5;"\ + KERNEL_R_k2m16n6 "prefetcht0 128(%1,%%r12,8); prefetcht1 (%6); cmpq $198,%4; cmoveq %%r10,%5;"\ + KERNEL_R_k2m16n6 "addq $16,%6; addq $8,%4; cmpq %4,%%r13; jnb 7216761b;"\ + "movq %%r10,%5; negq %4; leaq 14(%%r13,%4,1),%4;"\ + "7216762:\n\t"\ + "xorq %%r15,%%r15; testq %4,%4; jz 7216764f;"\ + "7216763:\n\t"\ + "prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5; incq %%r15;"\ + KERNEL_R_k1m16n6 "cmpq $6,%%r15; cmoveq %%r10,%5; decq %4; jnz 7216763b;"\ + "7216764:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14);" SAVE_R_m16n6 "addq $32,%2;" +#define COMPUTE_H_m8n6 \ + "movq %%r12,%4; sarq $2,%4; movq %%r14,%1;" INIT_m8n6\ + "cmpq $8,%4; jb 718760f; movq %2,%5; xorq %%r15,%%r15;"\ + "718769:\n\t"\ + KERNEL_L_k2m8n6 KERNEL_L_k2m8n6 "cmpq $62,%%r15; movq $62,%%r15; cmoveq %3,%%r15;"\ + KERNEL_L_k2m8n6 KERNEL_L_k2m8n6 "prefetcht2 (%5); leaq -31(%5,%%r15,1),%5;"\ + "subq $8,%4; cmpq $8,%4; jnb 718769b;"\ + "718760:\n\t"\ + "testq %4,%4; jz 718762f;"\ + "718761:\n\t"\ + KERNEL_L_k1m8n6 "decq %4; jnz 718761b;"\ + "718762:\n\t"\ + SAVE_L_m8n6 "negq %%r12; leaq (%0,%%r12,8),%0; negq %%r12;" +#define COMPUTE_T_m8n6(side,sim) \ + "movq %%r12,%4; sarq $2,%4; movq %%r14,%1;" INIT_m8n6\ + "cmpq $8,%4; jb 72"#sim"8760f;"\ + "72"#sim"8769:\n\t"\ + KERNEL_##side##_k2m8n6 KERNEL_##side##_k2m8n6 KERNEL_##side##_k2m8n6 KERNEL_##side##_k2m8n6\ + "subq $8,%4; cmpq $8,%4; jnb 72"#sim"8769b;"\ + "72"#sim"8760:\n\t"\ + "testq %4,%4; jz 72"#sim"8762f;"\ + "72"#sim"8761:\n\t"\ + KERNEL_##side##_k1m8n6 "decq %4; jnz 72"#sim"8761b;"\ + "72"#sim"8762:\n\t"\ + SAVE_##side##_m8n6 "addq $32,%2;" +#define COMPUTE_NORMAL(ndim) {\ + next_b = b_pointer + ndim * K;\ + __asm__ __volatile__(\ + "vbroadcastss %9,%%ymm0;"\ + "movq %8,%%r12; salq $2,%%r12; movq %1,%%r14; movq %7,%%r11;"\ + "cmpq $8,%%r11;jb 33101"#ndim"f;"\ + "33109"#ndim":\n\t"\ + COMPUTE_m8n##ndim\ + "subq $8,%%r11;cmpq $8,%%r11;jnb 33109"#ndim"b;"\ + "33101"#ndim":\n\t"\ + "cmpq $4,%%r11;jb 33103"#ndim"f;"\ + COMPUTE_SIMPLE(4,ndim) "subq 
$4,%%r11;"\ + "33103"#ndim":\n\t"\ + "cmpq $2,%%r11;jb 33104"#ndim"f;"\ + COMPUTE_SIMPLE(2,ndim) "subq $2,%%r11;"\ + "33104"#ndim":\n\t"\ + "testq %%r11,%%r11;jz 33105"#ndim"f;"\ + COMPUTE_SIMPLE(1,ndim)\ + "33105"#ndim":\n\t"\ + "movq %%r14,%1; vzeroupper;"\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(k_count),"+r"(ctemp),"+r"(next_b)\ + :"m"(M),"m"(K),"m"(ALPHA):"r10","r11","r12","r13","r14","r15",\ + "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15","cc","memory");\ + a_pointer -= M * K; b_pointer += ndim * K; c_pointer += (LDC * ndim - M);\ +} +#define COMPUTE_n12 {\ + next_b = b_pointer + 12 * K;\ + __asm__ __volatile__(\ + "vbroadcastss %9,%%ymm0;"\ + "movq %8,%%r12; salq $2,%%r12; movq %1,%%r14; movq %7,%%r11;"\ + "cmpq $16,%%r11;jb 3310112f;"\ + COMPUTE_H_m8n6\ + "3310612:\n\t"\ + COMPUTE_R_m16n6 "subq $8,%%r11; cmpq $16,%%r11;jb 3310712f;"\ + COMPUTE_L_m16n6 "subq $8,%%r11; cmpq $16,%%r11;jnb 3310612b;"\ + COMPUTE_T_m8n6(R,5) "subq $8,%%r11; jmp 3310212f;"\ + "3310712:\n\t"\ + COMPUTE_T_m8n6(L,7) "subq $8,%%r11; jmp 3310212f;"\ + "3310112:\n\t"\ + "cmpq $8,%%r11;jb 3310212f;"\ + COMPUTE_SIMPLE(8,12) "subq $8,%%r11;"\ + "3310212:\n\t"\ + "cmpq $4,%%r11;jb 3310312f;"\ + COMPUTE_SIMPLE(4,12) "subq $4,%%r11;"\ + "3310312:\n\t"\ + "cmpq $2,%%r11;jb 3310412f;"\ + COMPUTE_SIMPLE(2,12) "subq $2,%%r11;"\ + "3310412:\n\t"\ + "testq %%r11,%%r11;jz 3310512f;"\ + COMPUTE_SIMPLE(1,12)\ + "3310512:\n\t"\ + "movq %%r14,%1; vzeroupper;"\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(k_count),"+r"(ctemp),"+r"(next_b)\ + :"m"(M),"m"(K),"m"(ALPHA):"r10","r11","r12","r13","r14","r15",\ + "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15","cc","memory");\ + a_pointer -= M * K; b_pointer += 12 * K; c_pointer += (LDC * 12 - M);\ +} + +#include "common.h" +#include +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC){ + if(m==0||n==0||k==0||alpha==(float)0.0) return 0; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(float); + float ALPHA = alpha; + int64_t M = (int64_t)m, K = (int64_t)k, k_count = 0; + BLASLONG n_count = n; + float *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B; + for(;n_count>11;n_count-=12) COMPUTE_n12 + for(;n_count>7;n_count-=8) COMPUTE_NORMAL(8) + for(;n_count>3;n_count-=4) COMPUTE_NORMAL(4) + for(;n_count>1;n_count-=2) COMPUTE_NORMAL(2) + if(n_count>0) COMPUTE_NORMAL(1) + return 0; +} + diff --git a/kernel/x86_64/sgemv_n_4.c b/kernel/x86_64/sgemv_n_4.c index 63697970f..3eec21774 100644 --- a/kernel/x86_64/sgemv_n_4.c +++ b/kernel/x86_64/sgemv_n_4.c @@ -35,7 +35,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "sgemv_n_microk_nehalem-4.c" #elif defined(SANDYBRIDGE) #include "sgemv_n_microk_sandy-4.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "sgemv_n_microk_haswell-4.c" #endif diff --git a/kernel/x86_64/sgemv_n_microk_haswell-4.c b/kernel/x86_64/sgemv_n_microk_haswell-4.c index 93e1e26e8..556dcfde5 100644 --- a/kernel/x86_64/sgemv_n_microk_haswell-4.c +++ b/kernel/x86_64/sgemv_n_microk_haswell-4.c @@ -164,11 +164,9 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO "r" (ap[3]), // 8 "r" (alpha) // 9 : "cc", - "%xmm0", "%xmm1", - "%xmm2", "%xmm3", - "%xmm4", "%xmm5", - "%xmm6", "%xmm7", - "%xmm8", "%xmm9", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); @@ -286,9 +284,9 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT "r" (ap[3]), // 7 "r" (alpha) // 8 : "cc", - "%xmm4", "%xmm5", - "%xmm6", "%xmm7", - "%xmm8", "%xmm9", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/sgemv_t_4.c b/kernel/x86_64/sgemv_t_4.c index 86ecaf516..fe886f57f 100644 --- a/kernel/x86_64/sgemv_t_4.c +++ b/kernel/x86_64/sgemv_t_4.c @@ -34,7 +34,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "sgemv_t_microk_bulldozer-4.c" #elif defined(SANDYBRIDGE) #include "sgemv_t_microk_sandy-4.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "sgemv_t_microk_haswell-4.c" #endif diff --git a/kernel/x86_64/sgemv_t_microk_haswell-4.c b/kernel/x86_64/sgemv_t_microk_haswell-4.c index 8c370b4c0..fcabc0def 100644 --- a/kernel/x86_64/sgemv_t_microk_haswell-4.c +++ b/kernel/x86_64/sgemv_t_microk_haswell-4.c @@ -138,7 +138,9 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) "r" (ap[2]), // 6 "r" (ap[3]) // 7 : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/srot.c b/kernel/x86_64/srot.c new file mode 100644 index 000000000..3264d251a --- /dev/null +++ b/kernel/x86_64/srot.c @@ -0,0 +1,207 @@ +#include "common.h" + +#if defined(SKYLAKEX) +#include "srot_microk_skylakex-2.c" +#elif defined(HASWELL) +#include "srot_microk_haswell-2.c" +#endif + +#ifndef HAVE_SROT_KERNEL +#include"../simd/intrin.h" + +static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s) +{ + BLASLONG i = 0; + +#if V_SIMD && !defined(C_PGI) && (defined(HAVE_FMA3) || V_SIMD > 128) + const int vstep = v_nlanes_f32; + const int unrollx4 = n & (-vstep * 4); + const int unrollx = n & -vstep; + + v_f32 __c = v_setall_f32(c); + v_f32 __s = v_setall_f32(s); + v_f32 vx0, vx1, vx2, vx3; + v_f32 vy0, vy1, vy2, vy3; + v_f32 vt0, vt1, vt2, vt3; + + for (; i < unrollx4; i += vstep * 4) { + vx0 = v_loadu_f32(x + i); + vx1 = v_loadu_f32(x + i + vstep); + vx2 = v_loadu_f32(x + i + vstep * 2); + vx3 = v_loadu_f32(x + i + vstep * 3); + vy0 = v_loadu_f32(y + i); + vy1 = v_loadu_f32(y + i + vstep); + vy2 = v_loadu_f32(y + i + vstep * 2); + vy3 = v_loadu_f32(y + i + vstep * 3); + + vt0 = v_mul_f32(__s, vy0); + vt1 = 
v_mul_f32(__s, vy1); + vt2 = v_mul_f32(__s, vy2); + vt3 = v_mul_f32(__s, vy3); + + vt0 = v_muladd_f32(__c, vx0, vt0); + vt1 = v_muladd_f32(__c, vx1, vt1); + vt2 = v_muladd_f32(__c, vx2, vt2); + vt3 = v_muladd_f32(__c, vx3, vt3); + + v_storeu_f32(x + i, vt0); + v_storeu_f32(x + i + vstep, vt1); + v_storeu_f32(x + i + vstep * 2, vt2); + v_storeu_f32(x + i + vstep * 3, vt3); + + vt0 = v_mul_f32(__s, vx0); + vt1 = v_mul_f32(__s, vx1); + vt2 = v_mul_f32(__s, vx2); + vt3 = v_mul_f32(__s, vx3); + + vt0 = v_mulsub_f32(__c, vy0, vt0); + vt1 = v_mulsub_f32(__c, vy1, vt1); + vt2 = v_mulsub_f32(__c, vy2, vt2); + vt3 = v_mulsub_f32(__c, vy3, vt3); + + v_storeu_f32(y + i, vt0); + v_storeu_f32(y + i + vstep, vt1); + v_storeu_f32(y + i + vstep * 2, vt2); + v_storeu_f32(y + i + vstep * 3, vt3); + + } + + for (; i < unrollx; i += vstep) { + vx0 = v_loadu_f32(x + i); + vy0 = v_loadu_f32(y + i); + + vt0 = v_mul_f32(__s, vy0); + vt0 = v_muladd_f32(__c, vx0, vt0); + v_storeu_f32(x + i, vt0); + + vt0 = v_mul_f32(__s, vx0); + vt0 = v_mulsub_f32(__c, vy0, vt0); + v_storeu_f32(y + i, vt0); + } +#else + FLOAT f0, f1, f2, f3; + FLOAT x0, x1, x2, x3; + FLOAT g0, g1, g2, g3; + FLOAT y0, y1, y2, y3; + + FLOAT* xp = x; + FLOAT* yp = y; + + BLASLONG n1 = n & (~7); + while (i < n1) { + x0 = xp[0]; + y0 = yp[0]; + x1 = xp[1]; + y1 = yp[1]; + x2 = xp[2]; + y2 = yp[2]; + x3 = xp[3]; + y3 = yp[3]; + + f0 = c*x0 + s*y0; + g0 = c*y0 - s*x0; + f1 = c*x1 + s*y1; + g1 = c*y1 - s*x1; + f2 = c*x2 + s*y2; + g2 = c*y2 - s*x2; + f3 = c*x3 + s*y3; + g3 = c*y3 - s*x3; + + xp[0] = f0; + yp[0] = g0; + xp[1] = f1; + yp[1] = g1; + xp[2] = f2; + yp[2] = g2; + xp[3] = f3; + yp[3] = g3; + + xp += 4; + yp += 4; + i += 4; + } +#endif + + while (i < n) { + FLOAT temp = c*x[i] + s*y[i]; + y[i] = c*y[i] - s*x[i]; + x[i] = temp; + + i++; + } +} + +#endif +static void rot_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s) +{ + BLASLONG i = 0; + BLASLONG ix = 0, iy = 0; + + FLOAT temp; + + if (n <= 0) + return; + if ((inc_x == 1) && (inc_y == 1)) { + srot_kernel(n, x, y, c, s); + } + else { + while (i < n) { + temp = c * x[ix] + s * y[iy]; + y[iy] = c * y[iy] - s * x[ix]; + x[ix] = temp; + + ix += inc_x; + iy += inc_y; + i++; + } + } + return; +} + + +#if defined(SMP) +static int rot_thread_function(blas_arg_t *args) +{ + + rot_compute(args->m, + args->a, args->lda, + args->b, args->ldb, + ((float *)args->alpha)[0], + ((float *)args->alpha)[1]); + return 0; +} + +extern int blas_level1_thread(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, void *c, BLASLONG ldc, int (*function)(), int nthreads); +#endif +int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s) +{ +#if defined(SMP) + int nthreads; + FLOAT alpha[2]={c, s}; + FLOAT dummy_c; +#endif + +#if defined(SMP) + if (inc_x == 0 || inc_y == 0 || n <= 100000) { + nthreads = 1; + } + else { + nthreads = num_cpu_avail(1); + } + + if (nthreads == 1) { + rot_compute(n, x, inc_x, y, inc_y, c, s); + } + else { +#if defined(DOUBLE) + int mode = BLAS_DOUBLE | BLAS_REAL | BLAS_PTHREAD; +#else + int mode = BLAS_SINGLE | BLAS_REAL | BLAS_PTHREAD; +#endif + blas_level1_thread(mode, n, 0, 0, alpha, x, inc_x, y, inc_y, &dummy_c, 0, (void *)rot_thread_function, nthreads); + } +#else + rot_compute(n, x, inc_x, y, inc_y, c, s); +#endif + return 0; +} diff --git a/kernel/x86_64/srot_microk_haswell-2.c b/kernel/x86_64/srot_microk_haswell-2.c new file mode 100644 index 000000000..8e245cc8f 
--- /dev/null
+++ b/kernel/x86_64/srot_microk_haswell-2.c
@@ -0,0 +1,87 @@
+/* need a new enough GCC for avx512 support */
+#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_SROT_KERNEL 1
+
+#include <immintrin.h>
+#include <stdint.h>
+
+static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
+{
+ BLASLONG i = 0;
+
+ BLASLONG tail_index_8 = n&(~7);
+ BLASLONG tail_index_32 = n&(~31);
+
+ __m256 c_256, s_256;
+ if (n >= 8) {
+ c_256 = _mm256_set1_ps(c);
+ s_256 = _mm256_set1_ps(s);
+ }
+
+ __m256 x0, x1, x2, x3;
+ __m256 y0, y1, y2, y3;
+ __m256 t0, t1, t2, t3;
+
+ for (i = 0; i < tail_index_32; i += 32) {
+ x0 = _mm256_loadu_ps(&x[i + 0]);
+ x1 = _mm256_loadu_ps(&x[i + 8]);
+ x2 = _mm256_loadu_ps(&x[i +16]);
+ x3 = _mm256_loadu_ps(&x[i +24]);
+ y0 = _mm256_loadu_ps(&y[i + 0]);
+ y1 = _mm256_loadu_ps(&y[i + 8]);
+ y2 = _mm256_loadu_ps(&y[i +16]);
+ y3 = _mm256_loadu_ps(&y[i +24]);
+
+ t0 = _mm256_mul_ps(s_256, y0);
+ t1 = _mm256_mul_ps(s_256, y1);
+ t2 = _mm256_mul_ps(s_256, y2);
+ t3 = _mm256_mul_ps(s_256, y3);
+
+ t0 = _mm256_fmadd_ps(c_256, x0, t0);
+ t1 = _mm256_fmadd_ps(c_256, x1, t1);
+ t2 = _mm256_fmadd_ps(c_256, x2, t2);
+ t3 = _mm256_fmadd_ps(c_256, x3, t3);
+
+ _mm256_storeu_ps(&x[i + 0], t0);
+ _mm256_storeu_ps(&x[i + 8], t1);
+ _mm256_storeu_ps(&x[i +16], t2);
+ _mm256_storeu_ps(&x[i +24], t3);
+
+ t0 = _mm256_mul_ps(s_256, x0);
+ t1 = _mm256_mul_ps(s_256, x1);
+ t2 = _mm256_mul_ps(s_256, x2);
+ t3 = _mm256_mul_ps(s_256, x3);
+
+ t0 = _mm256_fmsub_ps(c_256, y0, t0);
+ t1 = _mm256_fmsub_ps(c_256, y1, t1);
+ t2 = _mm256_fmsub_ps(c_256, y2, t2);
+ t3 = _mm256_fmsub_ps(c_256, y3, t3);
+
+ _mm256_storeu_ps(&y[i + 0], t0);
+ _mm256_storeu_ps(&y[i + 8], t1);
+ _mm256_storeu_ps(&y[i +16], t2);
+ _mm256_storeu_ps(&y[i +24], t3);
+
+ }
+
+ for (i = tail_index_32; i < tail_index_8; i += 8) {
+ x0 = _mm256_loadu_ps(&x[i]);
+ y0 = _mm256_loadu_ps(&y[i]);
+
+ t0 = _mm256_mul_ps(s_256, y0);
+ t0 = _mm256_fmadd_ps(c_256, x0, t0);
+ _mm256_storeu_ps(&x[i], t0);
+
+ t0 = _mm256_mul_ps(s_256, x0);
+ t0 = _mm256_fmsub_ps(c_256, y0, t0);
+ _mm256_storeu_ps(&y[i], t0);
+ }
+
+ for (i = tail_index_8; i < n; ++i) {
+ FLOAT temp = c * x[i] + s * y[i];
+ y[i] = c * y[i] - s * x[i];
+ x[i] = temp;
+ }
+}
+#endif
diff --git a/kernel/x86_64/srot_microk_skylakex-2.c b/kernel/x86_64/srot_microk_skylakex-2.c
new file mode 100644
index 000000000..a21d1cf64
--- /dev/null
+++ b/kernel/x86_64/srot_microk_skylakex-2.c
@@ -0,0 +1,91 @@
+/* need a new enough GCC for avx512 support */
+#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_SROT_KERNEL 1
+
+#include <immintrin.h>
+#include <stdint.h>
+
+static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
+{
+ BLASLONG i = 0;
+ __m512 c_512, s_512;
+ c_512 = _mm512_set1_ps(c);
+ s_512 = _mm512_set1_ps(s);
+
+ BLASLONG tail_index_16 = n&(~15);
+ BLASLONG tail_index_64 = n&(~63);
+
+
+ __m512 x0, x1, x2, x3;
+ __m512 y0, y1, y2, y3;
+ __m512 t0, t1, t2, t3;
+
+ for (i = 0; i < tail_index_64; i += 64) {
+ x0 = _mm512_loadu_ps(&x[i + 0]);
+ x1 = _mm512_loadu_ps(&x[i +16]);
+ x2 = _mm512_loadu_ps(&x[i +32]);
+ x3 = _mm512_loadu_ps(&x[i +48]);
+ y0 = _mm512_loadu_ps(&y[i + 0]);
+ y1 = _mm512_loadu_ps(&y[i +16]);
+ y2 = _mm512_loadu_ps(&y[i +32]);
+ y3 = _mm512_loadu_ps(&y[i +48]);
+
+ t0 = _mm512_mul_ps(s_512, y0);
+ t1 = _mm512_mul_ps(s_512, y1);
+ t2 = _mm512_mul_ps(s_512, y2);
+ t3 = _mm512_mul_ps(s_512, y3);
+
+ t0 =
_mm512_fmadd_ps(c_512, x0, t0); + t1 = _mm512_fmadd_ps(c_512, x1, t1); + t2 = _mm512_fmadd_ps(c_512, x2, t2); + t3 = _mm512_fmadd_ps(c_512, x3, t3); + + _mm512_storeu_ps(&x[i + 0], t0); + _mm512_storeu_ps(&x[i +16], t1); + _mm512_storeu_ps(&x[i +32], t2); + _mm512_storeu_ps(&x[i +48], t3); + + t0 = _mm512_mul_ps(s_512, x0); + t1 = _mm512_mul_ps(s_512, x1); + t2 = _mm512_mul_ps(s_512, x2); + t3 = _mm512_mul_ps(s_512, x3); + + t0 = _mm512_fmsub_ps(c_512, y0, t0); + t1 = _mm512_fmsub_ps(c_512, y1, t1); + t2 = _mm512_fmsub_ps(c_512, y2, t2); + t3 = _mm512_fmsub_ps(c_512, y3, t3); + + _mm512_storeu_ps(&y[i + 0], t0); + _mm512_storeu_ps(&y[i +16], t1); + _mm512_storeu_ps(&y[i +32], t2); + _mm512_storeu_ps(&y[i +48], t3); + } + + for (i = tail_index_64; i < tail_index_16; i += 16) { + x0 = _mm512_loadu_ps(&x[i]); + y0 = _mm512_loadu_ps(&y[i]); + + t0 = _mm512_mul_ps(s_512, y0); + t0 = _mm512_fmadd_ps(c_512, x0, t0); + _mm512_storeu_ps(&x[i], t0); + + t0 = _mm512_mul_ps(s_512, x0); + t0 = _mm512_fmsub_ps(c_512, y0, t0); + _mm512_storeu_ps(&y[i], t0); + } + + + if ((n & 15) > 0) { + uint16_t tail_mask16 = (((uint16_t) 0xffff) >> (16-(n&15))); + __m512 tail_x = _mm512_maskz_loadu_ps(*((__mmask16*)&tail_mask16), &x[tail_index_16]); + __m512 tail_y = _mm512_maskz_loadu_ps(*((__mmask16*)&tail_mask16), &y[tail_index_16]); + __m512 temp = _mm512_mul_ps(s_512, tail_y); + temp = _mm512_fmadd_ps(c_512, tail_x, temp); + _mm512_mask_storeu_ps(&x[tail_index_16], *((__mmask16*)&tail_mask16), temp); + temp = _mm512_mul_ps(s_512, tail_x); + temp = _mm512_fmsub_ps(c_512, tail_y, temp); + _mm512_mask_storeu_ps(&y[tail_index_16], *((__mmask16*)&tail_mask16), temp); + } +} +#endif diff --git a/kernel/x86_64/ssymv_L.c b/kernel/x86_64/ssymv_L.c index 73ae001ea..c9d698eb7 100644 --- a/kernel/x86_64/ssymv_L.c +++ b/kernel/x86_64/ssymv_L.c @@ -32,7 +32,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "ssymv_L_microk_bulldozer-2.c" #elif defined(NEHALEM) #include "ssymv_L_microk_nehalem-2.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "ssymv_L_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "ssymv_L_microk_sandy-2.c" diff --git a/kernel/x86_64/ssymv_U.c b/kernel/x86_64/ssymv_U.c index f37c251a1..4d8aac1ab 100644 --- a/kernel/x86_64/ssymv_U.c +++ b/kernel/x86_64/ssymv_U.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "ssymv_U_microk_bulldozer-2.c" #elif defined(NEHALEM) #include "ssymv_U_microk_nehalem-2.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "ssymv_U_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "ssymv_U_microk_sandy-2.c" diff --git a/kernel/x86_64/stobf16_microk_cooperlake.c b/kernel/x86_64/stobf16_microk_cooperlake.c new file mode 100644 index 000000000..2756a6934 --- /dev/null +++ b/kernel/x86_64/stobf16_microk_cooperlake.c @@ -0,0 +1,86 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/* need a new enough GCC for avx512 support */
+#if (( defined(__GNUC__) && __GNUC__ >= 10 && defined(__AVX512BF16__)) || (defined(__clang__) && __clang_major__ >= 9))
+
+#define HAVE_TOBF16_ACCL_KERNEL 1
+#include "common.h"
+#include <immintrin.h>
+
+static void tobf16_accl_kernel(BLASLONG n, const float * in, bfloat16 * out)
+{
+ /* Count the elements before the first 64-byte boundary, so the AVX-512
+ * body below can use aligned loads (assumes the input float array is
+ * naturally aligned) */
+ int align_header = ((64 - ((uintptr_t)in & (uintptr_t)0x3f)) >> 2) & 0xf;
+
+ if (n < align_header) {align_header = n;}
+
+ if (align_header != 0) {
+ uint16_t align_mask16 = (((uint16_t)0xffff) >> (16-align_header));
+ __m512 a = _mm512_maskz_loadu_ps(*((__mmask16*) &align_mask16), &in[0]);
+ _mm256_mask_storeu_epi16(&out[0], *((__mmask16*) &align_mask16), (__m256i) _mm512_cvtneps_pbh(a));
+ }
+
+ if (n == align_header) {
+ return;
+ } else {
+ n -= align_header;
+ in += align_header;
+ out += align_header;
+ }
+
+ int tail_index_32 = n&(~31);
+ int tail_index_128 = n&(~127);
+ uint32_t tail_mask32 = (((uint32_t) 0xffffffff) >> (32-(n&31)));
+ uint16_t tail_mask16 = (((uint16_t) 0xffff) >> (16-(n&15)));
+
+ /* Process the main chunk, 128 elements per round */
+ for (int i = 0; i < tail_index_128; i += 128) {
+ _mm512_storeu_si512(&out[i+ 0], (__m512i) _mm512_cvtne2ps_pbh(_mm512_load_ps(&in[i+ 16]), _mm512_load_ps(&in[i+ 0])));
+ _mm512_storeu_si512(&out[i+32], (__m512i) _mm512_cvtne2ps_pbh(_mm512_load_ps(&in[i+ 48]), _mm512_load_ps(&in[i+32])));
+ _mm512_storeu_si512(&out[i+64], (__m512i) _mm512_cvtne2ps_pbh(_mm512_load_ps(&in[i+ 80]), _mm512_load_ps(&in[i+64])));
+ _mm512_storeu_si512(&out[i+96], (__m512i) _mm512_cvtne2ps_pbh(_mm512_load_ps(&in[i+112]), _mm512_load_ps(&in[i+96])));
+ }
+
+ /* Process the remaining <128 elements, 32 per round */
+ for (int j = tail_index_128; j < tail_index_32; j += 32) {
+ _mm512_storeu_si512(&out[j], (__m512i) _mm512_cvtne2ps_pbh(_mm512_load_ps(&in[j+ 16]), _mm512_load_ps(&in[j])));
+ }
+
+ /* Handle the remaining <32 elements with masked loads and stores */
+ if ((n&31) > 15) {
+ __m512 b = _mm512_load_ps(&in[tail_index_32]);
+ __m512 a = _mm512_maskz_load_ps(*((__mmask16*) &tail_mask16), &in[tail_index_32+16]);
+ _mm512_mask_storeu_epi16(&out[tail_index_32],
*((__mmask32*) &tail_mask32), (__m512i) _mm512_cvtne2ps_pbh(a, b)); + } else if ((n&31) > 0) { + __m512 a = _mm512_maskz_load_ps(*((__mmask16*) &tail_mask16), &in[tail_index_32]); + _mm256_mask_storeu_epi16(&out[tail_index_32], *((__mmask16*) &tail_mask16), (__m256i) _mm512_cvtneps_pbh(a)); + } +} + +#endif diff --git a/kernel/x86_64/strsm_kernel_8x4_haswell_LN.c b/kernel/x86_64/strsm_kernel_8x4_haswell_LN.c new file mode 100644 index 000000000..5410bd4ae --- /dev/null +++ b/kernel/x86_64/strsm_kernel_8x4_haswell_LN.c @@ -0,0 +1,240 @@ +#include "common.h" +#include +#include "strsm_kernel_8x4_haswell_L_common.h" + +#define SOLVE_LN_m1n4 \ + "subq $4,%2; movq %2,%3;" GEMM_SUM_REORDER_1x4(4)\ + SOLVE_m1n4(-4,4) SAVE_b_m1n4(-16,4)\ + "movq %2,%3;" save_c_m1n4(4) + +#define SOLVE_LN_m1n8 \ + "subq $4,%2; movq %2,%3;" GEMM_SUM_REORDER_1x4(4) GEMM_SUM_REORDER_1x4(5)\ + SOLVE_m1n8(-4,4,5) SAVE_b_m1n8(-16,4,5)\ + "movq %2,%3;" save_c_m1n4(4) save_c_m1n4(5) + +#define SOLVE_LN_m1n12 \ + "subq $4,%2; movq %2,%3;" GEMM_SUM_REORDER_1x4(4) GEMM_SUM_REORDER_1x4(5) GEMM_SUM_REORDER_1x4(6)\ + SOLVE_m1n12(-4,4,5,6) SAVE_b_m1n12(-16,4,5,6)\ + "movq %2,%3;" save_c_m1n4(4) save_c_m1n4(5) save_c_m1n4(6) + +#define SOLVE_LN_m2n4 \ + "subq $8,%2; movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5,4)\ + SOLVE_loup_m2n4(-8,4)\ + SOLVE_up_m2n4(-16,4) SAVE_b_m2n4(-32,4)\ + "movq %2,%3;" save_c_m2n4(4) + +#define SOLVE_LN_m2n8 \ + "subq $8,%2; movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5,4) GEMM_SUM_REORDER_2x4(6,7,5)\ + SOLVE_loup_m2n8(-8,4,5)\ + SOLVE_up_m2n8(-16,4,5) SAVE_b_m2n8(-32,4,5)\ + "movq %2,%3;" save_c_m2n4(4) save_c_m2n4(5) + +#define SOLVE_LN_m2n12 \ + "subq $8,%2; movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5,4) GEMM_SUM_REORDER_2x4(6,7,5) GEMM_SUM_REORDER_2x4(8,9,6)\ + SOLVE_loup_m2n12(-8,4,5,6)\ + SOLVE_up_m2n12(-16,4,5,6) SAVE_b_m2n12(-32,4,5,6)\ + "movq %2,%3;" save_c_m2n4(4) save_c_m2n4(5) save_c_m2n4(6) + +#define SOLVE_LN_m4n4 \ + "subq $16,%2; movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5)\ +\ + SOLVE_loup_m2n4(-8,5) SUBTRACT_m2n4(-16,4)\ + SOLVE_up_m2n4(-24,5) SUBTRACT_m2n4(-32,4) SAVE_b_m2n4(-32,5)\ +\ + SOLVE_loup_m2n4(-48,4)\ + SOLVE_up_m2n4(-64,4) SAVE_b_m2n4(-64,4)\ +\ + "movq %2,%3;" save_c_m4n4(4,5) + +#define SOLVE_LN_m4n8 \ + "subq $16,%2; movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) GEMM_SUM_REORDER_4x4(8,9,10,11,6,7)\ +\ + SOLVE_loup_m2n8(-8,5,7) SUBTRACT_m2n8(-16,4,6)\ + SOLVE_up_m2n8(-24,5,7) SUBTRACT_m2n8(-32,4,6) SAVE_b_m2n8(-32,5,7)\ +\ + SOLVE_loup_m2n8(-48,4,6)\ + SOLVE_up_m2n8(-64,4,6) SAVE_b_m2n8(-64,4,6)\ +\ + "movq %2,%3;" save_c_m4n4(4,5) save_c_m4n4(6,7) + +#define SOLVE_LN_m4n12 \ + "subq $16,%2; movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) GEMM_SUM_REORDER_4x4(8,9,10,11,6,7) GEMM_SUM_REORDER_4x4(12,13,14,15,8,9)\ +\ + SOLVE_loup_m2n12(-8,5,7,9) SUBTRACT_m2n12(-16,4,6,8)\ + SOLVE_up_m2n12(-24,5,7,9) SUBTRACT_m2n12(-32,4,6,8) SAVE_b_m2n12(-32,5,7,9)\ +\ + SOLVE_loup_m2n12(-48,4,6,8)\ + SOLVE_up_m2n12(-64,4,6,8) SAVE_b_m2n12(-64,4,6,8)\ +\ + "movq %2,%3;" save_c_m4n4(4,5) save_c_m4n4(6,7) save_c_m4n4(8,9) + +#define SOLVE_LN_m8n4 \ + "subq $32,%2; movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,-32)\ +\ + SOLVE_loup_m2n4(-8,7) SUBTRACT_m2n4(-16,6) SUBTRACT_m2n4(-24,5) SUBTRACT_m2n4(-32,4)\ + SOLVE_up_m2n4(-40,7) SUBTRACT_m2n4(-48,6) SUBTRACT_m2n4(-56,5) SUBTRACT_m2n4(-64,4) SAVE_b_m2n4(-32,7)\ +\ + SOLVE_loup_m2n4(-80,6) SUBTRACT_m2n4(-88,5) SUBTRACT_m2n4(-96,4)\ + SOLVE_up_m2n4(-112,6) SUBTRACT_m2n4(-120,5) SUBTRACT_m2n4(-128,4) SAVE_b_m2n4(-64,6)\ +\ + SOLVE_loup_m2n4(-152,5) 
SUBTRACT_m2n4(-160,4)\ + SOLVE_up_m2n4(-184,5) SUBTRACT_m2n4(-192,4) SAVE_b_m2n4(-96,5)\ +\ + SOLVE_loup_m2n4(-224,4)\ + SOLVE_up_m2n4(-256,4) SAVE_b_m2n4(-128,4)\ +\ + "movq %2,%3;" save_c_m8n4(4,5,6,7) + +#define SOLVE_LN_m8n8 \ + "subq $32,%2; movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,-32) GEMM_SUM_REORDER_8x4(8,9,10,11,-32)\ +\ + SOLVE_loup_m2n8(-8,7,11) SUBTRACT_m2n8(-16,6,10) SUBTRACT_m2n8(-24,5,9) SUBTRACT_m2n8(-32,4,8)\ + SOLVE_up_m2n8(-40,7,11) SUBTRACT_m2n8(-48,6,10) SUBTRACT_m2n8(-56,5,9) SUBTRACT_m2n8(-64,4,8) SAVE_b_m2n8(-32,7,11)\ +\ + SOLVE_loup_m2n8(-80,6,10) SUBTRACT_m2n8(-88,5,9) SUBTRACT_m2n8(-96,4,8)\ + SOLVE_up_m2n8(-112,6,10) SUBTRACT_m2n8(-120,5,9) SUBTRACT_m2n8(-128,4,8) SAVE_b_m2n8(-64,6,10)\ +\ + SOLVE_loup_m2n8(-152,5,9) SUBTRACT_m2n8(-160,4,8)\ + SOLVE_up_m2n8(-184,5,9) SUBTRACT_m2n8(-192,4,8) SAVE_b_m2n8(-96,5,9)\ +\ + SOLVE_loup_m2n8(-224,4,8)\ + SOLVE_up_m2n8(-256,4,8) SAVE_b_m2n8(-128,4,8)\ +\ + "movq %2,%3;" save_c_m8n4(4,5,6,7) save_c_m8n4(8,9,10,11) + +#define SOLVE_LN_m8n12 \ + "subq $32,%2; movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,-32) GEMM_SUM_REORDER_8x4(8,9,10,11,-32) GEMM_SUM_REORDER_8x4(12,13,14,15,-32)\ +\ + SOLVE_loup_m2n12(-8,7,11,15) SUBTRACT_m2n12(-16,6,10,14) SUBTRACT_m2n12(-24,5,9,13) SUBTRACT_m2n12(-32,4,8,12)\ + SOLVE_up_m2n12(-40,7,11,15) SUBTRACT_m2n12(-48,6,10,14) SUBTRACT_m2n12(-56,5,9,13) SUBTRACT_m2n12(-64,4,8,12) SAVE_b_m2n12(-32,7,11,15)\ +\ + SOLVE_loup_m2n12(-80,6,10,14) SUBTRACT_m2n12(-88,5,9,13) SUBTRACT_m2n12(-96,4,8,12)\ + SOLVE_up_m2n12(-112,6,10,14) SUBTRACT_m2n12(-120,5,9,13) SUBTRACT_m2n12(-128,4,8,12) SAVE_b_m2n12(-64,6,10,14)\ +\ + SOLVE_loup_m2n12(-152,5,9,13) SUBTRACT_m2n12(-160,4,8,12)\ + SOLVE_up_m2n12(-184,5,9,13) SUBTRACT_m2n12(-192,4,8,12) SAVE_b_m2n12(-96,5,9,13)\ +\ + SOLVE_loup_m2n12(-224,4,8,12)\ + SOLVE_up_m2n12(-256,4,8,12) SAVE_b_m2n12(-128,4,8,12)\ +\ + "movq %2,%3;" save_c_m8n4(4,5,6,7) save_c_m8n4(8,9,10,11) save_c_m8n4(12,13,14,15) + +/* r13 = k-kk, r14 = b_tail, r15 = a_tail */ + +#define GEMM_LN_SIMPLE(mdim,ndim) \ + "movq %%r15,%0; negq %%r12; leaq (%%r15,%%r12,"#mdim"),%%r15; negq %%r12;"\ + "movq %%r13,%5; addq $"#mdim",%%r13; movq %%r14,%1;" INIT_m##mdim##n##ndim\ + "testq %5,%5; jz 2"#mdim""#ndim"2f;"\ + "2"#mdim""#ndim"1:\n\t"\ + "subq $16,%1; subq $"#mdim"*4,%0;" GEMM_KERNEL_k1m##mdim##n##ndim "decq %5; jnz 2"#mdim""#ndim"1b;"\ + "2"#mdim""#ndim"2:\n\t" +#define GEMM_LN_m8n4 GEMM_LN_SIMPLE(8,4) +#define GEMM_LN_m8n8 GEMM_LN_SIMPLE(8,8) +#define GEMM_LN_m8n12 \ + "movq %%r15,%0; negq %%r12; leaq (%%r15,%%r12,8),%%r15; negq %%r12; movq %%r13,%5; addq $8,%%r13; movq %%r14,%1;" INIT_m8n12\ + "cmpq $8,%5; jb 28122f;"\ + "28121:\n\t"\ + "prefetcht0 -384(%0); subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "prefetcht0 -384(%0); subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "prefetcht0 -384(%0); subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "prefetcht0 -384(%0); subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $8,%5; cmpq $8,%5; jnb 28121b;"\ + "28122:\n\t"\ + "testq %5,%5; jz 28124f;"\ + "28123:\n\t"\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12 "decq %5; jnz 28123b;"\ + "28124:\n\t" +#define GEMM_LN_m4n4 GEMM_LN_SIMPLE(4,4) +#define GEMM_LN_m4n8 GEMM_LN_SIMPLE(4,8) +#define GEMM_LN_m4n12 GEMM_LN_SIMPLE(4,12) +#define GEMM_LN_m2n4 GEMM_LN_SIMPLE(2,4) +#define GEMM_LN_m2n8 
GEMM_LN_SIMPLE(2,8) +#define GEMM_LN_m2n12 GEMM_LN_SIMPLE(2,12) +#define GEMM_LN_m1n4 GEMM_LN_SIMPLE(1,4) +#define GEMM_LN_m1n8 GEMM_LN_SIMPLE(1,8) +#define GEMM_LN_m1n12 GEMM_LN_SIMPLE(1,12) + +#define COMPUTE(ndim) {\ + c_ptr += M;\ + __asm__ __volatile__(\ + "movq %0,%%r15; movq %7,%%r13; movq %6,%%r12; salq $2,%%r12; leaq (%1,%%r12,4),%%r14; movq %10,%%r11;"\ + "testq $1,%%r11; jz "#ndim"772f;"\ + #ndim"771:\n\t"\ + GEMM_LN_m1n##ndim SOLVE_LN_m1n##ndim "subq $1,%%r11;"\ + #ndim"772:\n\t"\ + "testq $2,%%r11; jz "#ndim"773f;"\ + GEMM_LN_m2n##ndim SOLVE_LN_m2n##ndim "subq $2,%%r11;"\ + #ndim"773:\n\t"\ + "testq $4,%%r11; jz "#ndim"774f;"\ + GEMM_LN_m4n##ndim SOLVE_LN_m4n##ndim "subq $4,%%r11;"\ + #ndim"774:\n\t"\ + "testq %%r11,%%r11; jz "#ndim"776f;"\ + #ndim"775:\n\t"\ + GEMM_LN_m8n##ndim SOLVE_LN_m8n##ndim "subq $8,%%r11; jnz "#ndim"775b;"\ + #ndim"776:\n\t"\ + "movq %%r15,%0; movq %%r14,%1; vzeroupper;"\ + :"+r"(a_ptr),"+r"(b_ptr),"+r"(c_ptr),"+r"(c_tmp),"+r"(ldc_bytes),"+r"(k_cnt):"m"(K),"m"(kmkkinp),"m"(one[0]),"m"(zero[0]),"m"(M)\ + :"r11","r12","r13","r14","r15","cc","memory",\ + "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15");\ + a_ptr += M * K; b_ptr += (ndim-4) * K; c_ptr += ldc * ndim;\ +} +static void solve_LN(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT a0, b0; + int i, j, k; + for (i=m-1;i>=0;i--) { + a0 = a[i*m+i]; //reciprocal of the original value + for (j=0;j0) GEMM_KERNEL_N(1,n,k-kk,-1.0,a_ptr+kk*1,sb+kk*n,c_ptr,ldc); + solve_LN(1,n,a_ptr+(kk-1)*1,sb+(kk-1)*n,c_ptr,ldc); + kk -= 1; + m_count--; + } + if(m_count&2){ + a_ptr-=k*2; c_ptr-=2; + if(k-kk>0) GEMM_KERNEL_N(2,n,k-kk,-1.0,a_ptr+kk*2,sb+kk*n,c_ptr,ldc); + solve_LN(2,n,a_ptr+(kk-2)*2,sb+(kk-2)*n,c_ptr,ldc); + kk -= 2; + m_count-=2; + } + if(m_count&4){ + a_ptr-=k*4; c_ptr-=4; + if(k-kk>0) GEMM_KERNEL_N(4,n,k-kk,-1.0,a_ptr+kk*4,sb+kk*n,c_ptr,ldc); + solve_LN(4,n,a_ptr+(kk-4)*4,sb+(kk-4)*n,c_ptr,ldc); + kk -= 4; + m_count-=4; + } + for(;m_count>7;m_count-=8){ + a_ptr-=k*8; c_ptr-=8; + if(k-kk>0) GEMM_KERNEL_N(8,n,k-kk,-1.0,a_ptr+kk*8,sb+kk*n,c_ptr,ldc); + solve_LN(8,n,a_ptr+(kk-8)*8,sb+(kk-8)*n,c_ptr,ldc); + kk -= 8; + } +} +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, FLOAT *sa, FLOAT *sb, FLOAT *C, BLASLONG ldc, BLASLONG offset){ + float *a_ptr = sa+m*k, *b_ptr = sb, *c_ptr = C, *c_tmp = C; + float one[8] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; + float zero[8] = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; + uint64_t ldc_bytes = (uint64_t)ldc * sizeof(float), K = (uint64_t)k, M = (uint64_t)m, kmkkinp = (uint64_t)(k-m-offset), k_cnt = 0; + BLASLONG n_count = n; + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) { COMPUTE_EDGE_1_nchunk(m,2,sa,b_ptr,c_ptr,ldc,k,offset); b_ptr += 2*k; c_ptr += ldc*2;} + if(n_count>0) COMPUTE_EDGE_1_nchunk(m,1,sa,b_ptr,c_ptr,ldc,k,offset); + return 0; +} + diff --git a/kernel/x86_64/strsm_kernel_8x4_haswell_LT.c b/kernel/x86_64/strsm_kernel_8x4_haswell_LT.c new file mode 100644 index 000000000..3c7e9e83b --- /dev/null +++ b/kernel/x86_64/strsm_kernel_8x4_haswell_LT.c @@ -0,0 +1,228 @@ +#include "common.h" +#include +#include "strsm_kernel_8x4_haswell_L_common.h" + +#define SOLVE_LT_m1n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4)\ + SOLVE_m1n4(0,4) SAVE_b_m1n4(0,4)\ + "movq %2,%3; addq $4,%2;" save_c_m1n4(4) + +#define SOLVE_LT_m1n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4) 
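/* LT counterpart for reference (sketch, same assumptions as the LN note
 * above): plain forward substitution, top-down, matching the positive,
 * growing a-offsets in the SOLVE_LT_* macros:
 *
 *   static void solve_lt_ref(int m, int n, const float *a, float *c, int ldc) {
 *       for (int i = 0; i < m; i++) {
 *           for (int j = 0; j < n; j++) c[i + j*ldc] *= a[i*m + i]; // 1/a(i,i)
 *           for (int r = i + 1; r < m; r++)
 *               for (int j = 0; j < n; j++)
 *                   c[r + j*ldc] -= a[i*m + r] * c[i + j*ldc];
 *       }
 *   }
 */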
GEMM_SUM_REORDER_1x4(5)\ + SOLVE_m1n8(0,4,5) SAVE_b_m1n8(0,4,5)\ + "movq %2,%3; addq $4,%2;" save_c_m1n4(4) save_c_m1n4(5) + +#define SOLVE_LT_m1n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4) GEMM_SUM_REORDER_1x4(5) GEMM_SUM_REORDER_1x4(6)\ + SOLVE_m1n12(0,4,5,6) SAVE_b_m1n12(0,4,5,6)\ + "movq %2,%3; addq $4,%2;" save_c_m1n4(4) save_c_m1n4(5) save_c_m1n4(6) + +#define SOLVE_LT_m2n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5,4)\ + SOLVE_uplo_m2n4(0,4)\ + SOLVE_lo_m2n4(8,4) SAVE_b_m2n4(0,4)\ + "movq %2,%3; addq $8,%2;" save_c_m2n4(4) + +#define SOLVE_LT_m2n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5,4) GEMM_SUM_REORDER_2x4(6,7,5)\ + SOLVE_uplo_m2n8(0,4,5)\ + SOLVE_lo_m2n8(8,4,5) SAVE_b_m2n8(0,4,5)\ + "movq %2,%3; addq $8,%2;" save_c_m2n4(4) save_c_m2n4(5) + +#define SOLVE_LT_m2n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5,4) GEMM_SUM_REORDER_2x4(6,7,5) GEMM_SUM_REORDER_2x4(8,9,6)\ + SOLVE_uplo_m2n12(0,4,5,6)\ + SOLVE_lo_m2n12(8,4,5,6) SAVE_b_m2n12(0,4,5,6)\ + "movq %2,%3; addq $8,%2;" save_c_m2n4(4) save_c_m2n4(5) save_c_m2n4(6) + +#define SOLVE_LT_m4n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5)\ +\ + SOLVE_uplo_m2n4(0,4) SUBTRACT_m2n4(8,5)\ + SOLVE_lo_m2n4(16,4) SUBTRACT_m2n4(24,5) SAVE_b_m2n4(0,4)\ +\ + SOLVE_uplo_m2n4(40,5)\ + SOLVE_lo_m2n4(56,5) SAVE_b_m2n4(32,5)\ +\ + "movq %2,%3; addq $16,%2;" save_c_m4n4(4,5) + +#define SOLVE_LT_m4n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) GEMM_SUM_REORDER_4x4(8,9,10,11,6,7)\ +\ + SOLVE_uplo_m2n8(0,4,6) SUBTRACT_m2n8(8,5,7)\ + SOLVE_lo_m2n8(16,4,6) SUBTRACT_m2n8(24,5,7) SAVE_b_m2n8(0,4,6)\ +\ + SOLVE_uplo_m2n8(40,5,7)\ + SOLVE_lo_m2n8(56,5,7) SAVE_b_m2n8(32,5,7)\ +\ + "movq %2,%3; addq $16,%2;" save_c_m4n4(4,5) save_c_m4n4(6,7) + +#define SOLVE_LT_m4n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) GEMM_SUM_REORDER_4x4(8,9,10,11,6,7) GEMM_SUM_REORDER_4x4(12,13,14,15,8,9)\ +\ + SOLVE_uplo_m2n12(0,4,6,8) SUBTRACT_m2n12(8,5,7,9)\ + SOLVE_lo_m2n12(16,4,6,8) SUBTRACT_m2n12(24,5,7,9) SAVE_b_m2n12(0,4,6,8)\ +\ + SOLVE_uplo_m2n12(40,5,7,9)\ + SOLVE_lo_m2n12(56,5,7,9) SAVE_b_m2n12(32,5,7,9)\ +\ + "movq %2,%3; addq $16,%2;" save_c_m4n4(4,5) save_c_m4n4(6,7) save_c_m4n4(8,9) + +#define SOLVE_LT_m8n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63)\ +\ + SOLVE_uplo_m2n4(0,4) SUBTRACT_m2n4(8,5) SUBTRACT_m2n4(16,6) SUBTRACT_m2n4(24,7)\ + SOLVE_lo_m2n4(32,4) SUBTRACT_m2n4(40,5) SUBTRACT_m2n4(48,6) SUBTRACT_m2n4(56,7) SAVE_b_m2n4(0,4)\ +\ + SOLVE_uplo_m2n4(72,5) SUBTRACT_m2n4(80,6) SUBTRACT_m2n4(88,7)\ + SOLVE_lo_m2n4(104,5) SUBTRACT_m2n4(112,6) SUBTRACT_m2n4(120,7) SAVE_b_m2n4(32,5)\ +\ + SOLVE_uplo_m2n4(144,6) SUBTRACT_m2n4(152,7)\ + SOLVE_lo_m2n4(176,6) SUBTRACT_m2n4(184,7) SAVE_b_m2n4(64,6)\ +\ + SOLVE_uplo_m2n4(216,7)\ + SOLVE_lo_m2n4(248,7) SAVE_b_m2n4(96,7)\ +\ + "movq %2,%3; addq $32,%2;" save_c_m8n4(4,5,6,7) + +#define SOLVE_LT_m8n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63) GEMM_SUM_REORDER_8x4(8,9,10,11,63)\ +\ + SOLVE_uplo_m2n8(0,4,8) SUBTRACT_m2n8(8,5,9) SUBTRACT_m2n8(16,6,10) SUBTRACT_m2n8(24,7,11)\ + SOLVE_lo_m2n8(32,4,8) SUBTRACT_m2n8(40,5,9) SUBTRACT_m2n8(48,6,10) SUBTRACT_m2n8(56,7,11) SAVE_b_m2n8(0,4,8)\ +\ + SOLVE_uplo_m2n8(72,5,9) SUBTRACT_m2n8(80,6,10) SUBTRACT_m2n8(88,7,11)\ + SOLVE_lo_m2n8(104,5,9) SUBTRACT_m2n8(112,6,10) SUBTRACT_m2n8(120,7,11) SAVE_b_m2n8(32,5,9)\ +\ + SOLVE_uplo_m2n8(144,6,10) SUBTRACT_m2n8(152,7,11)\ + SOLVE_lo_m2n8(176,6,10) SUBTRACT_m2n8(184,7,11) SAVE_b_m2n8(64,6,10)\ +\ + SOLVE_uplo_m2n8(216,7,11)\ + SOLVE_lo_m2n8(248,7,11) SAVE_b_m2n8(96,7,11)\ +\ + "movq %2,%3; addq $32,%2;" 
save_c_m8n4(4,5,6,7) save_c_m8n4(8,9,10,11) + +#define SOLVE_LT_m8n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63) GEMM_SUM_REORDER_8x4(8,9,10,11,63) GEMM_SUM_REORDER_8x4(12,13,14,15,63)\ +\ + SOLVE_uplo_m2n12(0,4,8,12) SUBTRACT_m2n12(8,5,9,13) SUBTRACT_m2n12(16,6,10,14) SUBTRACT_m2n12(24,7,11,15)\ + SOLVE_lo_m2n12(32,4,8,12) SUBTRACT_m2n12(40,5,9,13) SUBTRACT_m2n12(48,6,10,14) SUBTRACT_m2n12(56,7,11,15) SAVE_b_m2n12(0,4,8,12)\ +\ + SOLVE_uplo_m2n12(72,5,9,13) SUBTRACT_m2n12(80,6,10,14) SUBTRACT_m2n12(88,7,11,15)\ + SOLVE_lo_m2n12(104,5,9,13) SUBTRACT_m2n12(112,6,10,14) SUBTRACT_m2n12(120,7,11,15) SAVE_b_m2n12(32,5,9,13)\ +\ + SOLVE_uplo_m2n12(144,6,10,14) SUBTRACT_m2n12(152,7,11,15)\ + SOLVE_lo_m2n12(176,6,10,14) SUBTRACT_m2n12(184,7,11,15) SAVE_b_m2n12(64,6,10,14)\ +\ + SOLVE_uplo_m2n12(216,7,11,15)\ + SOLVE_lo_m2n12(248,7,11,15) SAVE_b_m2n12(96,7,11,15)\ +\ + "movq %2,%3; addq $32,%2;" save_c_m8n4(4,5,6,7) save_c_m8n4(8,9,10,11) save_c_m8n4(12,13,14,15) + +#define GEMM_LT_SIMPLE(mdim,ndim) \ + "movq %%r15,%0; leaq (%%r15,%%r12,"#mdim"),%%r15; movq %%r13,%5; addq $"#mdim",%%r13; movq %%r14,%1;" INIT_m##mdim##n##ndim\ + "testq %5,%5; jz 1"#mdim""#ndim"2f;"\ + "1"#mdim""#ndim"1:\n\t"\ + GEMM_KERNEL_k1m##mdim##n##ndim "addq $16,%1; addq $"#mdim"*4,%0; decq %5; jnz 1"#mdim""#ndim"1b;"\ + "1"#mdim""#ndim"2:\n\t" +#define GEMM_LT_m8n4 GEMM_LT_SIMPLE(8,4) +#define GEMM_LT_m8n8 GEMM_LT_SIMPLE(8,8) +#define GEMM_LT_m8n12 \ + "movq %%r15,%0; leaq (%%r15,%%r12,8),%%r15; movq %%r13,%5; addq $8,%%r13; movq %%r14,%1;" INIT_m8n12\ + "cmpq $8,%5; jb 18122f;"\ + "18121:\n\t"\ + GEMM_KERNEL_k1m8n12 "prefetcht0 384(%0); addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "prefetcht0 384(%0); addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "prefetcht0 384(%0); addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "prefetcht0 384(%0); addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1;"\ + "subq $8,%5; cmpq $8,%5; jnb 18121b;"\ + "18122:\n\t"\ + "testq %5,%5; jz 18124f;"\ + "18123:\n\t"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1; decq %5; jnz 18123b;"\ + "18124:\n\t" +#define GEMM_LT_m4n4 GEMM_LT_SIMPLE(4,4) +#define GEMM_LT_m4n8 GEMM_LT_SIMPLE(4,8) +#define GEMM_LT_m4n12 GEMM_LT_SIMPLE(4,12) +#define GEMM_LT_m2n4 GEMM_LT_SIMPLE(2,4) +#define GEMM_LT_m2n8 GEMM_LT_SIMPLE(2,8) +#define GEMM_LT_m2n12 GEMM_LT_SIMPLE(2,12) +#define GEMM_LT_m1n4 GEMM_LT_SIMPLE(1,4) +#define GEMM_LT_m1n8 GEMM_LT_SIMPLE(1,8) +#define GEMM_LT_m1n12 GEMM_LT_SIMPLE(1,12) + +#define COMPUTE(ndim) {\ + __asm__ __volatile__(\ + "movq %0,%%r15; movq %1,%%r14; movq %7,%%r13; movq %6,%%r12; salq $2,%%r12; movq %10,%%r11;"\ + "cmpq $8,%%r11; jb "#ndim"772f;"\ + #ndim"771:\n\t"\ + GEMM_LT_m8n##ndim SOLVE_LT_m8n##ndim "subq $8,%%r11; cmpq $8,%%r11; jnb "#ndim"771b;"\ + #ndim"772:\n\t"\ + "testq $4,%%r11; jz "#ndim"773f;"\ + GEMM_LT_m4n##ndim SOLVE_LT_m4n##ndim "subq $4,%%r11;"\ + #ndim"773:\n\t"\ + "testq $2,%%r11; jz "#ndim"774f;"\ + GEMM_LT_m2n##ndim SOLVE_LT_m2n##ndim "subq $2,%%r11;"\ + #ndim"774:\n\t"\ + "testq $1,%%r11; jz "#ndim"775f;"\ + GEMM_LT_m1n##ndim SOLVE_LT_m1n##ndim "subq $1,%%r11;"\ + #ndim"775:\n\t"\ + "movq %%r15,%0; movq %%r14,%1; vzeroupper;"\ + :"+r"(a_ptr),"+r"(b_ptr),"+r"(c_ptr),"+r"(c_tmp),"+r"(ldc_bytes),"+r"(k_cnt):"m"(K),"m"(OFF),"m"(one[0]),"m"(zero[0]),"m"(M)\ + :"r11","r12","r13","r14","r15","cc","memory",\ + 
"xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15");\ + a_ptr -= M * K; b_ptr += ndim * K; c_ptr += ldc * ndim - M;\ +} +static void solve_LT(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT a0, b0; + int i, j, k; + for (i=0;i7;m_count-=8){ + if(kk>0) GEMM_KERNEL_N(8,n,kk,-1.0,a_ptr,sb,c_ptr,ldc); + solve_LT(8,n,a_ptr+kk*8,sb+kk*n,c_ptr,ldc); + kk += 8; a_ptr += k * 8; c_ptr += 8; + } + for(;m_count>3;m_count-=4){ + if(kk>0) GEMM_KERNEL_N(4,n,kk,-1.0,a_ptr,sb,c_ptr,ldc); + solve_LT(4,n,a_ptr+kk*4,sb+kk*n,c_ptr,ldc); + kk += 4; a_ptr += k * 4; c_ptr += 4; + } + for(;m_count>1;m_count-=2){ + if(kk>0) GEMM_KERNEL_N(2,n,kk,-1.0,a_ptr,sb,c_ptr,ldc); + solve_LT(2,n,a_ptr+kk*2,sb+kk*n,c_ptr,ldc); + kk += 2; a_ptr += k * 2; c_ptr += 2; + } + if(m_count>0){ + if(kk>0) GEMM_KERNEL_N(1,n,kk,-1.0,a_ptr,sb,c_ptr,ldc); + solve_LT(1,n,a_ptr+kk*1,sb+kk*n,c_ptr,ldc); + kk += 1; a_ptr += k * 1; c_ptr += 1; + } +} +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, FLOAT *sa, FLOAT *sb, FLOAT *C, BLASLONG ldc, BLASLONG offset){ + float *a_ptr = sa, *b_ptr = sb, *c_ptr = C, *c_tmp = C; + float one[8] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; + float zero[8] = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; + uint64_t ldc_bytes = (uint64_t)ldc * sizeof(float), K = (uint64_t)k, M = (uint64_t)m, OFF = (uint64_t)offset, k_cnt = 0; + BLASLONG n_count = n; + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) { COMPUTE_EDGE_1_nchunk(m,2,a_ptr,b_ptr,c_ptr,ldc,k,offset); b_ptr += 2*k; c_ptr += ldc*2;} + if(n_count>0) COMPUTE_EDGE_1_nchunk(m,1,a_ptr,b_ptr,c_ptr,ldc,k,offset); + return 0; +} + diff --git a/kernel/x86_64/strsm_kernel_8x4_haswell_L_common.h b/kernel/x86_64/strsm_kernel_8x4_haswell_L_common.h new file mode 100644 index 000000000..2862a5b8d --- /dev/null +++ b/kernel/x86_64/strsm_kernel_8x4_haswell_L_common.h @@ -0,0 +1,187 @@ +/* r11 = m_counter, r12 = size_of_k_elements, r13 = kk, r14 = b_head, r15 = a_head */ +/* register i/o: %0 = a_ptr, %1 = b_ptr, %2 = c_ptr, %3 = c_tmp, %4 = ldc, %5 = k_counter */ +/* memory input: %6 = K, %7 = offset, %8 = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}, %9 = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}, %10 = M */ + +#define init_m8n4(c1,c2,c3,c4)\ + "vpxor %%ymm"#c1",%%ymm"#c1",%%ymm"#c1"; vpxor %%ymm"#c2",%%ymm"#c2",%%ymm"#c2"; vpxor %%ymm"#c3",%%ymm"#c3",%%ymm"#c3"; vpxor %%ymm"#c4",%%ymm"#c4",%%ymm"#c4";" +#define INIT_m8n4 init_m8n4(4,5,6,7) +#define INIT_m8n8 INIT_m8n4 init_m8n4(8,9,10,11) +#define INIT_m8n12 INIT_m8n8 init_m8n4(12,13,14,15) + +#define init_m4n4(c1,c2,c3,c4)\ + "vpxor %%xmm"#c1",%%xmm"#c1",%%xmm"#c1"; vpxor %%xmm"#c2",%%xmm"#c2",%%xmm"#c2"; vpxor %%xmm"#c3",%%xmm"#c3",%%xmm"#c3"; vpxor %%xmm"#c4",%%xmm"#c4",%%xmm"#c4";" +#define INIT_m4n4 init_m4n4(4,5,6,7) +#define INIT_m4n8 INIT_m4n4 init_m4n4(8,9,10,11) +#define INIT_m4n12 INIT_m4n8 init_m4n4(12,13,14,15) + +#define init_m2n4(c1,c2)\ + "vpxor %%xmm"#c1",%%xmm"#c1",%%xmm"#c1"; vpxor %%xmm"#c2",%%xmm"#c2",%%xmm"#c2";" +#define INIT_m2n4 init_m2n4(4,5) +#define INIT_m2n8 INIT_m2n4 init_m2n4(6,7) +#define INIT_m2n12 INIT_m2n8 init_m2n4(8,9) + +#define init_m1n4(c1) "vpxor %%xmm"#c1",%%xmm"#c1",%%xmm"#c1";" +#define INIT_m1n4 init_m1n4(4) +#define INIT_m1n8 INIT_m1n4 init_m1n4(5) +#define INIT_m1n12 INIT_m1n8 init_m1n4(6) + +#define GEMM_KERNEL_k1m8n4 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2;"\ + "vbroadcastsd (%1),%%ymm3; 
vfnmadd231ps %%ymm3,%%ymm1,%%ymm4; vfnmadd231ps %%ymm3,%%ymm2,%%ymm5;"\ + "vbroadcastsd 8(%1),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm6; vfnmadd231ps %%ymm3,%%ymm2,%%ymm7;" +#define GEMM_KERNEL_k1m8n8 GEMM_KERNEL_k1m8n4\ + "vbroadcastsd (%1,%%r12,4),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm8; vfnmadd231ps %%ymm3,%%ymm2,%%ymm9;"\ + "vbroadcastsd 8(%1,%%r12,4),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm10; vfnmadd231ps %%ymm3,%%ymm2,%%ymm11;" +#define GEMM_KERNEL_k1m8n12 GEMM_KERNEL_k1m8n8\ + "vbroadcastsd (%1,%%r12,8),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm12; vfnmadd231ps %%ymm3,%%ymm2,%%ymm13;"\ + "vbroadcastsd 8(%1,%%r12,8),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm14; vfnmadd231ps %%ymm3,%%ymm2,%%ymm15;" + +#define GEMM_KERNEL_k1m4n4 \ + "vmovsldup (%0),%%xmm1; vmovshdup (%0),%%xmm2;"\ + "vmovddup (%1),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm4; vfnmadd231ps %%xmm3,%%xmm2,%%xmm5;"\ + "vmovddup 8(%1),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm6; vfnmadd231ps %%xmm3,%%xmm2,%%xmm7;" +#define GEMM_KERNEL_k1m4n8 GEMM_KERNEL_k1m4n4\ + "vmovddup (%1,%%r12,4),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm8; vfnmadd231ps %%xmm3,%%xmm2,%%xmm9;"\ + "vmovddup 8(%1,%%r12,4),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm10; vfnmadd231ps %%xmm3,%%xmm2,%%xmm11;" +#define GEMM_KERNEL_k1m4n12 GEMM_KERNEL_k1m4n8\ + "vmovddup (%1,%%r12,8),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm12; vfnmadd231ps %%xmm3,%%xmm2,%%xmm13;"\ + "vmovddup 8(%1,%%r12,8),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm14; vfnmadd231ps %%xmm3,%%xmm2,%%xmm15;" + +#define GEMM_KERNEL_k1m2n4 \ + "vbroadcastss (%0),%%xmm1; vbroadcastss 4(%0),%%xmm2;"\ + "vmovups (%1),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm4; vfnmadd231ps %%xmm3,%%xmm2,%%xmm5;" +#define GEMM_KERNEL_k1m2n8 GEMM_KERNEL_k1m2n4\ + "vmovups (%1,%%r12,4),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm6; vfnmadd231ps %%xmm3,%%xmm2,%%xmm7;" +#define GEMM_KERNEL_k1m2n12 GEMM_KERNEL_k1m2n8\ + "vmovups (%1,%%r12,8),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm8; vfnmadd231ps %%xmm3,%%xmm2,%%xmm9;" + +#define GEMM_KERNEL_k1m1n4 "vbroadcastss (%0),%%xmm1; vfnmadd231ps (%1),%%xmm1,%%xmm4;" +#define GEMM_KERNEL_k1m1n8 GEMM_KERNEL_k1m1n4 "vfnmadd231ps (%1,%%r12,4),%%xmm1,%%xmm5;" +#define GEMM_KERNEL_k1m1n12 GEMM_KERNEL_k1m1n8 "vfnmadd231ps (%1,%%r12,8),%%xmm1,%%xmm6;" + +#define GEMM_SUM_REORDER_8x4(c1,c2,c3,c4,prefpos)\ + "vunpcklps %%ymm"#c2",%%ymm"#c1",%%ymm0; vunpckhps %%ymm"#c2",%%ymm"#c1",%%ymm1;"\ + "vunpcklps %%ymm"#c4",%%ymm"#c3",%%ymm2; vunpckhps %%ymm"#c4",%%ymm"#c3",%%ymm3;"\ + "vmovups (%3),%%ymm"#c1"; vmovups (%3,%4,1),%%ymm"#c2"; prefetcht1 "#prefpos"(%3); prefetcht1 "#prefpos"(%3,%4,1);"\ + "vunpcklpd %%ymm"#c2",%%ymm"#c1",%%ymm"#c3"; vunpckhpd %%ymm"#c2",%%ymm"#c1",%%ymm"#c4";"\ + "vaddps %%ymm0,%%ymm"#c3",%%ymm0; vaddps %%ymm1,%%ymm"#c4",%%ymm1;"\ + "leaq (%3,%4,2),%3;"\ + "vmovups (%3),%%ymm"#c1"; vmovups (%3,%4,1),%%ymm"#c2"; prefetcht1 "#prefpos"(%3); prefetcht1 "#prefpos"(%3,%4,1);"\ + "vunpcklpd %%ymm"#c2",%%ymm"#c1",%%ymm"#c3"; vunpckhpd %%ymm"#c2",%%ymm"#c1",%%ymm"#c4";"\ + "vaddps %%ymm2,%%ymm"#c3",%%ymm2; vaddps %%ymm3,%%ymm"#c4",%%ymm3;"\ + "leaq (%3,%4,2),%3;"\ + "vperm2f128 $2,%%ymm0,%%ymm2,%%ymm"#c1"; vperm2f128 $2,%%ymm1,%%ymm3,%%ymm"#c2";"\ + "vperm2f128 $19,%%ymm0,%%ymm2,%%ymm"#c3"; vperm2f128 $19,%%ymm1,%%ymm3,%%ymm"#c4";" + +#define GEMM_SUM_REORDER_4x4(c1,c2,c3,c4,co1,co2)\ + "vunpcklps %%xmm"#c2",%%xmm"#c1",%%xmm0; vunpckhps %%xmm"#c2",%%xmm"#c1",%%xmm1;"\ + "vunpcklps %%xmm"#c4",%%xmm"#c3",%%xmm2; vunpckhps %%xmm"#c4",%%xmm"#c3",%%xmm3;"\ + "vmovups (%3),%%xmm"#c1"; vmovups 
(%3,%4,1),%%xmm"#c2";"\ + "vunpcklpd %%xmm"#c2",%%xmm"#c1",%%xmm"#c3"; vunpckhpd %%xmm"#c2",%%xmm"#c1",%%xmm"#c4";"\ + "vaddps %%xmm0,%%xmm"#c3",%%xmm0; vaddps %%xmm1,%%xmm"#c4",%%xmm1;"\ + "leaq (%3,%4,2),%3;"\ + "vmovups (%3),%%xmm"#c1"; vmovups (%3,%4,1),%%xmm"#c2";"\ + "vunpcklpd %%xmm"#c2",%%xmm"#c1",%%xmm"#c3"; vunpckhpd %%xmm"#c2",%%xmm"#c1",%%xmm"#c4";"\ + "vaddps %%xmm2,%%xmm"#c3",%%xmm2; vaddps %%xmm3,%%xmm"#c4",%%xmm3;"\ + "leaq (%3,%4,2),%3;"\ + "vperm2f128 $2,%%ymm0,%%ymm2,%%ymm"#co1"; vperm2f128 $2,%%ymm1,%%ymm3,%%ymm"#co2";" + +#define GEMM_SUM_REORDER_2x4(c1,c2,co1)\ + "vunpcklps %%xmm"#c2",%%xmm"#c1",%%xmm0; vunpckhps %%xmm"#c2",%%xmm"#c1",%%xmm1;"\ + "vmovsd (%3),%%xmm2; vmovhpd (%3,%4,1),%%xmm2,%%xmm2; vaddps %%xmm0,%%xmm2,%%xmm0; leaq (%3,%4,2),%3;"\ + "vmovsd (%3),%%xmm2; vmovhpd (%3,%4,1),%%xmm2,%%xmm2; vaddps %%xmm1,%%xmm2,%%xmm1; leaq (%3,%4,2),%3;"\ + "vperm2f128 $2,%%ymm0,%%ymm1,%%ymm"#co1";" + +#define GEMM_SUM_REORDER_1x4(c1)\ + "vmovss (%3),%%xmm1; vinsertps $16,(%3,%4,1),%%xmm1,%%xmm1; leaq (%3,%4,2),%3;"\ + "vinsertps $32,(%3),%%xmm1,%%xmm1; vinsertps $48,(%3,%4,1),%%xmm1,%%xmm1; leaq (%3,%4,2),%3;"\ + "vaddps %%xmm"#c1",%%xmm1,%%xmm"#c1";" + +#define save_c_m8n4(c1,c2,c3,c4)\ + "vunpcklpd %%ymm"#c2",%%ymm"#c1",%%ymm0; vunpckhpd %%ymm"#c2",%%ymm"#c1",%%ymm1;"\ + "vunpcklpd %%ymm"#c4",%%ymm"#c3",%%ymm2; vunpckhpd %%ymm"#c4",%%ymm"#c3",%%ymm3;"\ + "vperm2f128 $2,%%ymm0,%%ymm2,%%ymm"#c1"; vperm2f128 $2,%%ymm1,%%ymm3,%%ymm"#c2";"\ + "vmovups %%ymm"#c1",(%3); vmovups %%ymm"#c2",(%3,%4,1); leaq (%3,%4,2),%3;"\ + "vperm2f128 $19,%%ymm0,%%ymm2,%%ymm"#c3"; vperm2f128 $19,%%ymm1,%%ymm3,%%ymm"#c4";"\ + "vmovups %%ymm"#c3",(%3); vmovups %%ymm"#c4",(%3,%4,1); leaq (%3,%4,2),%3;" + +#define save_c_m4n4(c1,c2)\ + "vunpcklpd %%ymm"#c2",%%ymm"#c1",%%ymm0; vunpckhpd %%ymm"#c2",%%ymm"#c1",%%ymm1;"\ + "vmovups %%xmm0,(%3); vmovups %%xmm1,(%3,%4,1); leaq (%3,%4,2),%3;"\ + "vextractf128 $1,%%ymm0,(%3); vextractf128 $1,%%ymm1,(%3,%4,1); leaq (%3,%4,2),%3;" + +#define save_c_m2n4(c1)\ + "vextractf128 $1,%%ymm"#c1",%%xmm1; vmovsd %%xmm"#c1",(%3); vmovhpd %%xmm"#c1",(%3,%4,1); leaq (%3,%4,2),%3;"\ + "vmovsd %%xmm1,(%3); vmovhpd %%xmm1,(%3,%4,1); leaq (%3,%4,2),%3;" + +#define save_c_m1n4(c1)\ + "vmovss %%xmm"#c1",(%3); vextractps $1,%%xmm"#c1",(%3,%4,1); leaq (%3,%4,2),%3;"\ + "vextractps $2,%%xmm"#c1",(%3); vextractps $3,%%xmm"#c1",(%3,%4,1); leaq (%3,%4,2),%3;" + +#define SOLVE_up_m2n4(a_off,c1)\ + "vbroadcastsd "#a_off"(%0),%%ymm0; vblendps $170,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1";"\ + "vmovsldup %%ymm"#c1",%%ymm1;" + +#define SOLVE_up_m2n8(a_off,c1,c2)\ + "vbroadcastsd "#a_off"(%0),%%ymm0; vblendps $170,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1"; vmulps %%ymm2,%%ymm"#c2",%%ymm"#c2";"\ + "vmovsldup %%ymm"#c1",%%ymm1; vmovsldup %%ymm"#c2",%%ymm2;" + +#define SOLVE_up_m2n12(a_off,c1,c2,c3)\ + "vbroadcastsd "#a_off"(%0),%%ymm0; vblendps $170,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1"; vmulps %%ymm2,%%ymm"#c2",%%ymm"#c2"; vmulps %%ymm2,%%ymm"#c3",%%ymm"#c3";"\ + "vmovsldup %%ymm"#c1",%%ymm1; vmovsldup %%ymm"#c2",%%ymm2; vmovsldup %%ymm"#c3",%%ymm3;" + +#define SOLVE_uplo_m2n4(a_off,c1) SOLVE_up_m2n4(a_off,c1)\ + "vblendps $85,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1";" + +#define SOLVE_uplo_m2n8(a_off,c1,c2) SOLVE_up_m2n8(a_off,c1,c2)\ + "vblendps $85,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1"; vfnmadd231ps %%ymm0,%%ymm2,%%ymm"#c2";" + +#define SOLVE_uplo_m2n12(a_off,c1,c2,c3) 
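/* How the m2 solvers above touch one row of the interleaved 2x4 tile (even
 * lanes = row 0, odd lanes = row 1) without shuffling C: vbroadcastsd fills
 * ymm0 with a packed {d, e} pair as {d,e,d,e,...}; with %8 = eight 1.0f and
 * %9 = eight 0.0f,
 *
 *   vblendps $170 (0b10101010): odd lanes from %8  -> {d, 1, d, 1, ...}
 *   vblendps $85  (0b01010101): even lanes from %9 -> {0, e, 0, e, ...}
 *
 * so in SOLVE_up one vmulps scales row 0 by the reciprocal d while leaving
 * row 1 intact, and in SOLVE_uplo one vfnmadd231ps then subtracts e times the
 * vmovsldup-duplicated solved row from row 1. SOLVE_lo/SOLVE_loup mirror this
 * with the masks swapped and vmovshdup. Lane arithmetic of the pair (sketch):
 *
 *   c  = c * {d,1,d,1,d,1,d,1};                 // scale the live row
 *   c -= {0,e,0,e,0,e,0,e} * dup_live_row(c);   // eliminate it from the other
 */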
SOLVE_up_m2n12(a_off,c1,c2,c3)\ + "vblendps $85,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1"; vfnmadd231ps %%ymm0,%%ymm2,%%ymm"#c2"; vfnmadd231ps %%ymm0,%%ymm3,%%ymm"#c3";" + +#define SOLVE_lo_m2n4(a_off,c1)\ + "vbroadcastsd "#a_off"(%0),%%ymm0; vblendps $85,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1";"\ + "vmovshdup %%ymm"#c1",%%ymm1;" + +#define SOLVE_lo_m2n8(a_off,c1,c2)\ + "vbroadcastsd "#a_off"(%0),%%ymm0; vblendps $85,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1"; vmulps %%ymm2,%%ymm"#c2",%%ymm"#c2";"\ + "vmovshdup %%ymm"#c1",%%ymm1; vmovshdup %%ymm"#c2",%%ymm2;" + +#define SOLVE_lo_m2n12(a_off,c1,c2,c3)\ + "vbroadcastsd "#a_off"(%0),%%ymm0; vblendps $85,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1"; vmulps %%ymm2,%%ymm"#c2",%%ymm"#c2"; vmulps %%ymm2,%%ymm"#c3",%%ymm"#c3";"\ + "vmovshdup %%ymm"#c1",%%ymm1; vmovshdup %%ymm"#c2",%%ymm2; vmovshdup %%ymm"#c3",%%ymm3;" + +#define SOLVE_loup_m2n4(a_off,c1) SOLVE_lo_m2n4(a_off,c1)\ + "vblendps $170,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1";" + +#define SOLVE_loup_m2n8(a_off,c1,c2) SOLVE_lo_m2n8(a_off,c1,c2)\ + "vblendps $170,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1"; vfnmadd231ps %%ymm0,%%ymm2,%%ymm"#c2";" + +#define SOLVE_loup_m2n12(a_off,c1,c2,c3) SOLVE_lo_m2n12(a_off,c1,c2,c3)\ + "vblendps $170,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1"; vfnmadd231ps %%ymm0,%%ymm2,%%ymm"#c2"; vfnmadd231ps %%ymm0,%%ymm3,%%ymm"#c3";" + +#define SOLVE_m1n4(a_off,c1) "vbroadcastss "#a_off"(%0),%%xmm0; vmulps %%xmm0,%%xmm"#c1",%%xmm"#c1";" +#define SOLVE_m1n8(a_off,c1,c2) SOLVE_m1n4(a_off,c1) "vmulps %%xmm0,%%xmm"#c2",%%xmm"#c2";" +#define SOLVE_m1n12(a_off,c1,c2,c3) SOLVE_m1n8(a_off,c1,c2) "vmulps %%xmm0,%%xmm"#c3",%%xmm"#c3";" + +#define SUBTRACT_m2n4(a_off,c1) "vbroadcastsd "#a_off"(%0),%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1";" +#define SUBTRACT_m2n8(a_off,c1,c2) SUBTRACT_m2n4(a_off,c1) "vfnmadd231ps %%ymm0,%%ymm2,%%ymm"#c2";" +#define SUBTRACT_m2n12(a_off,c1,c2,c3) SUBTRACT_m2n8(a_off,c1,c2) "vfnmadd231ps %%ymm0,%%ymm3,%%ymm"#c3";" + +#define save_b_m2n4(c1,tmp,b_off,...)\ + "vpermilps $216,%%ymm"#c1",%%ymm"#tmp"; vpermpd $216,%%ymm"#tmp",%%ymm"#tmp"; vmovups %%ymm"#tmp","#b_off"("#__VA_ARGS__");" + +#define SAVE_b_m2n4(b_off,c1) save_b_m2n4(c1,1,b_off,%1) +#define SAVE_b_m2n8(b_off,c1,c2) SAVE_b_m2n4(b_off,c1) save_b_m2n4(c2,2,b_off,%1,%%r12,4) +#define SAVE_b_m2n12(b_off,c1,c2,c3) SAVE_b_m2n8(b_off,c1,c2) save_b_m2n4(c3,3,b_off,%1,%%r12,8) + +#define SAVE_b_m1n4(b_off,c1) "vmovups %%xmm"#c1","#b_off"(%1);" +#define SAVE_b_m1n8(b_off,c1,c2) SAVE_b_m1n4(b_off,c1) "vmovups %%xmm"#c2","#b_off"(%1,%%r12,4);" +#define SAVE_b_m1n12(b_off,c1,c2,c3) SAVE_b_m1n8(b_off,c1,c2) "vmovups %%xmm"#c3","#b_off"(%1,%%r12,8);" + diff --git a/kernel/x86_64/strsm_kernel_8x4_haswell_RN.c b/kernel/x86_64/strsm_kernel_8x4_haswell_RN.c new file mode 100644 index 000000000..4e2cd4fe6 --- /dev/null +++ b/kernel/x86_64/strsm_kernel_8x4_haswell_RN.c @@ -0,0 +1,279 @@ +#include "common.h" +#include +#include "strsm_kernel_8x4_haswell_R_common.h" + +#define SOLVE_RN_m8n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63) "movq %2,%3; addq $32,%2;"\ + SOLVE_leri_m8n2(0,4,5,%1) SUBTRACT_m8n2(8,6,7,%1)\ + SOLVE_ri_m8n2(16,4,5,%1) SUBTRACT_m8n2(24,6,7,%1)\ + SAVE_SOLUTION_m8n2(4,5,0)\ + SOLVE_leri_m8n2(40,6,7,%1)\ + SOLVE_ri_m8n2(56,6,7,%1)\ + SAVE_SOLUTION_m8n2(6,7,64) + +#define SOLVE_RN_m8n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63) GEMM_SUM_REORDER_8x4(8,9,10,11,63) "movq %2,%3; 
addq $32,%2;"\ + SOLVE_leri_m8n2(0,4,5,%1) SUBTRACT_m8n2(8,6,7,%1) SUBTRACT_m8n2(0,8,9,%1,%%r12,4) SUBTRACT_m8n2(8,10,11,%1,%%r12,4)\ + SOLVE_ri_m8n2(16,4,5,%1) SUBTRACT_m8n2(24,6,7,%1) SUBTRACT_m8n2(16,8,9,%1,%%r12,4) SUBTRACT_m8n2(24,10,11,%1,%%r12,4)\ + SAVE_SOLUTION_m8n2(4,5,0)\ + SOLVE_leri_m8n2(40,6,7,%1) SUBTRACT_m8n2(32,8,9,%1,%%r12,4) SUBTRACT_m8n2(40,10,11,%1,%%r12,4)\ + SOLVE_ri_m8n2(56,6,7,%1) SUBTRACT_m8n2(48,8,9,%1,%%r12,4) SUBTRACT_m8n2(56,10,11,%1,%%r12,4)\ + SAVE_SOLUTION_m8n2(6,7,64)\ + SOLVE_leri_m8n2(64,8,9,%1,%%r12,4) SUBTRACT_m8n2(72,10,11,%1,%%r12,4)\ + SOLVE_ri_m8n2(80,8,9,%1,%%r12,4) SUBTRACT_m8n2(88,10,11,%1,%%r12,4)\ + SAVE_SOLUTION_m8n2(8,9,128)\ + SOLVE_leri_m8n2(104,10,11,%1,%%r12,4)\ + SOLVE_ri_m8n2(120,10,11,%1,%%r12,4)\ + SAVE_SOLUTION_m8n2(10,11,192) + +#define SOLVE_RN_m8n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63) GEMM_SUM_REORDER_8x4(8,9,10,11,63) GEMM_SUM_REORDER_8x4(12,13,14,15,63) "movq %2,%3; addq $32,%2;"\ + SOLVE_leri_m8n2(0,4,5,%1) SUBTRACT_m8n2(8,6,7,%1) SUBTRACT_m8n2(0,8,9,%1,%%r12,4) SUBTRACT_m8n2(8,10,11,%1,%%r12,4) SUBTRACT_m8n2(0,12,13,%1,%%r12,8) SUBTRACT_m8n2(8,14,15,%1,%%r12,8)\ + SOLVE_ri_m8n2(16,4,5,%1) SUBTRACT_m8n2(24,6,7,%1) SUBTRACT_m8n2(16,8,9,%1,%%r12,4) SUBTRACT_m8n2(24,10,11,%1,%%r12,4) SUBTRACT_m8n2(16,12,13,%1,%%r12,8) SUBTRACT_m8n2(24,14,15,%1,%%r12,8)\ + SAVE_SOLUTION_m8n2(4,5,0)\ + SOLVE_leri_m8n2(40,6,7,%1) SUBTRACT_m8n2(32,8,9,%1,%%r12,4) SUBTRACT_m8n2(40,10,11,%1,%%r12,4) SUBTRACT_m8n2(32,12,13,%1,%%r12,8) SUBTRACT_m8n2(40,14,15,%1,%%r12,8)\ + SOLVE_ri_m8n2(56,6,7,%1) SUBTRACT_m8n2(48,8,9,%1,%%r12,4) SUBTRACT_m8n2(56,10,11,%1,%%r12,4) SUBTRACT_m8n2(48,12,13,%1,%%r12,8) SUBTRACT_m8n2(56,14,15,%1,%%r12,8)\ + SAVE_SOLUTION_m8n2(6,7,64)\ + SOLVE_leri_m8n2(64,8,9,%1,%%r12,4) SUBTRACT_m8n2(72,10,11,%1,%%r12,4) SUBTRACT_m8n2(64,12,13,%1,%%r12,8) SUBTRACT_m8n2(72,14,15,%1,%%r12,8)\ + SOLVE_ri_m8n2(80,8,9,%1,%%r12,4) SUBTRACT_m8n2(88,10,11,%1,%%r12,4) SUBTRACT_m8n2(80,12,13,%1,%%r12,8) SUBTRACT_m8n2(88,14,15,%1,%%r12,8)\ + SAVE_SOLUTION_m8n2(8,9,128)\ + SOLVE_leri_m8n2(104,10,11,%1,%%r12,4) SUBTRACT_m8n2(96,12,13,%1,%%r12,8) SUBTRACT_m8n2(104,14,15,%1,%%r12,8)\ + SOLVE_ri_m8n2(120,10,11,%1,%%r12,4) SUBTRACT_m8n2(112,12,13,%1,%%r12,8) SUBTRACT_m8n2(120,14,15,%1,%%r12,8)\ + SAVE_SOLUTION_m8n2(10,11,192)\ + SOLVE_leri_m8n2(128,12,13,%1,%%r12,8) SUBTRACT_m8n2(136,14,15,%1,%%r12,8)\ + SOLVE_ri_m8n2(144,12,13,%1,%%r12,8) SUBTRACT_m8n2(152,14,15,%1,%%r12,8)\ + SAVE_SOLUTION_m8n2(12,13,256)\ + SOLVE_leri_m8n2(168,14,15,%1,%%r12,8)\ + SOLVE_ri_m8n2(184,14,15,%1,%%r12,8)\ + SAVE_SOLUTION_m8n2(14,15,320) + +#define SOLVE_RN_m4n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) "movq %2,%3; addq $16,%2;"\ + SOLVE_leri_m4n2(0,4,%1) SUBTRACT_m4n2(8,5,%1)\ + SOLVE_ri_m4n2(16,4,%1) SUBTRACT_m4n2(24,5,%1)\ + SAVE_SOLUTION_m4n2(4,0)\ + SOLVE_leri_m4n2(40,5,%1)\ + SOLVE_ri_m4n2(56,5,%1)\ + SAVE_SOLUTION_m4n2(5,32) + +#define SOLVE_RN_m4n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) GEMM_SUM_REORDER_4x4(8,9,10,11,6,7) "movq %2,%3; addq $16,%2;"\ + SOLVE_leri_m4n2(0,4,%1) SUBTRACT_m4n2(8,5,%1) SUBTRACT_m4n2(0,6,%1,%%r12,4) SUBTRACT_m4n2(8,7,%1,%%r12,4)\ + SOLVE_ri_m4n2(16,4,%1) SUBTRACT_m4n2(24,5,%1) SUBTRACT_m4n2(16,6,%1,%%r12,4) SUBTRACT_m4n2(24,7,%1,%%r12,4)\ + SAVE_SOLUTION_m4n2(4,0)\ + SOLVE_leri_m4n2(40,5,%1) SUBTRACT_m4n2(32,6,%1,%%r12,4) SUBTRACT_m4n2(40,7,%1,%%r12,4)\ + SOLVE_ri_m4n2(56,5,%1) SUBTRACT_m4n2(48,6,%1,%%r12,4) SUBTRACT_m4n2(56,7,%1,%%r12,4)\ + SAVE_SOLUTION_m4n2(5,32)\ + SOLVE_leri_m4n2(64,6,%1,%%r12,4) 
SUBTRACT_m4n2(72,7,%1,%%r12,4)\ + SOLVE_ri_m4n2(80,6,%1,%%r12,4) SUBTRACT_m4n2(88,7,%1,%%r12,4)\ + SAVE_SOLUTION_m4n2(6,64)\ + SOLVE_leri_m4n2(104,7,%1,%%r12,4)\ + SOLVE_ri_m4n2(120,7,%1,%%r12,4)\ + SAVE_SOLUTION_m4n2(7,96) + +#define SOLVE_RN_m4n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) GEMM_SUM_REORDER_4x4(8,9,10,11,6,7) GEMM_SUM_REORDER_4x4(12,13,14,15,8,9) "movq %2,%3; addq $16,%2;"\ + SOLVE_leri_m4n2(0,4,%1) SUBTRACT_m4n2(8,5,%1) SUBTRACT_m4n2(0,6,%1,%%r12,4) SUBTRACT_m4n2(8,7,%1,%%r12,4) SUBTRACT_m4n2(0,8,%1,%%r12,8) SUBTRACT_m4n2(8,9,%1,%%r12,8)\ + SOLVE_ri_m4n2(16,4,%1) SUBTRACT_m4n2(24,5,%1) SUBTRACT_m4n2(16,6,%1,%%r12,4) SUBTRACT_m4n2(24,7,%1,%%r12,4) SUBTRACT_m4n2(16,8,%1,%%r12,8) SUBTRACT_m4n2(24,9,%1,%%r12,8)\ + SAVE_SOLUTION_m4n2(4,0)\ + SOLVE_leri_m4n2(40,5,%1) SUBTRACT_m4n2(32,6,%1,%%r12,4) SUBTRACT_m4n2(40,7,%1,%%r12,4) SUBTRACT_m4n2(32,8,%1,%%r12,8) SUBTRACT_m4n2(40,9,%1,%%r12,8)\ + SOLVE_ri_m4n2(56,5,%1) SUBTRACT_m4n2(48,6,%1,%%r12,4) SUBTRACT_m4n2(56,7,%1,%%r12,4) SUBTRACT_m4n2(48,8,%1,%%r12,8) SUBTRACT_m4n2(56,9,%1,%%r12,8)\ + SAVE_SOLUTION_m4n2(5,32)\ + SOLVE_leri_m4n2(64,6,%1,%%r12,4) SUBTRACT_m4n2(72,7,%1,%%r12,4) SUBTRACT_m4n2(64,8,%1,%%r12,8) SUBTRACT_m4n2(72,9,%1,%%r12,8)\ + SOLVE_ri_m4n2(80,6,%1,%%r12,4) SUBTRACT_m4n2(88,7,%1,%%r12,4) SUBTRACT_m4n2(80,8,%1,%%r12,8) SUBTRACT_m4n2(88,9,%1,%%r12,8)\ + SAVE_SOLUTION_m4n2(6,64)\ + SOLVE_leri_m4n2(104,7,%1,%%r12,4) SUBTRACT_m4n2(96,8,%1,%%r12,8) SUBTRACT_m4n2(104,9,%1,%%r12,8)\ + SOLVE_ri_m4n2(120,7,%1,%%r12,4) SUBTRACT_m4n2(112,8,%1,%%r12,8) SUBTRACT_m4n2(120,9,%1,%%r12,8)\ + SAVE_SOLUTION_m4n2(7,96)\ + SOLVE_leri_m4n2(128,8,%1,%%r12,8) SUBTRACT_m4n2(136,9,%1,%%r12,8)\ + SOLVE_ri_m4n2(144,8,%1,%%r12,8) SUBTRACT_m4n2(152,9,%1,%%r12,8)\ + SAVE_SOLUTION_m4n2(8,128)\ + SOLVE_leri_m4n2(168,9,%1,%%r12,8)\ + SOLVE_ri_m4n2(184,9,%1,%%r12,8)\ + SAVE_SOLUTION_m4n2(9,160) + +#define SOLVE_RN_m2n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5) "movq %2,%3; addq $8,%2;"\ + SOLVE_col1_ltor_m2n4(0,4,5,%1)\ + SOLVE_col2_ltor_m2n4(16,4,5,%1)\ + SOLVE_col3_ltor_m2n4(32,4,5,%1)\ + SOLVE_col4_ltor_m2n4(48,4,5,%1)\ + SAVE_SOLUTION_m2n4(4,5,0) + +#define SOLVE_RN_m2n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5) GEMM_SUM_REORDER_2x4(6,7) "movq %2,%3; addq $8,%2;"\ + SOLVE_col1_ltor_m2n4(0,4,5,%1) SUBTRACT_m2n4(0,6,7,%1,%%r12,4)\ + SOLVE_col2_ltor_m2n4(16,4,5,%1) SUBTRACT_m2n4(16,6,7,%1,%%r12,4)\ + SOLVE_col3_ltor_m2n4(32,4,5,%1) SUBTRACT_m2n4(32,6,7,%1,%%r12,4)\ + SOLVE_col4_ltor_m2n4(48,4,5,%1) SUBTRACT_m2n4(48,6,7,%1,%%r12,4)\ + SAVE_SOLUTION_m2n4(4,5,0)\ + SOLVE_col1_ltor_m2n4(64,6,7,%1,%%r12,4)\ + SOLVE_col2_ltor_m2n4(80,6,7,%1,%%r12,4)\ + SOLVE_col3_ltor_m2n4(96,6,7,%1,%%r12,4)\ + SOLVE_col4_ltor_m2n4(112,6,7,%1,%%r12,4)\ + SAVE_SOLUTION_m2n4(6,7,32) + +#define SOLVE_RN_m2n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5) GEMM_SUM_REORDER_2x4(6,7) GEMM_SUM_REORDER_2x4(8,9) "movq %2,%3; addq $8,%2;"\ + SOLVE_col1_ltor_m2n4(0,4,5,%1) SUBTRACT_m2n4(0,6,7,%1,%%r12,4) SUBTRACT_m2n4(0,8,9,%1,%%r12,8)\ + SOLVE_col2_ltor_m2n4(16,4,5,%1) SUBTRACT_m2n4(16,6,7,%1,%%r12,4) SUBTRACT_m2n4(16,8,9,%1,%%r12,8)\ + SOLVE_col3_ltor_m2n4(32,4,5,%1) SUBTRACT_m2n4(32,6,7,%1,%%r12,4) SUBTRACT_m2n4(32,8,9,%1,%%r12,8)\ + SOLVE_col4_ltor_m2n4(48,4,5,%1) SUBTRACT_m2n4(48,6,7,%1,%%r12,4) SUBTRACT_m2n4(48,8,9,%1,%%r12,8)\ + SAVE_SOLUTION_m2n4(4,5,0)\ + SOLVE_col1_ltor_m2n4(64,6,7,%1,%%r12,4) SUBTRACT_m2n4(64,8,9,%1,%%r12,8)\ + SOLVE_col2_ltor_m2n4(80,6,7,%1,%%r12,4) SUBTRACT_m2n4(80,8,9,%1,%%r12,8)\ + SOLVE_col3_ltor_m2n4(96,6,7,%1,%%r12,4) 
SUBTRACT_m2n4(96,8,9,%1,%%r12,8)\ + SOLVE_col4_ltor_m2n4(112,6,7,%1,%%r12,4) SUBTRACT_m2n4(112,8,9,%1,%%r12,8)\ + SAVE_SOLUTION_m2n4(6,7,32)\ + SOLVE_col1_ltor_m2n4(128,8,9,%1,%%r12,8)\ + SOLVE_col2_ltor_m2n4(144,8,9,%1,%%r12,8)\ + SOLVE_col3_ltor_m2n4(160,8,9,%1,%%r12,8)\ + SOLVE_col4_ltor_m2n4(176,8,9,%1,%%r12,8)\ + SAVE_SOLUTION_m2n4(8,9,64) + +#define SOLVE_RN_m1n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4) "movq %2,%3; addq $4,%2;"\ + SOLVE_col1_ltor_m1n4(0,4,%1)\ + SOLVE_col2_ltor_m1n4(16,4,%1)\ + SOLVE_col3_ltor_m1n4(32,4,%1)\ + SOLVE_col4_ltor_m1n4(48,4,%1)\ + SAVE_SOLUTION_m1n4(4,0) + +#define SOLVE_RN_m1n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4) GEMM_SUM_REORDER_1x4(5) "movq %2,%3; addq $4,%2;"\ + SOLVE_col1_ltor_m1n4(0,4,%1) SUBTRACT_m1n4(0,5,%1,%%r12,4)\ + SOLVE_col2_ltor_m1n4(16,4,%1) SUBTRACT_m1n4(16,5,%1,%%r12,4)\ + SOLVE_col3_ltor_m1n4(32,4,%1) SUBTRACT_m1n4(32,5,%1,%%r12,4)\ + SOLVE_col4_ltor_m1n4(48,4,%1) SUBTRACT_m1n4(48,5,%1,%%r12,4)\ + SAVE_SOLUTION_m1n4(4,0)\ + SOLVE_col1_ltor_m1n4(64,5,%1,%%r12,4)\ + SOLVE_col2_ltor_m1n4(80,5,%1,%%r12,4)\ + SOLVE_col3_ltor_m1n4(96,5,%1,%%r12,4)\ + SOLVE_col4_ltor_m1n4(112,5,%1,%%r12,4)\ + SAVE_SOLUTION_m1n4(5,16) + +#define SOLVE_RN_m1n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4) GEMM_SUM_REORDER_1x4(5) GEMM_SUM_REORDER_1x4(6) "movq %2,%3; addq $4,%2;"\ + SOLVE_col1_ltor_m1n4(0,4,%1) SUBTRACT_m1n4(0,5,%1,%%r12,4) SUBTRACT_m1n4(0,6,%1,%%r12,8)\ + SOLVE_col2_ltor_m1n4(16,4,%1) SUBTRACT_m1n4(16,5,%1,%%r12,4) SUBTRACT_m1n4(16,6,%1,%%r12,8)\ + SOLVE_col3_ltor_m1n4(32,4,%1) SUBTRACT_m1n4(32,5,%1,%%r12,4) SUBTRACT_m1n4(32,6,%1,%%r12,8)\ + SOLVE_col4_ltor_m1n4(48,4,%1) SUBTRACT_m1n4(48,5,%1,%%r12,4) SUBTRACT_m1n4(48,6,%1,%%r12,8)\ + SAVE_SOLUTION_m1n4(4,0)\ + SOLVE_col1_ltor_m1n4(64,5,%1,%%r12,4) SUBTRACT_m1n4(64,6,%1,%%r12,8)\ + SOLVE_col2_ltor_m1n4(80,5,%1,%%r12,4) SUBTRACT_m1n4(80,6,%1,%%r12,8)\ + SOLVE_col3_ltor_m1n4(96,5,%1,%%r12,4) SUBTRACT_m1n4(96,6,%1,%%r12,8)\ + SOLVE_col4_ltor_m1n4(112,5,%1,%%r12,4) SUBTRACT_m1n4(112,6,%1,%%r12,8)\ + SAVE_SOLUTION_m1n4(5,16)\ + SOLVE_col1_ltor_m1n4(128,6,%1,%%r12,8)\ + SOLVE_col2_ltor_m1n4(144,6,%1,%%r12,8)\ + SOLVE_col3_ltor_m1n4(160,6,%1,%%r12,8)\ + SOLVE_col4_ltor_m1n4(176,6,%1,%%r12,8)\ + SAVE_SOLUTION_m1n4(6,32) + +#define GEMM_RN_SIMPLE(mdim,ndim) \ + "movq %%r15,%0; leaq (%%r15,%%r12,"#mdim"),%%r15; movq %%r13,%5; movq %%r14,%1;" INIT_m##mdim##n##ndim\ + "testq %5,%5; jz 1"#mdim""#ndim"2f;"\ + "1"#mdim""#ndim"1:\n\t"\ + GEMM_KERNEL_k1m##mdim##n##ndim "addq $16,%1; addq $"#mdim"*4,%0; decq %5; jnz 1"#mdim""#ndim"1b;"\ + "1"#mdim""#ndim"2:\n\t" +#define GEMM_RN_m8n4 GEMM_RN_SIMPLE(8,4) +#define GEMM_RN_m8n8 GEMM_RN_SIMPLE(8,8) +#define GEMM_RN_m8n12 \ + "movq %%r15,%0; leaq (%%r15,%%r12,8),%%r15; movq %%r13,%5; movq %%r14,%1;" INIT_m8n12\ + "cmpq $8,%5; jb 18122f;"\ + "18121:\n\t"\ + GEMM_KERNEL_k1m8n12 "prefetcht0 384(%0); addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "prefetcht0 384(%0); addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "prefetcht0 384(%0); addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "prefetcht0 384(%0); addq $32,%0; addq $16,%1;"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1;"\ + "subq $8,%5; cmpq $8,%5; jnb 18121b;"\ + "18122:\n\t"\ + "testq %5,%5; jz 18124f;"\ + "18123:\n\t"\ + GEMM_KERNEL_k1m8n12 "addq $32,%0; addq $16,%1; decq %5; jnz 18123b;"\ + "18124:\n\t" +#define GEMM_RN_m4n4 GEMM_RN_SIMPLE(4,4) 
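/* Shape of one strip in the RN path (sketch; kk counts the already-solved B
 * columns, tracked through %7/r13): first subtract their contribution with
 * the negative-accumulating GEMM above, then run the triangular solve on what
 * remains:
 *
 *   if (kk > 0)                              // GEMM_RN_*: C -= A * X_solved
 *       GEMM_KERNEL_N(mdim, n, kk, -1.0, a_strip, sb, c_strip, ldc);
 *   solve_RN(mdim, n, a_strip + kk*mdim, sb + kk*n, c_strip, ldc);
 *
 * This is the same structure the scalar fallback driver below spells out per
 * strip; a_strip/c_strip are illustrative names for the per-strip pointers.
 */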
+#define GEMM_RN_m4n8 GEMM_RN_SIMPLE(4,8) +#define GEMM_RN_m4n12 GEMM_RN_SIMPLE(4,12) +#define GEMM_RN_m2n4 GEMM_RN_SIMPLE(2,4) +#define GEMM_RN_m2n8 GEMM_RN_SIMPLE(2,8) +#define GEMM_RN_m2n12 GEMM_RN_SIMPLE(2,12) +#define GEMM_RN_m1n4 GEMM_RN_SIMPLE(1,4) +#define GEMM_RN_m1n8 GEMM_RN_SIMPLE(1,8) +#define GEMM_RN_m1n12 GEMM_RN_SIMPLE(1,12) + +#define COMPUTE(ndim) {\ + __asm__ __volatile__(\ + "movq %0,%%r15; movq %1,%%r14; movq %7,%%r13; movq %6,%%r12; salq $2,%%r12; movq %10,%%r11;"\ + "cmpq $8,%%r11; jb "#ndim"772f;"\ + #ndim"771:\n\t"\ + GEMM_RN_m8n##ndim SOLVE_RN_m8n##ndim "subq $8,%%r11; cmpq $8,%%r11; jnb "#ndim"771b;"\ + #ndim"772:\n\t"\ + "testq $4,%%r11; jz "#ndim"773f;"\ + GEMM_RN_m4n##ndim SOLVE_RN_m4n##ndim "subq $4,%%r11;"\ + #ndim"773:\n\t"\ + "testq $2,%%r11; jz "#ndim"774f;"\ + GEMM_RN_m2n##ndim SOLVE_RN_m2n##ndim "subq $2,%%r11;"\ + #ndim"774:\n\t"\ + "testq $1,%%r11; jz "#ndim"775f;"\ + GEMM_RN_m1n##ndim SOLVE_RN_m1n##ndim "subq $1,%%r11;"\ + #ndim"775:\n\t"\ + "movq %%r15,%0; movq %%r14,%1; vzeroupper;"\ + :"+r"(a_ptr),"+r"(b_ptr),"+r"(c_ptr),"+r"(c_tmp),"+r"(ldc_bytes),"+r"(k_cnt):"m"(K),"m"(OFF),"m"(one[0]),"m"(zero[0]),"m"(M)\ + :"r11","r12","r13","r14","r15","cc","memory",\ + "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15");\ + a_ptr -= M * K; b_ptr += ndim * K; c_ptr += ldc * ndim - M; OFF += ndim;\ +} + +static void solve_RN(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + FLOAT a0, b0; + int i, j, k; + for (i=0; i7;m_count-=8){ + if(kk>0) GEMM_KERNEL_N(8,n,kk,-1.0,a_ptr,sb,c_ptr,ldc); + solve_RN(8,n,a_ptr+kk*8,sb+kk*n,c_ptr,ldc); + a_ptr += k * 8; c_ptr += 8; + } + for(;m_count>3;m_count-=4){ + if(kk>0) GEMM_KERNEL_N(4,n,kk,-1.0,a_ptr,sb,c_ptr,ldc); + solve_RN(4,n,a_ptr+kk*4,sb+kk*n,c_ptr,ldc); + a_ptr += k * 4; c_ptr += 4; + } + for(;m_count>1;m_count-=2){ + if(kk>0) GEMM_KERNEL_N(2,n,kk,-1.0,a_ptr,sb,c_ptr,ldc); + solve_RN(2,n,a_ptr+kk*2,sb+kk*n,c_ptr,ldc); + a_ptr += k * 2; c_ptr += 2; + } + if(m_count>0){ + if(kk>0) GEMM_KERNEL_N(1,n,kk,-1.0,a_ptr,sb,c_ptr,ldc); + solve_RN(1,n,a_ptr+kk*1,sb+kk*n,c_ptr,ldc); + a_ptr += k * 1; c_ptr += 1; + } +} +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, FLOAT *sa, FLOAT *sb, FLOAT *C, BLASLONG ldc, BLASLONG offset){ + float *a_ptr = sa, *b_ptr = sb, *c_ptr = C, *c_tmp = C; + float one[8] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; + float zero[8] = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; + uint64_t ldc_bytes = (uint64_t)ldc * sizeof(float), K = (uint64_t)k, M = (uint64_t)m, OFF = (uint64_t)-offset, k_cnt = 0; + BLASLONG n_count = n; + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) { COMPUTE_EDGE_1_nchunk(m,2,a_ptr,b_ptr,c_ptr,ldc,k,OFF); b_ptr += 2*k; c_ptr += ldc*2; OFF+=2;} + if(n_count>0) COMPUTE_EDGE_1_nchunk(m,1,a_ptr,b_ptr,c_ptr,ldc,k,OFF); + return 0; +} diff --git a/kernel/x86_64/strsm_kernel_8x4_haswell_RT.c b/kernel/x86_64/strsm_kernel_8x4_haswell_RT.c new file mode 100644 index 000000000..ffcbfbbf0 --- /dev/null +++ b/kernel/x86_64/strsm_kernel_8x4_haswell_RT.c @@ -0,0 +1,281 @@ +#include "common.h" +#include +#include "strsm_kernel_8x4_haswell_R_common.h" + +#define SOLVE_RT_m8n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63) "negq %4; leaq (%3,%4,2),%3; negq %4; addq $32,%2;"\ + SOLVE_rile_m8n2(-8,6,7,%1) SUBTRACT_m8n2(-16,4,5,%1)\ + SOLVE_le_m8n2(-24,6,7,%1) SUBTRACT_m8n2(-32,4,5,%1)\ + 
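/* RT walks the columns right-to-left, so the b-offsets above are negative and
 * the C pointer must step backwards while %4 (ldc in bytes) stays positive.
 * The "negq %4; leaq (%3,%4,2),%3; negq %4" triples do negate / step /
 * restore, i.e. plain C
 *
 *   c_tmp -= 2 * ldc;   // scale 2, 4, or 8 by how many columns are skipped
 *
 * since leaq's scaled-index addressing only supports positive scale factors.
 */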
SAVE_SOLUTION_m8n2(6,7,-64) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-48,4,5,%1)\ + SOLVE_le_m8n2(-64,4,5,%1)\ + SAVE_SOLUTION_m8n2(4,5,-128) + +#define SOLVE_RT_m8n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63) GEMM_SUM_REORDER_8x4(8,9,10,11,63) "negq %4; leaq (%3,%4,2),%3; negq %4; addq $32,%2;"\ + SOLVE_rile_m8n2(-8,10,11,%1,%%r12,4) SUBTRACT_m8n2(-16,8,9,%1,%%r12,4) SUBTRACT_m8n2(-8,6,7,%1) SUBTRACT_m8n2(-16,4,5,%1)\ + SOLVE_le_m8n2(-24,10,11,%1,%%r12,4) SUBTRACT_m8n2(-32,8,9,%1,%%r12,4) SUBTRACT_m8n2(-24,6,7,%1) SUBTRACT_m8n2(-32,4,5,%1)\ + SAVE_SOLUTION_m8n2(10,11,-64) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-48,8,9,%1,%%r12,4) SUBTRACT_m8n2(-40,6,7,%1) SUBTRACT_m8n2(-48,4,5,%1)\ + SOLVE_le_m8n2(-64,8,9,%1,%%r12,4) SUBTRACT_m8n2(-56,6,7,%1) SUBTRACT_m8n2(-64,4,5,%1)\ + SAVE_SOLUTION_m8n2(8,9,-128) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-72,6,7,%1) SUBTRACT_m8n2(-80,4,5,%1)\ + SOLVE_le_m8n2(-88,6,7,%1) SUBTRACT_m8n2(-96,4,5,%1)\ + SAVE_SOLUTION_m8n2(6,7,-192) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-112,4,5,%1)\ + SOLVE_le_m8n2(-128,4,5,%1)\ + SAVE_SOLUTION_m8n2(4,5,-256) + +#define SOLVE_RT_m8n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_8x4(4,5,6,7,63) GEMM_SUM_REORDER_8x4(8,9,10,11,63) GEMM_SUM_REORDER_8x4(12,13,14,15,63) "negq %4; leaq (%3,%4,2),%3; negq %4; addq $32,%2;"\ + SOLVE_rile_m8n2(-8,14,15,%1,%%r12,8) SUBTRACT_m8n2(-16,12,13,%1,%%r12,8) SUBTRACT_m8n2(-8,10,11,%1,%%r12,4) SUBTRACT_m8n2(-16,8,9,%1,%%r12,4) SUBTRACT_m8n2(-8,6,7,%1) SUBTRACT_m8n2(-16,4,5,%1)\ + SOLVE_le_m8n2(-24,14,15,%1,%%r12,8) SUBTRACT_m8n2(-32,12,13,%1,%%r12,8) SUBTRACT_m8n2(-24,10,11,%1,%%r12,4) SUBTRACT_m8n2(-32,8,9,%1,%%r12,4) SUBTRACT_m8n2(-24,6,7,%1) SUBTRACT_m8n2(-32,4,5,%1)\ + SAVE_SOLUTION_m8n2(14,15,-64) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-48,12,13,%1,%%r12,8) SUBTRACT_m8n2(-40,10,11,%1,%%r12,4) SUBTRACT_m8n2(-48,8,9,%1,%%r12,4) SUBTRACT_m8n2(-40,6,7,%1) SUBTRACT_m8n2(-48,4,5,%1)\ + SOLVE_le_m8n2(-64,12,13,%1,%%r12,8) SUBTRACT_m8n2(-56,10,11,%1,%%r12,4) SUBTRACT_m8n2(-64,8,9,%1,%%r12,4) SUBTRACT_m8n2(-56,6,7,%1) SUBTRACT_m8n2(-64,4,5,%1)\ + SAVE_SOLUTION_m8n2(12,13,-128) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-72,10,11,%1,%%r12,4) SUBTRACT_m8n2(-80,8,9,%1,%%r12,4) SUBTRACT_m8n2(-72,6,7,%1) SUBTRACT_m8n2(-80,4,5,%1)\ + SOLVE_le_m8n2(-88,10,11,%1,%%r12,4) SUBTRACT_m8n2(-96,8,9,%1,%%r12,4) SUBTRACT_m8n2(-88,6,7,%1) SUBTRACT_m8n2(-96,4,5,%1)\ + SAVE_SOLUTION_m8n2(10,11,-192) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-112,8,9,%1,%%r12,4) SUBTRACT_m8n2(-104,6,7,%1) SUBTRACT_m8n2(-112,4,5,%1)\ + SOLVE_le_m8n2(-128,8,9,%1,%%r12,4) SUBTRACT_m8n2(-120,6,7,%1) SUBTRACT_m8n2(-128,4,5,%1)\ + SAVE_SOLUTION_m8n2(8,9,-256) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-136,6,7,%1) SUBTRACT_m8n2(-144,4,5,%1)\ + SOLVE_le_m8n2(-152,6,7,%1) SUBTRACT_m8n2(-160,4,5,%1)\ + SAVE_SOLUTION_m8n2(6,7,-320) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m8n2(-176,4,5,%1)\ + SOLVE_le_m8n2(-192,4,5,%1)\ + SAVE_SOLUTION_m8n2(4,5,-384) + +#define SOLVE_RT_m4n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) "negq %4; leaq (%3,%4,2),%3; negq %4; addq $16,%2;"\ + SOLVE_rile_m4n2(-8,5,%1) SUBTRACT_m4n2(-16,4,%1)\ + SOLVE_le_m4n2(-24,5,%1) SUBTRACT_m4n2(-32,4,%1)\ + SAVE_SOLUTION_m4n2(5,-32) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-48,4,%1)\ + SOLVE_le_m4n2(-64,4,%1)\ + SAVE_SOLUTION_m4n2(4,-64) + +#define SOLVE_RT_m4n8 \ + "movq %2,%3;" 
GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) GEMM_SUM_REORDER_4x4(8,9,10,11,6,7) "negq %4; leaq (%3,%4,2),%3; negq %4; addq $16,%2;"\ + SOLVE_rile_m4n2(-8,7,%1,%%r12,4) SUBTRACT_m4n2(-16,6,%1,%%r12,4) SUBTRACT_m4n2(-8,5,%1) SUBTRACT_m4n2(-16,4,%1)\ + SOLVE_le_m4n2(-24,7,%1,%%r12,4) SUBTRACT_m4n2(-32,6,%1,%%r12,4) SUBTRACT_m4n2(-24,5,%1) SUBTRACT_m4n2(-32,4,%1)\ + SAVE_SOLUTION_m4n2(7,-32) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-48,6,%1,%%r12,4) SUBTRACT_m4n2(-40,5,%1) SUBTRACT_m4n2(-48,4,%1)\ + SOLVE_le_m4n2(-64,6,%1,%%r12,4) SUBTRACT_m4n2(-56,5,%1) SUBTRACT_m4n2(-64,4,%1)\ + SAVE_SOLUTION_m4n2(6,-64) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-72,5,%1) SUBTRACT_m4n2(-80,4,%1)\ + SOLVE_le_m4n2(-88,5,%1) SUBTRACT_m4n2(-96,4,%1)\ + SAVE_SOLUTION_m4n2(5,-96) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-112,4,%1)\ + SOLVE_le_m4n2(-128,4,%1)\ + SAVE_SOLUTION_m4n2(4,-128) + +#define SOLVE_RT_m4n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_4x4(4,5,6,7,4,5) GEMM_SUM_REORDER_4x4(8,9,10,11,6,7) GEMM_SUM_REORDER_4x4(12,13,14,15,8,9) "negq %4; leaq (%3,%4,2),%3; negq %4; addq $16,%2;"\ + SOLVE_rile_m4n2(-8,9,%1,%%r12,8) SUBTRACT_m4n2(-16,8,%1,%%r12,8) SUBTRACT_m4n2(-8,7,%1,%%r12,4) SUBTRACT_m4n2(-16,6,%1,%%r12,4) SUBTRACT_m4n2(-8,5,%1) SUBTRACT_m4n2(-16,4,%1)\ + SOLVE_le_m4n2(-24,9,%1,%%r12,8) SUBTRACT_m4n2(-32,8,%1,%%r12,8) SUBTRACT_m4n2(-24,7,%1,%%r12,4) SUBTRACT_m4n2(-32,6,%1,%%r12,4) SUBTRACT_m4n2(-24,5,%1) SUBTRACT_m4n2(-32,4,%1)\ + SAVE_SOLUTION_m4n2(9,-32) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-48,8,%1,%%r12,8) SUBTRACT_m4n2(-40,7,%1,%%r12,4) SUBTRACT_m4n2(-48,6,%1,%%r12,4) SUBTRACT_m4n2(-40,5,%1) SUBTRACT_m4n2(-48,4,%1)\ + SOLVE_le_m4n2(-64,8,%1,%%r12,8) SUBTRACT_m4n2(-56,7,%1,%%r12,4) SUBTRACT_m4n2(-64,6,%1,%%r12,4) SUBTRACT_m4n2(-56,5,%1) SUBTRACT_m4n2(-64,4,%1)\ + SAVE_SOLUTION_m4n2(8,-64) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-72,7,%1,%%r12,4) SUBTRACT_m4n2(-80,6,%1,%%r12,4) SUBTRACT_m4n2(-72,5,%1) SUBTRACT_m4n2(-80,4,%1)\ + SOLVE_le_m4n2(-88,7,%1,%%r12,4) SUBTRACT_m4n2(-96,6,%1,%%r12,4) SUBTRACT_m4n2(-88,5,%1) SUBTRACT_m4n2(-96,4,%1)\ + SAVE_SOLUTION_m4n2(7,-96) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-112,6,%1,%%r12,4) SUBTRACT_m4n2(-104,5,%1) SUBTRACT_m4n2(-112,4,%1)\ + SOLVE_le_m4n2(-128,6,%1,%%r12,4) SUBTRACT_m4n2(-120,5,%1) SUBTRACT_m4n2(-128,4,%1)\ + SAVE_SOLUTION_m4n2(6,-128) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-136,5,%1) SUBTRACT_m4n2(-144,4,%1)\ + SOLVE_le_m4n2(-152,5,%1) SUBTRACT_m4n2(-160,4,%1)\ + SAVE_SOLUTION_m4n2(5,-160) "negq %4; leaq (%3,%4,4),%3; negq %4;"\ + SOLVE_rile_m4n2(-176,4,%1)\ + SOLVE_le_m4n2(-192,4,%1)\ + SAVE_SOLUTION_m4n2(4,-192) + +#define SOLVE_RT_m2n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5) "negq %4; leaq (%3,%4,4),%3; negq %4; addq $8,%2;"\ + SOLVE_col4_rtol_m2n4(-16,4,5,%1)\ + SOLVE_col3_rtol_m2n4(-32,4,5,%1)\ + SOLVE_col2_rtol_m2n4(-48,4,5,%1)\ + SOLVE_col1_rtol_m2n4(-64,4,5,%1)\ + SAVE_SOLUTION_m2n4(4,5,-32) + +#define SOLVE_RT_m2n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5) GEMM_SUM_REORDER_2x4(6,7) "negq %4; leaq (%3,%4,4),%3; negq %4; addq $8,%2;"\ + SOLVE_col4_rtol_m2n4(-16,6,7,%1,%%r12,4) SUBTRACT_m2n4(-16,4,5,%1)\ + SOLVE_col3_rtol_m2n4(-32,6,7,%1,%%r12,4) SUBTRACT_m2n4(-32,4,5,%1)\ + SOLVE_col2_rtol_m2n4(-48,6,7,%1,%%r12,4) SUBTRACT_m2n4(-48,4,5,%1)\ + SOLVE_col1_rtol_m2n4(-64,6,7,%1,%%r12,4) SUBTRACT_m2n4(-64,4,5,%1)\ + SAVE_SOLUTION_m2n4(6,7,-32) "negq %4; leaq (%3,%4,8),%3; negq %4;"\ + SOLVE_col4_rtol_m2n4(-80,4,5,%1)\ + 
SOLVE_col3_rtol_m2n4(-96,4,5,%1)\ + SOLVE_col2_rtol_m2n4(-112,4,5,%1)\ + SOLVE_col1_rtol_m2n4(-128,4,5,%1)\ + SAVE_SOLUTION_m2n4(4,5,-64) + +#define SOLVE_RT_m2n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_2x4(4,5) GEMM_SUM_REORDER_2x4(6,7) GEMM_SUM_REORDER_2x4(8,9) "negq %4; leaq (%3,%4,4),%3; negq %4; addq $8,%2;"\ + SOLVE_col4_rtol_m2n4(-16,8,9,%1,%%r12,8) SUBTRACT_m2n4(-16,6,7,%1,%%r12,4) SUBTRACT_m2n4(-16,4,5,%1)\ + SOLVE_col3_rtol_m2n4(-32,8,9,%1,%%r12,8) SUBTRACT_m2n4(-32,6,7,%1,%%r12,4) SUBTRACT_m2n4(-32,4,5,%1)\ + SOLVE_col2_rtol_m2n4(-48,8,9,%1,%%r12,8) SUBTRACT_m2n4(-48,6,7,%1,%%r12,4) SUBTRACT_m2n4(-48,4,5,%1)\ + SOLVE_col1_rtol_m2n4(-64,8,9,%1,%%r12,8) SUBTRACT_m2n4(-64,6,7,%1,%%r12,4) SUBTRACT_m2n4(-64,4,5,%1)\ + SAVE_SOLUTION_m2n4(8,9,-32) "negq %4; leaq (%3,%4,8),%3; negq %4;"\ + SOLVE_col4_rtol_m2n4(-80,6,7,%1,%%r12,4) SUBTRACT_m2n4(-80,4,5,%1)\ + SOLVE_col3_rtol_m2n4(-96,6,7,%1,%%r12,4) SUBTRACT_m2n4(-96,4,5,%1)\ + SOLVE_col2_rtol_m2n4(-112,6,7,%1,%%r12,4) SUBTRACT_m2n4(-112,4,5,%1)\ + SOLVE_col1_rtol_m2n4(-128,6,7,%1,%%r12,4) SUBTRACT_m2n4(-128,4,5,%1)\ + SAVE_SOLUTION_m2n4(6,7,-64) "negq %4; leaq (%3,%4,8),%3; negq %4;"\ + SOLVE_col4_rtol_m2n4(-144,4,5,%1)\ + SOLVE_col3_rtol_m2n4(-160,4,5,%1)\ + SOLVE_col2_rtol_m2n4(-176,4,5,%1)\ + SOLVE_col1_rtol_m2n4(-192,4,5,%1)\ + SAVE_SOLUTION_m2n4(4,5,-96) + +#define SOLVE_RT_m1n4 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4) "negq %4; leaq (%3,%4,4),%3; negq %4; addq $4,%2;"\ + SOLVE_col4_rtol_m1n4(-16,4,%1)\ + SOLVE_col3_rtol_m1n4(-32,4,%1)\ + SOLVE_col2_rtol_m1n4(-48,4,%1)\ + SOLVE_col1_rtol_m1n4(-64,4,%1)\ + SAVE_SOLUTION_m1n4(4,-16) + +#define SOLVE_RT_m1n8 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4) GEMM_SUM_REORDER_1x4(5) "negq %4; leaq (%3,%4,4),%3; negq %4; addq $4,%2;"\ + SOLVE_col4_rtol_m1n4(-16,5,%1,%%r12,4) SUBTRACT_m1n4(-16,4,%1)\ + SOLVE_col3_rtol_m1n4(-32,5,%1,%%r12,4) SUBTRACT_m1n4(-32,4,%1)\ + SOLVE_col2_rtol_m1n4(-48,5,%1,%%r12,4) SUBTRACT_m1n4(-48,4,%1)\ + SOLVE_col1_rtol_m1n4(-64,5,%1,%%r12,4) SUBTRACT_m1n4(-64,4,%1)\ + SAVE_SOLUTION_m1n4(5,-16) "negq %4; leaq (%3,%4,8),%3; negq %4;"\ + SOLVE_col4_rtol_m1n4(-80,4,%1)\ + SOLVE_col3_rtol_m1n4(-96,4,%1)\ + SOLVE_col2_rtol_m1n4(-112,4,%1)\ + SOLVE_col1_rtol_m1n4(-128,4,%1)\ + SAVE_SOLUTION_m1n4(4,-32) + +#define SOLVE_RT_m1n12 \ + "movq %2,%3;" GEMM_SUM_REORDER_1x4(4) GEMM_SUM_REORDER_1x4(5) GEMM_SUM_REORDER_1x4(6) "negq %4; leaq (%3,%4,4),%3; negq %4; addq $4,%2;"\ + SOLVE_col4_rtol_m1n4(-16,6,%1,%%r12,8) SUBTRACT_m1n4(-16,5,%1,%%r12,4) SUBTRACT_m1n4(-16,4,%1)\ + SOLVE_col3_rtol_m1n4(-32,6,%1,%%r12,8) SUBTRACT_m1n4(-32,5,%1,%%r12,4) SUBTRACT_m1n4(-32,4,%1)\ + SOLVE_col2_rtol_m1n4(-48,6,%1,%%r12,8) SUBTRACT_m1n4(-48,5,%1,%%r12,4) SUBTRACT_m1n4(-48,4,%1)\ + SOLVE_col1_rtol_m1n4(-64,6,%1,%%r12,8) SUBTRACT_m1n4(-64,5,%1,%%r12,4) SUBTRACT_m1n4(-64,4,%1)\ + SAVE_SOLUTION_m1n4(6,-16) "negq %4; leaq (%3,%4,8),%3; negq %4;"\ + SOLVE_col4_rtol_m1n4(-80,5,%1,%%r12,4) SUBTRACT_m1n4(-80,4,%1)\ + SOLVE_col3_rtol_m1n4(-96,5,%1,%%r12,4) SUBTRACT_m1n4(-96,4,%1)\ + SOLVE_col2_rtol_m1n4(-112,5,%1,%%r12,4) SUBTRACT_m1n4(-112,4,%1)\ + SOLVE_col1_rtol_m1n4(-128,5,%1,%%r12,4) SUBTRACT_m1n4(-128,4,%1)\ + SAVE_SOLUTION_m1n4(5,-32) "negq %4; leaq (%3,%4,8),%3; negq %4;"\ + SOLVE_col4_rtol_m1n4(-144,4,%1)\ + SOLVE_col3_rtol_m1n4(-160,4,%1)\ + SOLVE_col2_rtol_m1n4(-176,4,%1)\ + SOLVE_col1_rtol_m1n4(-192,4,%1)\ + SAVE_SOLUTION_m1n4(4,-48) + +/* r14 = b_tail, r15 = a_tail, r13 = k-kk */ +#define GEMM_RT_SIMPLE(mdim,ndim) \ + "leaq (%%r15,%%r12,"#mdim"),%%r15; movq %%r15,%0; movq %%r13,%5; movq %%r14,%1;" 
INIT_m##mdim##n##ndim\ + "testq %5,%5; jz 1"#mdim""#ndim"2f;"\ + "1"#mdim""#ndim"1:\n\t"\ + "subq $16,%1; subq $"#mdim"*4,%0;" GEMM_KERNEL_k1m##mdim##n##ndim "decq %5; jnz 1"#mdim""#ndim"1b;"\ + "1"#mdim""#ndim"2:\n\t" +#define GEMM_RT_m8n4 GEMM_RT_SIMPLE(8,4) +#define GEMM_RT_m8n8 GEMM_RT_SIMPLE(8,8) +#define GEMM_RT_m8n12 \ + "leaq (%%r15,%%r12,8),%%r15; movq %%r15,%0; movq %%r13,%5; movq %%r14,%1;" INIT_m8n12\ + "cmpq $8,%5; jb 18122f;"\ + "18121:\n\t"\ + "prefetcht0 -384(%0); subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "prefetcht0 -384(%0); subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "prefetcht0 -384(%0); subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "prefetcht0 -384(%0); subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12\ + "subq $8,%5; cmpq $8,%5; jnb 18121b;"\ + "18122:\n\t"\ + "testq %5,%5; jz 18124f;"\ + "18123:\n\t"\ + "subq $32,%0; subq $16,%1;" GEMM_KERNEL_k1m8n12 "decq %5; jnz 18123b;"\ + "18124:\n\t" +#define GEMM_RT_m4n4 GEMM_RT_SIMPLE(4,4) +#define GEMM_RT_m4n8 GEMM_RT_SIMPLE(4,8) +#define GEMM_RT_m4n12 GEMM_RT_SIMPLE(4,12) +#define GEMM_RT_m2n4 GEMM_RT_SIMPLE(2,4) +#define GEMM_RT_m2n8 GEMM_RT_SIMPLE(2,8) +#define GEMM_RT_m2n12 GEMM_RT_SIMPLE(2,12) +#define GEMM_RT_m1n4 GEMM_RT_SIMPLE(1,4) +#define GEMM_RT_m1n8 GEMM_RT_SIMPLE(1,8) +#define GEMM_RT_m1n12 GEMM_RT_SIMPLE(1,12) + +#define COMPUTE(ndim) {\ + b_ptr -= (ndim-4)*K; c_ptr -= ndim * ldc;\ + __asm__ __volatile__(\ + "movq %0,%%r15; movq %6,%%r13; subq %7,%%r13; movq %6,%%r12; salq $2,%%r12; movq %1,%%r14; movq %10,%%r11;"\ + "cmpq $8,%%r11; jb "#ndim"772f;"\ + #ndim"771:\n\t"\ + GEMM_RT_m8n##ndim SOLVE_RT_m8n##ndim "subq $8,%%r11; cmpq $8,%%r11; jnb "#ndim"771b;"\ + #ndim"772:\n\t"\ + "testq $4,%%r11; jz "#ndim"773f;"\ + GEMM_RT_m4n##ndim SOLVE_RT_m4n##ndim "subq $4,%%r11;"\ + #ndim"773:\n\t"\ + "testq $2,%%r11; jz "#ndim"774f;"\ + GEMM_RT_m2n##ndim SOLVE_RT_m2n##ndim "subq $2,%%r11;"\ + #ndim"774:\n\t"\ + "testq $1,%%r11; jz "#ndim"775f;"\ + GEMM_RT_m1n##ndim SOLVE_RT_m1n##ndim "subq $1,%%r11;"\ + #ndim"775:\n\t"\ + "movq %%r15,%0; movq %%r14,%1; vzeroupper;"\ + :"+r"(a_ptr),"+r"(b_ptr),"+r"(c_ptr),"+r"(c_tmp),"+r"(ldc_bytes),"+r"(k_cnt):"m"(K),"m"(OFF),"m"(one[0]),"m"(zero[0]),"m"(M)\ + :"r11","r12","r13","r14","r15","cc","memory",\ + "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15");\ + a_ptr -= M * K; b_ptr -= 4 * K; c_ptr -= M; OFF -= ndim;\ +} + +static void solve_RT(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc){ + FLOAT a0, b0; + int i, j, k; + for (i=n-1;i>=0;i--) { + b0 = b[i*n+i]; + for (j=0;j7;m_count-=8){ + if(k-kk>0) GEMM_KERNEL_N(8,n,k-kk,-1.0,a_ptr+kk*8,sb+kk*n,c_ptr,ldc); + solve_RT(8,n,a_ptr+(kk-n)*8,sb+(kk-n)*n,c_ptr,ldc); + a_ptr += k * 8; c_ptr += 8; + } + for(;m_count>3;m_count-=4){ + if(k-kk>0) GEMM_KERNEL_N(4,n,k-kk,-1.0,a_ptr+kk*4,sb+kk*n,c_ptr,ldc); + solve_RT(4,n,a_ptr+(kk-n)*4,sb+(kk-n)*n,c_ptr,ldc); + a_ptr += k * 4; c_ptr += 4; + } + for(;m_count>1;m_count-=2){ + if(k-kk>0) GEMM_KERNEL_N(2,n,k-kk,-1.0,a_ptr+kk*2,sb+kk*n,c_ptr,ldc); + solve_RT(2,n,a_ptr+(kk-n)*2,sb+(kk-n)*n,c_ptr,ldc); + a_ptr += k * 2; c_ptr += 2; + } + if(m_count>0){ + if(k-kk>0) GEMM_KERNEL_N(1,n,k-kk,-1.0,a_ptr+kk*1,sb+kk*n,c_ptr,ldc); + solve_RT(1,n,a_ptr+(kk-n)*1,sb+(kk-n)*n,c_ptr,ldc); + a_ptr += k * 1; c_ptr += 1; 
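/* Scalar shape of solve_RT for reference (sketch; as in solve_LN the packed
 * diagonal is assumed to hold reciprocals): columns are solved from n-1 down
 * to 0, scaling column i of C and then eliminating it from the still-unsolved
 * columns to its left:
 *
 *   static void solve_rt_ref(int m, int n, const float *b, float *c, int ldc) {
 *       for (int i = n - 1; i >= 0; i--) {
 *           for (int j = 0; j < m; j++) c[j + i*ldc] *= b[i*n + i]; // 1/b(i,i)
 *           for (int k = 0; k < i; k++)
 *               for (int j = 0; j < m; j++)
 *                   c[j + k*ldc] -= b[i*n + k] * c[j + i*ldc];
 *       }
 *   }
 */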
+ } +} +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, FLOAT *sa, FLOAT *sb, FLOAT *C, BLASLONG ldc, BLASLONG offset){ + float *a_ptr = sa, *b_ptr = sb+n*k, *c_ptr = C+n*ldc, *c_tmp = C; + float one[8] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; + float zero[8] = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; + uint64_t ldc_bytes = (uint64_t)ldc * sizeof(float), K = (uint64_t)k, M = (uint64_t)m, OFF = (uint64_t)(n-offset), k_cnt = 0; + BLASLONG n_count = n; + if(n&1){b_ptr-=k; c_ptr-=ldc; COMPUTE_EDGE_1_nchunk(m,1,a_ptr,b_ptr,c_ptr,ldc,k,OFF); OFF--; n_count--;} + if(n&2){b_ptr-=k*2; c_ptr-=ldc*2; COMPUTE_EDGE_1_nchunk(m,2,a_ptr,b_ptr,c_ptr,ldc,k,OFF); OFF-=2; n_count-=2;} + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + return 0; +} diff --git a/kernel/x86_64/strsm_kernel_8x4_haswell_R_common.h b/kernel/x86_64/strsm_kernel_8x4_haswell_R_common.h new file mode 100644 index 000000000..36b7aa1a3 --- /dev/null +++ b/kernel/x86_64/strsm_kernel_8x4_haswell_R_common.h @@ -0,0 +1,226 @@ +/* r11 = m_counter, r12 = size_of_k_elements, r13 = kk, r14 = b_head, r15 = a_head */ +/* register i/o: %0 = a_ptr, %1 = b_ptr, %2 = c_ptr, %3 = c_tmp, %4 = ldc, %5 = k_counter */ +/* memory input: %6 = K, %7 = offset, %8 = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}, %9 = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}, %10 = M */ + +#define init_m8n4(c1,c2,c3,c4)\ + "vpxor %%ymm"#c1",%%ymm"#c1",%%ymm"#c1"; vpxor %%ymm"#c2",%%ymm"#c2",%%ymm"#c2";"\ + "vpxor %%ymm"#c3",%%ymm"#c3",%%ymm"#c3"; vpxor %%ymm"#c4",%%ymm"#c4",%%ymm"#c4";" +#define INIT_m8n4 init_m8n4(4,5,6,7) +#define INIT_m8n8 INIT_m8n4 init_m8n4(8,9,10,11) +#define INIT_m8n12 INIT_m8n8 init_m8n4(12,13,14,15) + +#define init_m4n4(c1,c2,c3,c4)\ + "vpxor %%xmm"#c1",%%xmm"#c1",%%xmm"#c1"; vpxor %%xmm"#c2",%%xmm"#c2",%%xmm"#c2";"\ + "vpxor %%xmm"#c3",%%xmm"#c3",%%xmm"#c3"; vpxor %%xmm"#c4",%%xmm"#c4",%%xmm"#c4";" +#define INIT_m4n4 init_m4n4(4,5,6,7) +#define INIT_m4n8 INIT_m4n4 init_m4n4(8,9,10,11) +#define INIT_m4n12 INIT_m4n8 init_m4n4(12,13,14,15) + +#define init_m2n4(c1,c2)\ + "vpxor %%xmm"#c1",%%xmm"#c1",%%xmm"#c1"; vpxor %%xmm"#c2",%%xmm"#c2",%%xmm"#c2";" +#define INIT_m2n4 init_m2n4(4,5) +#define INIT_m2n8 INIT_m2n4 init_m2n4(6,7) +#define INIT_m2n12 INIT_m2n8 init_m2n4(8,9) + +#define init_m1n4(c1) "vpxor %%xmm"#c1",%%xmm"#c1",%%xmm"#c1";" +#define INIT_m1n4 init_m1n4(4) +#define INIT_m1n8 INIT_m1n4 init_m1n4(5) +#define INIT_m1n12 INIT_m1n8 init_m1n4(6) + +#define GEMM_KERNEL_k1m8n4 \ + "vmovsldup (%0),%%ymm1; vmovshdup (%0),%%ymm2;"\ + "vbroadcastsd (%1),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm4; vfnmadd231ps %%ymm3,%%ymm2,%%ymm5;"\ + "vbroadcastsd 8(%1),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm6; vfnmadd231ps %%ymm3,%%ymm2,%%ymm7;" +#define GEMM_KERNEL_k1m8n8 GEMM_KERNEL_k1m8n4\ + "vbroadcastsd (%1,%%r12,4),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm8; vfnmadd231ps %%ymm3,%%ymm2,%%ymm9;"\ + "vbroadcastsd 8(%1,%%r12,4),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm10; vfnmadd231ps %%ymm3,%%ymm2,%%ymm11;" +#define GEMM_KERNEL_k1m8n12 GEMM_KERNEL_k1m8n8\ + "vbroadcastsd (%1,%%r12,8),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm12; vfnmadd231ps %%ymm3,%%ymm2,%%ymm13;"\ + "vbroadcastsd 8(%1,%%r12,8),%%ymm3; vfnmadd231ps %%ymm3,%%ymm1,%%ymm14; vfnmadd231ps %%ymm3,%%ymm2,%%ymm15;" + +#define GEMM_KERNEL_k1m4n4 \ + "vmovsldup (%0),%%xmm1; vmovshdup (%0),%%xmm2;"\ + "vmovddup (%1),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm4; vfnmadd231ps %%xmm3,%%xmm2,%%xmm5;"\ + "vmovddup 8(%1),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm6; 
vfnmadd231ps %%xmm3,%%xmm2,%%xmm7;" +#define GEMM_KERNEL_k1m4n8 GEMM_KERNEL_k1m4n4\ + "vmovddup (%1,%%r12,4),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm8; vfnmadd231ps %%xmm3,%%xmm2,%%xmm9;"\ + "vmovddup 8(%1,%%r12,4),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm10; vfnmadd231ps %%xmm3,%%xmm2,%%xmm11;" +#define GEMM_KERNEL_k1m4n12 GEMM_KERNEL_k1m4n8\ + "vmovddup (%1,%%r12,8),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm12; vfnmadd231ps %%xmm3,%%xmm2,%%xmm13;"\ + "vmovddup 8(%1,%%r12,8),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm14; vfnmadd231ps %%xmm3,%%xmm2,%%xmm15;" + +#define GEMM_KERNEL_k1m2n4 \ + "vbroadcastss (%0),%%xmm1; vbroadcastss 4(%0),%%xmm2;"\ + "vmovups (%1),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm4; vfnmadd231ps %%xmm3,%%xmm2,%%xmm5;" +#define GEMM_KERNEL_k1m2n8 GEMM_KERNEL_k1m2n4\ + "vmovups (%1,%%r12,4),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm6; vfnmadd231ps %%xmm3,%%xmm2,%%xmm7;" +#define GEMM_KERNEL_k1m2n12 GEMM_KERNEL_k1m2n8\ + "vmovups (%1,%%r12,8),%%xmm3; vfnmadd231ps %%xmm3,%%xmm1,%%xmm8; vfnmadd231ps %%xmm3,%%xmm2,%%xmm9;" + +#define GEMM_KERNEL_k1m1n4 "vbroadcastss (%0),%%xmm1; vfnmadd231ps (%1),%%xmm1,%%xmm4;" +#define GEMM_KERNEL_k1m1n8 GEMM_KERNEL_k1m1n4 "vfnmadd231ps (%1,%%r12,4),%%xmm1,%%xmm5;" +#define GEMM_KERNEL_k1m1n12 GEMM_KERNEL_k1m1n8 "vfnmadd231ps (%1,%%r12,8),%%xmm1,%%xmm6;" + +#define GEMM_SUM_REORDER_8x4(c1,c2,c3,c4,prefpos)\ + "vmovups (%3),%%ymm0; vmovups (%3,%4,1),%%ymm1; prefetcht1 "#prefpos"(%3); prefetcht1 "#prefpos"(%3,%4,1); leaq (%3,%4,2),%3;"\ + "vunpcklps %%ymm1,%%ymm0,%%ymm2; vunpckhps %%ymm1,%%ymm0,%%ymm3; vunpcklpd %%ymm3,%%ymm2,%%ymm0; vunpckhpd %%ymm3,%%ymm2,%%ymm1;"\ + "vaddps %%ymm0,%%ymm"#c1",%%ymm"#c1"; vaddps %%ymm1,%%ymm"#c2",%%ymm"#c2";"\ + "vmovups (%3),%%ymm0; vmovups (%3,%4,1),%%ymm1; prefetcht1 "#prefpos"(%3); prefetcht1 "#prefpos"(%3,%4,1); leaq (%3,%4,2),%3;"\ + "vunpcklps %%ymm1,%%ymm0,%%ymm2; vunpckhps %%ymm1,%%ymm0,%%ymm3; vunpcklpd %%ymm3,%%ymm2,%%ymm0; vunpckhpd %%ymm3,%%ymm2,%%ymm1;"\ + "vaddps %%ymm0,%%ymm"#c3",%%ymm"#c3"; vaddps %%ymm1,%%ymm"#c4",%%ymm"#c4";" + +#define GEMM_SUM_REORDER_4x4(c1,c2,c3,c4,co1,co2)\ + "vmovups (%3),%%xmm0; vmovups (%3,%4,1),%%xmm1; leaq (%3,%4,2),%3;"\ + "vunpcklps %%xmm1,%%xmm0,%%xmm2; vunpckhps %%xmm1,%%xmm0,%%xmm3;"\ + "vunpcklpd %%xmm"#c2",%%xmm"#c1",%%xmm0; vunpckhpd %%xmm"#c2",%%xmm"#c1",%%xmm1;"\ + "vaddps %%xmm0,%%xmm2,%%xmm"#c1"; vaddps %%xmm1,%%xmm3,%%xmm"#c2";"\ + "vmovups (%3),%%xmm0; vmovups (%3,%4,1),%%xmm1; leaq (%3,%4,2),%3;"\ + "vunpcklps %%xmm1,%%xmm0,%%xmm2; vunpckhps %%xmm1,%%xmm0,%%xmm3;"\ + "vunpcklpd %%xmm"#c4",%%xmm"#c3",%%xmm0; vunpckhpd %%xmm"#c4",%%xmm"#c3",%%xmm1;"\ + "vaddps %%xmm0,%%xmm2,%%xmm"#c3"; vaddps %%xmm1,%%xmm3,%%xmm"#c4";"\ + "vperm2f128 $2,%%ymm"#c1",%%ymm"#c2",%%ymm"#co1"; vperm2f128 $2,%%ymm"#c3",%%ymm"#c4",%%ymm"#co2";" + +#define GEMM_SUM_REORDER_2x4(c1,c2)\ + "vmovsd (%3),%%xmm0; vmovhpd (%3,%4,1),%%xmm0,%%xmm0; leaq (%3,%4,2),%3; vpermilps $216,%%xmm0,%%xmm0;"\ + "vmovsd (%3),%%xmm1; vmovhpd (%3,%4,1),%%xmm1,%%xmm1; leaq (%3,%4,2),%3; vpermilps $216,%%xmm1,%%xmm1;"\ + "vunpcklpd %%xmm1,%%xmm0,%%xmm2; vaddps %%xmm2,%%xmm"#c1",%%xmm"#c1";"\ + "vunpckhpd %%xmm1,%%xmm0,%%xmm3; vaddps %%xmm3,%%xmm"#c2",%%xmm"#c2";"\ + +#define GEMM_SUM_REORDER_1x4(c1)\ + "vmovss (%3),%%xmm1; vinsertps $16,(%3,%4,1),%%xmm1,%%xmm1; leaq (%3,%4,2),%3;"\ + "vinsertps $32,(%3),%%xmm1,%%xmm1; vinsertps $48,(%3,%4,1),%%xmm1,%%xmm1; leaq (%3,%4,2),%3;"\ + "vaddps %%xmm"#c1",%%xmm1,%%xmm"#c1";" + +#define SOLVE_le_m4n2(b_off,c1,...)\ + "vbroadcastsd "#b_off"("#__VA_ARGS__"),%%ymm0; 
vblendps $170,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1";"\ + "vmovsldup %%ymm"#c1",%%ymm1;" + +#define SOLVE_le_m8n2(b_off,c1,c2,...)\ + "vbroadcastsd "#b_off"("#__VA_ARGS__"),%%ymm0; vblendps $170,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1"; vmulps %%ymm2,%%ymm"#c2",%%ymm"#c2";"\ + "vmovsldup %%ymm"#c1",%%ymm1; vmovsldup %%ymm"#c2",%%ymm2;" + +#define SOLVE_leri_m4n2(b_off,c1,...) SOLVE_le_m4n2(b_off,c1,__VA_ARGS__)\ + "vblendps $85,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1";" + +#define SOLVE_leri_m8n2(b_off,c1,c2,...) SOLVE_le_m8n2(b_off,c1,c2,__VA_ARGS__)\ + "vblendps $85,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1"; vfnmadd231ps %%ymm0,%%ymm2,%%ymm"#c2";" + +#define SOLVE_ri_m4n2(b_off,c1,...)\ + "vbroadcastsd "#b_off"("#__VA_ARGS__"),%%ymm0; vblendps $85,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1";"\ + "vmovshdup %%ymm"#c1",%%ymm1;" + +#define SOLVE_ri_m8n2(b_off,c1,c2,...)\ + "vbroadcastsd "#b_off"("#__VA_ARGS__"),%%ymm0; vblendps $85,%8,%%ymm0,%%ymm2;"\ + "vmulps %%ymm2,%%ymm"#c1",%%ymm"#c1"; vmulps %%ymm2,%%ymm"#c2",%%ymm"#c2";"\ + "vmovshdup %%ymm"#c1",%%ymm1; vmovshdup %%ymm"#c2",%%ymm2;" + +#define SOLVE_rile_m4n2(b_off,c1,...) SOLVE_ri_m4n2(b_off,c1,__VA_ARGS__)\ + "vblendps $170,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1";" + +#define SOLVE_rile_m8n2(b_off,c1,c2,...) SOLVE_ri_m8n2(b_off,c1,c2,__VA_ARGS__)\ + "vblendps $170,%9,%%ymm0,%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1"; vfnmadd231ps %%ymm0,%%ymm2,%%ymm"#c2";" + +#define SOLVE_col1_rtol_m1n4(b_off,c1,...)\ + "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vblendps $14,%8,%%xmm0,%%xmm2;"\ + "vmulps %%xmm2,%%xmm"#c1",%%xmm"#c1";"\ + "vpermilps $0,%%xmm"#c1",%%xmm1;" + +#define SOLVE_col1_rtol_m2n4(b_off,c1,c2,...)\ + "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vblendps $14,%8,%%xmm0,%%xmm2;"\ + "vmulps %%xmm2,%%xmm"#c1",%%xmm"#c1"; vmulps %%xmm2,%%xmm"#c2",%%xmm"#c2";"\ + "vpermilps $0,%%xmm"#c1",%%xmm1; vpermilps $0,%%xmm"#c2",%%xmm2;" + +#define SOLVE_col1_ltor_m1n4(b_off,c1,...) SOLVE_col1_rtol_m1n4(b_off,c1,__VA_ARGS__)\ + "vblendps $1,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1";" + +#define SOLVE_col1_ltor_m2n4(b_off,c1,c2,...) SOLVE_col1_rtol_m2n4(b_off,c1,c2,__VA_ARGS__)\ + "vblendps $1,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1"; vfnmadd231ps %%xmm0,%%xmm2,%%xmm"#c2";" + +#define SOLVE_col2_mul_m1n4(b_off,c1,...)\ + "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vblendps $13,%8,%%xmm0,%%xmm2;"\ + "vmulps %%xmm2,%%xmm"#c1",%%xmm"#c1";"\ + "vpermilps $85,%%xmm"#c1",%%xmm1;" + +#define SOLVE_col2_mul_m2n4(b_off,c1,c2,...)\ + "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vblendps $13,%8,%%xmm0,%%xmm2;"\ + "vmulps %%xmm2,%%xmm"#c1",%%xmm"#c1"; vmulps %%xmm2,%%xmm"#c2",%%xmm"#c2";"\ + "vpermilps $85,%%xmm"#c1",%%xmm1; vpermilps $85,%%xmm"#c2",%%xmm2;" + +#define SOLVE_col2_rtol_m1n4(b_off,c1,...) SOLVE_col2_mul_m1n4(b_off,c1,__VA_ARGS__)\ + "vblendps $14,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1";" + +#define SOLVE_col2_rtol_m2n4(b_off,c1,c2,...) SOLVE_col2_mul_m2n4(b_off,c1,c2,__VA_ARGS__)\ + "vblendps $14,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1"; vfnmadd231ps %%xmm0,%%xmm2,%%xmm"#c2";" + +#define SOLVE_col2_ltor_m1n4(b_off,c1,...) SOLVE_col2_mul_m1n4(b_off,c1,__VA_ARGS__)\ + "vblendps $3,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1";" + +#define SOLVE_col2_ltor_m2n4(b_off,c1,c2,...) 
SOLVE_col2_mul_m2n4(b_off,c1,c2,__VA_ARGS__)\ + "vblendps $3,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1"; vfnmadd231ps %%xmm0,%%xmm2,%%xmm"#c2";" + +#define SOLVE_col3_mul_m1n4(b_off,c1,...)\ + "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vblendps $11,%8,%%xmm0,%%xmm2;"\ + "vmulps %%xmm2,%%xmm"#c1",%%xmm"#c1";"\ + "vpermilps $170,%%xmm"#c1",%%xmm1;" + +#define SOLVE_col3_mul_m2n4(b_off,c1,c2,...)\ + "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vblendps $11,%8,%%xmm0,%%xmm2;"\ + "vmulps %%xmm2,%%xmm"#c1",%%xmm"#c1"; vmulps %%xmm2,%%xmm"#c2",%%xmm"#c2";"\ + "vpermilps $170,%%xmm"#c1",%%xmm1; vpermilps $170,%%xmm"#c2",%%xmm2;" + +#define SOLVE_col3_rtol_m1n4(b_off,c1,...) SOLVE_col3_mul_m1n4(b_off,c1,__VA_ARGS__)\ + "vblendps $12,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1";" + +#define SOLVE_col3_rtol_m2n4(b_off,c1,c2,...) SOLVE_col3_mul_m2n4(b_off,c1,c2,__VA_ARGS__)\ + "vblendps $12,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1"; vfnmadd231ps %%xmm0,%%xmm2,%%xmm"#c2";" + +#define SOLVE_col3_ltor_m1n4(b_off,c1,...) SOLVE_col3_mul_m1n4(b_off,c1,__VA_ARGS__)\ + "vblendps $7,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1";" + +#define SOLVE_col3_ltor_m2n4(b_off,c1,c2,...) SOLVE_col3_mul_m2n4(b_off,c1,c2,__VA_ARGS__)\ + "vblendps $7,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1"; vfnmadd231ps %%xmm0,%%xmm2,%%xmm"#c2";" + +#define SOLVE_col4_ltor_m1n4(b_off,c1,...)\ + "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vblendps $7,%8,%%xmm0,%%xmm2;"\ + "vmulps %%xmm2,%%xmm"#c1",%%xmm"#c1";"\ + "vpermilps $255,%%xmm"#c1",%%xmm1;" + +#define SOLVE_col4_ltor_m2n4(b_off,c1,c2,...)\ + "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vblendps $7,%8,%%xmm0,%%xmm2;"\ + "vmulps %%xmm2,%%xmm"#c1",%%xmm"#c1"; vmulps %%xmm2,%%xmm"#c2",%%xmm"#c2";"\ + "vpermilps $255,%%xmm"#c1",%%xmm1; vpermilps $255,%%xmm"#c2",%%xmm2;" + +#define SOLVE_col4_rtol_m1n4(b_off,c1,...) SOLVE_col4_ltor_m1n4(b_off,c1,__VA_ARGS__)\ + "vblendps $8,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1";" + +#define SOLVE_col4_rtol_m2n4(b_off,c1,c2,...) SOLVE_col4_ltor_m2n4(b_off,c1,c2,__VA_ARGS__)\ + "vblendps $8,%9,%%xmm0,%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1"; vfnmadd231ps %%xmm0,%%xmm2,%%xmm"#c2";" + +#define SUBTRACT_m4n2(b_off,c1,...) "vbroadcastsd "#b_off"("#__VA_ARGS__"),%%ymm0; vfnmadd231ps %%ymm0,%%ymm1,%%ymm"#c1";" + +#define SUBTRACT_m8n2(b_off,c1,c2,...) SUBTRACT_m4n2(b_off,c1,__VA_ARGS__) "vfnmadd231ps %%ymm0,%%ymm2,%%ymm"#c2";" + +#define SUBTRACT_m1n4(b_off,c1,...) "vmovups "#b_off"("#__VA_ARGS__"),%%xmm0; vfnmadd231ps %%xmm0,%%xmm1,%%xmm"#c1";" + +#define SUBTRACT_m2n4(b_off,c1,c2,...) 
SUBTRACT_m1n4(b_off,c1,__VA_ARGS__) "vfnmadd231ps %%xmm0,%%xmm2,%%xmm"#c2";" + +#define SAVE_SOLUTION_m8n2(c1,c2,a_off)\ + "vunpcklps %%ymm"#c2",%%ymm"#c1",%%ymm0; vunpckhps %%ymm"#c2",%%ymm"#c1",%%ymm1;"\ + "vunpcklpd %%ymm1,%%ymm0,%%ymm"#c1"; vunpckhpd %%ymm1,%%ymm0,%%ymm"#c2";"\ + "vmovups %%ymm"#c1","#a_off"(%0); vmovups %%ymm"#c2","#a_off"+32(%0);"\ + "vmovups %%ymm"#c1",(%3); vmovups %%ymm"#c2",(%3,%4,1); leaq (%3,%4,2),%3;" + +#define SAVE_SOLUTION_m4n2(c1,a_off)\ + "vpermilps $216,%%ymm"#c1",%%ymm"#c1"; vpermpd $216,%%ymm"#c1",%%ymm"#c1";"\ + "vmovups %%ymm"#c1","#a_off"(%0); vmovups %%xmm"#c1",(%3); vextractf128 $1,%%ymm"#c1",(%3,%4,1); leaq (%3,%4,2),%3;" + +#define SAVE_SOLUTION_m2n4(c1,c2,a_off)\ + "vunpcklps %%xmm"#c2",%%xmm"#c1",%%xmm0; vmovups %%xmm0,"#a_off"(%0); vmovsd %%xmm0,(%3); vmovhpd %%xmm0,(%3,%4,1); leaq (%3,%4,2),%3;"\ + "vunpckhps %%xmm"#c2",%%xmm"#c1",%%xmm0; vmovups %%xmm0,"#a_off"+16(%0); vmovsd %%xmm0,(%3); vmovhpd %%xmm0,(%3,%4,1); leaq (%3,%4,2),%3;" + +#define SAVE_SOLUTION_m1n4(c1,a_off)\ + "vmovups %%xmm"#c1","#a_off"(%0); vmovss %%xmm"#c1",(%3); vextractps $1,%%xmm"#c1",(%3,%4,1); leaq (%3,%4,2),%3;"\ + "vextractps $2,%%xmm"#c1",(%3); vextractps $3,%%xmm"#c1",(%3,%4,1); leaq (%3,%4,2),%3;" diff --git a/kernel/x86_64/sum.S b/kernel/x86_64/sum.S index d075eaa04..9f2cdc1ec 100644 --- a/kernel/x86_64/sum.S +++ b/kernel/x86_64/sum.S @@ -50,6 +50,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + fldz testq M, M jle .L999 diff --git a/kernel/x86_64/symv_L_sse.S b/kernel/x86_64/symv_L_sse.S index 8a5c44c9b..fea4fc746 100644 --- a/kernel/x86_64/symv_L_sse.S +++ b/kernel/x86_64/symv_L_sse.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 12) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 12) diff --git a/kernel/x86_64/symv_L_sse2.S b/kernel/x86_64/symv_L_sse2.S index 0c40a3435..b853ef365 100644 --- a/kernel/x86_64/symv_L_sse2.S +++ b/kernel/x86_64/symv_L_sse2.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 12) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 12) diff --git a/kernel/x86_64/symv_U_sse.S b/kernel/x86_64/symv_U_sse.S index 7a2eeace5..bad367e91 100644 --- a/kernel/x86_64/symv_U_sse.S +++ b/kernel/x86_64/symv_U_sse.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 12) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 12) diff --git a/kernel/x86_64/symv_U_sse2.S b/kernel/x86_64/symv_U_sse2.S index 0408b577c..147201751 100644 --- a/kernel/x86_64/symv_U_sse2.S +++ b/kernel/x86_64/symv_U_sse2.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 12) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined 
(SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/x86_64/tobf16.c b/kernel/x86_64/tobf16.c new file mode 100644 index 000000000..3d1796621 --- /dev/null +++ b/kernel/x86_64/tobf16.c @@ -0,0 +1,170 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include <stdint.h> +#include "common.h" + +#if defined(DOUBLE) +#define FLOAT_TYPE double +#elif defined(SINGLE) +#define FLOAT_TYPE float +#else +#endif + +#if defined(COOPERLAKE) +#if defined(DOUBLE) +#include "dtobf16_microk_cooperlake.c" +#elif defined(SINGLE) +#include "stobf16_microk_cooperlake.c" +#endif +#endif + +/* Notes for algorithm: + * - Round to Nearest Even used generally + * - QNAN for NAN case + * - Input denormals are treated as zero + */ +static void tobf16_generic_kernel(BLASLONG n, const FLOAT_TYPE * in, BLASLONG inc_in, bfloat16 * out, BLASLONG inc_out) +{ + BLASLONG register index_in = 0; + BLASLONG register index_out = 0; + BLASLONG register index = 0; + float float_in = 0.0; + uint32_t * uint32_in = (uint32_t *)(&float_in); + uint16_t * uint16_in = (uint16_t *)(&float_in); + + while(index<n){ + float_in = (float)(*(in+index_in)); + switch(*uint32_in & 0x7f800000u){ + /* dedicated cases emit zero for denormal inputs and QNAN for NaN, per the notes above */ + default: /* normal finite values: round to nearest even */ + *uint32_in += (((*uint32_in >> 16) & 0x1u) + 0x7fffu); +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + *(out+index_out) = uint16_in[1]; +#else + *(out+index_out) = uint16_in[0]; +#endif + break; + } + + index_in += inc_in; + index_out += inc_out; + index++; + } +} + +#ifndef HAVE_TOBF16_ACCL_KERNEL +static void tobf16_accl_kernel(BLASLONG n, const FLOAT_TYPE * in, bfloat16 * out) +{ + tobf16_generic_kernel(n, in, 1, out, 1); +} +#endif + +static void tobf16_compute(BLASLONG n, FLOAT_TYPE * in, BLASLONG inc_in, bfloat16 * out, BLASLONG inc_out) +{ + if ((inc_in == 1) && (inc_out == 1)) { + tobf16_accl_kernel(n, in, out); + } else { + tobf16_generic_kernel(n, in, inc_in, out, inc_out); + } +} + +#if defined(SMP) +static int tobf16_thread_func(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT_TYPE dummy2, + FLOAT_TYPE *x, BLASLONG inc_x, bfloat16 *y, BLASLONG inc_y, + FLOAT_TYPE *dummy3, BLASLONG dummy4) +{ + tobf16_compute(n, x, inc_x, y, inc_y); + return 0; +} + +extern int blas_level1_thread(int mode, BLASLONG m, BLASLONG n, BLASLONG k, void *alpha, + void *a, BLASLONG lda, void *b, BLASLONG ldb, void *c, BLASLONG ldc, + int (*function)(), int nthreads); +#endif + +void CNAME(BLASLONG n, FLOAT_TYPE * in, BLASLONG inc_in, bfloat16 * out, BLASLONG inc_out) +{ + if (n <= 0) return; + +#if defined(SMP) + int nthreads; + FLOAT_TYPE dummy_alpha; + FLOAT_TYPE dummy_c; +#endif + +#if defined(SMP) + if (inc_in == 0 || inc_out == 0 || n <= 100000) { + nthreads = 1; + } else { + if (n/100000 < 100) { + nthreads = 4; + } else { + nthreads = 16; + } + } + + if (nthreads == 1) { + tobf16_compute(n, in, inc_in, out, inc_out); + } else { +#if defined(DOUBLE) + int mode = BLAS_REAL | BLAS_DTOBF16; +#elif defined(SINGLE) + int mode = BLAS_REAL | BLAS_STOBF16; +#endif + blas_level1_thread(mode, n, 0, 0, &dummy_alpha, + in, inc_in, out, inc_out, &dummy_c, 0, + (void *)tobf16_thread_func, nthreads); + } +#else + tobf16_compute(n, in, inc_in, out, inc_out); +#endif + +} diff --git a/kernel/x86_64/xdot.S b/kernel/x86_64/xdot.S index ea97164b2..c4b473494 100644 --- a/kernel/x86_64/xdot.S +++ b/kernel/x86_64/xdot.S @@ -59,6 +59,11 @@ PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + + #define N %ebx #define X %esi #define INCX %ecx diff --git a/kernel/x86_64/xgemm3m_kernel_2x2.S b/kernel/x86_64/xgemm3m_kernel_2x2.S index 843fc243a..1d0b23c40 100644 --- a/kernel/x86_64/xgemm3m_kernel_2x2.S +++ b/kernel/x86_64/xgemm3m_kernel_2x2.S @@ -78,6 +78,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/xgemm_kernel_1x1.S b/kernel/x86_64/xgemm_kernel_1x1.S index
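The rounding step in tobf16_generic_kernel above is the usual bias-and-truncate form of round-to-nearest-even: adding 0x7fff plus bit 16 of the float's bit pattern makes the carry bump the kept high half exactly when the discarded half exceeds one half, and on exact ties only when the kept half is odd. A minimal standalone sketch (hypothetical helper name; it deliberately skips the NaN and denormal cases the kernel's switch handles):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* truncate a float to bfloat16 with round-to-nearest-even (normal values only) */
static uint16_t f32_to_bf16_rne(float f)
{
    uint32_t u;
    memcpy(&u, &f, sizeof u);            /* well-defined type pun */
    u += ((u >> 16) & 1u) + 0x7fffu;     /* ties round toward the even 16-bit result */
    return (uint16_t)(u >> 16);
}

int main(void)
{
    printf("%04x\n", f32_to_bf16_rne(1.0f));        /* expect 3f80 */
    printf("%04x\n", f32_to_bf16_rne(1.00390625f)); /* exact tie: stays 3f80 (even) */
    return 0;
}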
e0cd1f1df..ee67d8d43 100644 --- a/kernel/x86_64/xgemm_kernel_1x1.S +++ b/kernel/x86_64/xgemm_kernel_1x1.S @@ -97,6 +97,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/xgemv_n.S b/kernel/x86_64/xgemv_n.S index cbde6402d..b66f28d58 100644 --- a/kernel/x86_64/xgemv_n.S +++ b/kernel/x86_64/xgemv_n.S @@ -76,6 +76,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/xgemv_t.S b/kernel/x86_64/xgemv_t.S index 31320f651..d6d37010d 100644 --- a/kernel/x86_64/xgemv_t.S +++ b/kernel/x86_64/xgemv_t.S @@ -75,6 +75,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/xtrsm_kernel_LT_1x1.S b/kernel/x86_64/xtrsm_kernel_LT_1x1.S index a61a240fd..875206363 100644 --- a/kernel/x86_64/xtrsm_kernel_LT_1x1.S +++ b/kernel/x86_64/xtrsm_kernel_LT_1x1.S @@ -90,6 +90,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + subq $STACKSIZE, %rsp movq %rbx, 0(%rsp) movq %rbp, 8(%rsp) diff --git a/kernel/x86_64/zamax.S b/kernel/x86_64/zamax.S index 74e127e6c..5cb2f6019 100644 --- a/kernel/x86_64/zamax.S +++ b/kernel/x86_64/zamax.S @@ -55,6 +55,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + salq $ZBASE_SHIFT, INCX fldz diff --git a/kernel/x86_64/zasum.S b/kernel/x86_64/zasum.S index c372fc5dd..3460fcea3 100644 --- a/kernel/x86_64/zasum.S +++ b/kernel/x86_64/zasum.S @@ -50,6 +50,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + fldz testq M, M jle .L999 diff --git a/kernel/x86_64/zasum.c b/kernel/x86_64/zasum.c new file mode 100644 index 000000000..6e758e2e3 --- /dev/null +++ b/kernel/x86_64/zasum.c @@ -0,0 +1,144 @@ +#include "common.h" + +#ifndef ABS_K +#define ABS_K(a) ((a) > 0 ? 
(a) : (-(a))) +#endif + +#if defined(SKYLAKEX) +#include "zasum_microk_skylakex-2.c" +#endif + +#ifndef HAVE_ZASUM_KERNEL +static FLOAT zasum_kernel(BLASLONG n, FLOAT *x) +{ + + BLASLONG i=0; + BLASLONG n_8 = n & -8; + FLOAT *x1 = x; + FLOAT temp0, temp1, temp2, temp3; + FLOAT temp4, temp5, temp6, temp7; + FLOAT sum0 = 0.0; + FLOAT sum1 = 0.0; + FLOAT sum2 = 0.0; + FLOAT sum3 = 0.0; + FLOAT sum4 = 0.0; + + while (i < n_8) { + temp0 = ABS_K(x1[0]); + temp1 = ABS_K(x1[1]); + temp2 = ABS_K(x1[2]); + temp3 = ABS_K(x1[3]); + temp4 = ABS_K(x1[4]); + temp5 = ABS_K(x1[5]); + temp6 = ABS_K(x1[6]); + temp7 = ABS_K(x1[7]); + + sum0 += temp0; + sum1 += temp1; + sum2 += temp2; + sum3 += temp3; + + sum0 += temp4; + sum1 += temp5; + sum2 += temp6; + sum3 += temp7; + + x1+=8; + i+=4; + } + + while (i < n) { + sum4 += ABS_K(x1[0]) + ABS_K(x1[1]); + x1 += 2; + i++; + } + + return sum0+sum1+sum2+sum3+sum4; +} + +#endif + +static FLOAT asum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + BLASLONG i = 0; + BLASLONG ip = 0; + BLASLONG inc_x2; + FLOAT sumf = 0.0; + + if (n <= 0 || inc_x <= 0) return(sumf); + if (inc_x == 1) { + sumf = zasum_kernel(n, x); + } + else { + inc_x2 = 2 * inc_x; + + while (i < n) { + sumf += ABS_K(x[ip]) + ABS_K(x[ip + 1]); + ip += inc_x2; + i++; + } + } + + return(sumf); +} + +#if defined(SMP) +static int asum_thread_function(BLASLONG n, + BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy2, + FLOAT *x, BLASLONG inc_x, + FLOAT * dummy3, BLASLONG dummy4, + FLOAT * result, BLASLONG dummy5) +{ + *(FLOAT *) result = asum_compute(n, x, inc_x); + return 0; +} + +extern int blas_level1_thread_with_return_value(int mode, + BLASLONG m, BLASLONG n, BLASLONG k, void * alpha, + void *a, BLASLONG lda, + void *b, BLASLONG ldb, + void *c, BLASLONG ldc, + int (*function)(), + int nthread); +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha[2]; +#endif + FLOAT sumf = 0.0; + +#if defined(SMP) + int num_cpu = num_cpu_avail(1); + if (n <= 10000 || inc_x <= 0) + nthreads = 1; + else + nthreads = num_cpu < n/10000 ? 
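/* heuristic: roughly one worker thread per 10000 complex elements, capped by the CPUs num_cpu_avail(1) reported */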
num_cpu : n/10000; + + if (nthreads == 1) { + sumf = asum_compute(n, x, inc_x); + } + else { + int mode, i; + char result[MAX_CPU_NUMBER * sizeof(double) *2]; + FLOAT *ptr; +#if !defined(DOUBLE) + mode = BLAS_SINGLE | BLAS_COMPLEX; +#else + mode = BLAS_DOUBLE | BLAS_COMPLEX; +#endif + blas_level1_thread_with_return_value(mode, n, 0, 0, dummy_alpha, x, inc_x, + NULL, 0, result, 0, (void *)asum_thread_function, nthreads); + ptr = (FLOAT *)result; + for (i = 0; i < nthreads; i++) { + sumf += (*ptr); + ptr = (FLOAT *)(((char *)ptr) + sizeof(double) *2); + } + } +#else + sumf = asum_compute(n, x, inc_x); +#endif + return(sumf); +} diff --git a/kernel/x86_64/zasum_microk_skylakex-2.c b/kernel/x86_64/zasum_microk_skylakex-2.c new file mode 100644 index 000000000..b44c53801 --- /dev/null +++ b/kernel/x86_64/zasum_microk_skylakex-2.c @@ -0,0 +1,340 @@ +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9)) + +#define HAVE_ZASUM_KERNEL 1 + +#include <immintrin.h> + +#include <stdint.h> + +static FLOAT zasum_kernel(BLASLONG n, FLOAT *x) +{ + FLOAT *x1 = x; + FLOAT sumf=0.0; + BLASLONG n2 = n + n; + + + if (n2 < 32) { + __m128d accum_10, accum_11, accum_12, accum_13; + __m128d abs_mask1; + + accum_10 = _mm_setzero_pd(); + accum_11 = _mm_setzero_pd(); + accum_12 = _mm_setzero_pd(); + accum_13 = _mm_setzero_pd(); + + // abs_mask1 = (__m128d)_mm_set1_epi64x(0x7fffffffffffffff); + abs_mask1 = (__m128d)_mm_cmpeq_epi8((__m128i) abs_mask1, (__m128i) abs_mask1); + abs_mask1 = (__m128d)_mm_srli_epi64((__m128i) abs_mask1, 1); + + _mm_prefetch(&x1[0], _MM_HINT_T0); + if (n2 >= 16){ + __m128d x00 = _mm_loadu_pd(&x1[ 0]); + __m128d x01 = _mm_loadu_pd(&x1[ 2]); + __m128d x02 = _mm_loadu_pd(&x1[ 4]); + __m128d x03 = _mm_loadu_pd(&x1[ 6]); + + _mm_prefetch(&x1[8], _MM_HINT_T0); + __m128d x04 = _mm_loadu_pd(&x1[ 8]); + __m128d x05 = _mm_loadu_pd(&x1[10]); + __m128d x06 = _mm_loadu_pd(&x1[12]); + __m128d x07 = _mm_loadu_pd(&x1[14]); + + x00 = _mm_and_pd(x00, abs_mask1); + x01 = _mm_and_pd(x01, abs_mask1); + x02 = _mm_and_pd(x02, abs_mask1); + x03 = _mm_and_pd(x03, abs_mask1); + + accum_10 = _mm_add_pd(accum_10, x00); + accum_11 = _mm_add_pd(accum_11, x01); + accum_12 = _mm_add_pd(accum_12, x02); + accum_13 = _mm_add_pd(accum_13, x03); + + x04 = _mm_and_pd(x04, abs_mask1); + x05 = _mm_and_pd(x05, abs_mask1); + x06 = _mm_and_pd(x06, abs_mask1); + x07 = _mm_and_pd(x07, abs_mask1); + + accum_10 = _mm_add_pd(accum_10, x04); + accum_11 = _mm_add_pd(accum_11, x05); + accum_12 = _mm_add_pd(accum_12, x06); + accum_13 = _mm_add_pd(accum_13, x07); + + x1 += 16; + n2 -= 16; + } + + if (n2 >= 8) { + __m128d x00 = _mm_loadu_pd(&x1[ 0]); + __m128d x01 = _mm_loadu_pd(&x1[ 2]); + __m128d x02 = _mm_loadu_pd(&x1[ 4]); + __m128d x03 = _mm_loadu_pd(&x1[ 6]); + + x00 = _mm_and_pd(x00, abs_mask1); + x01 = _mm_and_pd(x01, abs_mask1); + x02 = _mm_and_pd(x02, abs_mask1); + x03 = _mm_and_pd(x03, abs_mask1); + accum_10 = _mm_add_pd(accum_10, x00); + accum_11 = _mm_add_pd(accum_11, x01); + accum_12 = _mm_add_pd(accum_12, x02); + accum_13 = _mm_add_pd(accum_13, x03); + + n2 -= 8; + x1 += 8; + } + + if (n2 >= 4) { + __m128d x00 = _mm_loadu_pd(&x1[ 0]); + __m128d x01 = _mm_loadu_pd(&x1[ 2]); + x00 = _mm_and_pd(x00, abs_mask1); + x01 = _mm_and_pd(x01, abs_mask1); + accum_10 = _mm_add_pd(accum_10, x00); + accum_11 = _mm_add_pd(accum_11, x01); + + n2 -= 4; + x1 += 4; + } + + if (n2) { + __m128d x00 = _mm_loadu_pd(&x1[ 0]); + x00 = _mm_and_pd(x00, abs_mask1); + accum_10 =
_mm_add_pd(accum_10, x00); + } + + accum_10 = _mm_add_pd(accum_10, accum_11); + accum_12 = _mm_add_pd(accum_12, accum_13); + accum_10 = _mm_add_pd(accum_10, accum_12); + + accum_10 = _mm_hadd_pd(accum_10, accum_10); + + sumf = accum_10[0]; + } + else { + __m512d accum_0, accum_1, accum_2, accum_3; + __m512d x00, x01, x02, x03, x04, x05, x06, x07; + __m512d abs_mask = (__m512d)_mm512_set1_epi64(0x7fffffffffffffff); + + accum_0 = _mm512_setzero_pd(); + accum_1 = _mm512_setzero_pd(); + accum_2 = _mm512_setzero_pd(); + accum_3 = _mm512_setzero_pd(); + + // alignment has side-effect when the size of input array is not large enough + if (n2 < 128) { + if (n2 >= 64) { + x00 = _mm512_loadu_pd(&x1[ 0]); + x01 = _mm512_loadu_pd(&x1[ 8]); + x02 = _mm512_loadu_pd(&x1[16]); + x03 = _mm512_loadu_pd(&x1[24]); + x04 = _mm512_loadu_pd(&x1[32]); + x05 = _mm512_loadu_pd(&x1[40]); + x06 = _mm512_loadu_pd(&x1[48]); + x07 = _mm512_loadu_pd(&x1[56]); + + x00 = _mm512_and_pd(x00, abs_mask); + x01 = _mm512_and_pd(x01, abs_mask); + x02 = _mm512_and_pd(x02, abs_mask); + x03 = _mm512_and_pd(x03, abs_mask); + + accum_0 = _mm512_add_pd(accum_0, x00); + accum_1 = _mm512_add_pd(accum_1, x01); + accum_2 = _mm512_add_pd(accum_2, x02); + accum_3 = _mm512_add_pd(accum_3, x03); + + x04 = _mm512_and_pd(x04, abs_mask); + x05 = _mm512_and_pd(x05, abs_mask); + x06 = _mm512_and_pd(x06, abs_mask); + x07 = _mm512_and_pd(x07, abs_mask); + + accum_0 = _mm512_add_pd(accum_0, x04); + accum_1 = _mm512_add_pd(accum_1, x05); + accum_2 = _mm512_add_pd(accum_2, x06); + accum_3 = _mm512_add_pd(accum_3, x07); + + n2 -= 64; + x1 += 64; + } + + if (n2 >= 32) { + x00 = _mm512_loadu_pd(&x1[ 0]); + x01 = _mm512_loadu_pd(&x1[ 8]); + x02 = _mm512_loadu_pd(&x1[16]); + x03 = _mm512_loadu_pd(&x1[24]); + x00 = _mm512_and_pd(x00, abs_mask); + x01 = _mm512_and_pd(x01, abs_mask); + x02 = _mm512_and_pd(x02, abs_mask); + x03 = _mm512_and_pd(x03, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + accum_1 = _mm512_add_pd(accum_1, x01); + accum_2 = _mm512_add_pd(accum_2, x02); + accum_3 = _mm512_add_pd(accum_3, x03); + + n2 -= 32; + x1 += 32; + } + + if (n2 >= 16) { + x00 = _mm512_loadu_pd(&x1[ 0]); + x01 = _mm512_loadu_pd(&x1[ 8]); + x00 = _mm512_and_pd(x00, abs_mask); + x01 = _mm512_and_pd(x01, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + accum_1 = _mm512_add_pd(accum_1, x01); + + n2 -= 16; + x1 += 16; + } + + if (n2 >= 8) { + x00 = _mm512_loadu_pd(&x1[ 0]); + x00 = _mm512_and_pd(x00, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + + n2 -= 8; + x1 += 8; + } + + if (n2) { + unsigned char tail_mask8 = (((unsigned char) 0xff) >> (8 - n2)); + x00 = _mm512_maskz_loadu_pd(*((__mmask8*) &tail_mask8), &x1[ 0]); + x00 = _mm512_and_pd(x00, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + } + accum_0 = _mm512_add_pd(accum_0, accum_1); + accum_2 = _mm512_add_pd(accum_2, accum_3); + accum_0 = _mm512_add_pd(accum_0, accum_2); + sumf = _mm512_reduce_add_pd(accum_0); + } + // n2 >= 128, doing alignment + else { + + int align_header = ((64 - ((uintptr_t)x1 & (uintptr_t)0x3f)) >> 3) & 0x7; + + if (0 != align_header) { + unsigned char align_mask8 = (((unsigned char)0xff) >> (8 - align_header)); + x00 = _mm512_maskz_loadu_pd(*((__mmask8*) &align_mask8), &x1[0]); + x00 = _mm512_and_pd(x00, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + + n2 -= align_header; + x1 += align_header; + } + + x00 = _mm512_load_pd(&x1[ 0]); + x01 = _mm512_load_pd(&x1[ 8]); + x02 = _mm512_load_pd(&x1[16]); + x03 = _mm512_load_pd(&x1[24]); + x04 = _mm512_load_pd(&x1[32]); + 
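/* these eight preloads (x00 through x07) prime a software pipeline: each trip of the while loop below masks and accumulates the previous iteration's vectors while issuing the next eight aligned loads */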
x05 = _mm512_load_pd(&x1[40]); + x06 = _mm512_load_pd(&x1[48]); + x07 = _mm512_load_pd(&x1[56]); + + n2 -= 64; + x1 += 64; + + while (n2 >= 64) { + x00 = _mm512_and_pd(x00, abs_mask); + x01 = _mm512_and_pd(x01, abs_mask); + x02 = _mm512_and_pd(x02, abs_mask); + x03 = _mm512_and_pd(x03, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + x00 = _mm512_load_pd(&x1[ 0]); + accum_1 = _mm512_add_pd(accum_1, x01); + x01 = _mm512_load_pd(&x1[ 8]); + accum_2 = _mm512_add_pd(accum_2, x02); + x02 = _mm512_load_pd(&x1[16]); + accum_3 = _mm512_add_pd(accum_3, x03); + x03 = _mm512_load_pd(&x1[24]); + + x04 = _mm512_and_pd(x04, abs_mask); + x05 = _mm512_and_pd(x05, abs_mask); + x06 = _mm512_and_pd(x06, abs_mask); + x07 = _mm512_and_pd(x07, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x04); + x04 = _mm512_load_pd(&x1[32]); + accum_1 = _mm512_add_pd(accum_1, x05); + x05 = _mm512_load_pd(&x1[40]); + accum_2 = _mm512_add_pd(accum_2, x06); + x06 = _mm512_load_pd(&x1[48]); + accum_3 = _mm512_add_pd(accum_3, x07); + x07 = _mm512_load_pd(&x1[56]); + + n2 -= 64; + x1 += 64; + } + x00 = _mm512_and_pd(x00, abs_mask); + x01 = _mm512_and_pd(x01, abs_mask); + x02 = _mm512_and_pd(x02, abs_mask); + x03 = _mm512_and_pd(x03, abs_mask); + + accum_0 = _mm512_add_pd(accum_0, x00); + accum_1 = _mm512_add_pd(accum_1, x01); + accum_2 = _mm512_add_pd(accum_2, x02); + accum_3 = _mm512_add_pd(accum_3, x03); + + x04 = _mm512_and_pd(x04, abs_mask); + x05 = _mm512_and_pd(x05, abs_mask); + x06 = _mm512_and_pd(x06, abs_mask); + x07 = _mm512_and_pd(x07, abs_mask); + + accum_0 = _mm512_add_pd(accum_0, x04); + accum_1 = _mm512_add_pd(accum_1, x05); + accum_2 = _mm512_add_pd(accum_2, x06); + accum_3 = _mm512_add_pd(accum_3, x07); + + if (n2 >= 32) { + x00 = _mm512_load_pd(&x1[ 0]); + x01 = _mm512_load_pd(&x1[ 8]); + x02 = _mm512_load_pd(&x1[16]); + x03 = _mm512_load_pd(&x1[24]); + x00 = _mm512_and_pd(x00, abs_mask); + x01 = _mm512_and_pd(x01, abs_mask); + x02 = _mm512_and_pd(x02, abs_mask); + x03 = _mm512_and_pd(x03, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + accum_1 = _mm512_add_pd(accum_1, x01); + accum_2 = _mm512_add_pd(accum_2, x02); + accum_3 = _mm512_add_pd(accum_3, x03); + + n2 -= 32; + x1 += 32; + } + + if (n2 >= 16) { + x00 = _mm512_load_pd(&x1[ 0]); + x01 = _mm512_load_pd(&x1[ 8]); + x00 = _mm512_and_pd(x00, abs_mask); + x01 = _mm512_and_pd(x01, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + accum_1 = _mm512_add_pd(accum_1, x01); + + n2 -= 16; + x1 += 16; + } + + if (n2 >= 8) { + x00 = _mm512_load_pd(&x1[ 0]); + x00 = _mm512_and_pd(x00, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + + n2 -= 8; + x1 += 8; + } + + if (n2) { + unsigned char tail_mask8 = (((unsigned char) 0xff) >> (8 - n2)); + x00 = _mm512_maskz_load_pd(*((__mmask8*) &tail_mask8), &x1[ 0]); + x00 = _mm512_and_pd(x00, abs_mask); + accum_0 = _mm512_add_pd(accum_0, x00); + } + + accum_0 = _mm512_add_pd(accum_0, accum_1); + accum_2 = _mm512_add_pd(accum_2, accum_3); + accum_0 = _mm512_add_pd(accum_0, accum_2); + sumf = _mm512_reduce_add_pd(accum_0); + } + } + + return sumf; +} +#endif diff --git a/kernel/x86_64/zaxpy.c b/kernel/x86_64/zaxpy.c index 53866cf95..25e9f6d42 100644 --- a/kernel/x86_64/zaxpy.c +++ b/kernel/x86_64/zaxpy.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
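The zaxpy microkernel hunks just below widen the inline-assembly clobber lists to name every vector register the kernels actually write (adding %xmm2 through %xmm15 where they were missing); without a complete clobber list the compiler may keep live values in those registers across the asm statement and later read back garbage. A minimal, hypothetical illustration of the contract, not code from the patch:

#include <stdio.h>

/* doubles v inside an asm block; "%xmm2" in the clobber list tells GCC the
   statement trashes that register, so no caller value may be cached there */
static double scale2(double v)
{
    __asm__ __volatile__ (
        "addsd %0, %0\n\t"   /* v += v within the operand's own register */
        : "+x" (v)           /* "+x": read-write operand in any SSE register */
        :
        : "%xmm2"            /* example clobber, mirroring the lists below */
    );
    return v;
}

int main(void) { printf("%g\n", scale2(21.0)); return 0; } /* prints 42 */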
#include "zaxpy_microk_bulldozer-2.c" #elif defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "zaxpy_microk_steamroller-2.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "zaxpy_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "zaxpy_microk_sandy-2.c" diff --git a/kernel/x86_64/zaxpy_microk_bulldozer-2.c b/kernel/x86_64/zaxpy_microk_bulldozer-2.c index 15d367971..ccb26134f 100644 --- a/kernel/x86_64/zaxpy_microk_bulldozer-2.c +++ b/kernel/x86_64/zaxpy_microk_bulldozer-2.c @@ -122,7 +122,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", @@ -189,9 +189,10 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/zaxpy_microk_haswell-2.c b/kernel/x86_64/zaxpy_microk_haswell-2.c index 89d23daf3..8f299ea2d 100644 --- a/kernel/x86_64/zaxpy_microk_haswell-2.c +++ b/kernel/x86_64/zaxpy_microk_haswell-2.c @@ -120,7 +120,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", diff --git a/kernel/x86_64/zaxpy_microk_sandy-2.c b/kernel/x86_64/zaxpy_microk_sandy-2.c index 17b8b24f7..5246c72e8 100644 --- a/kernel/x86_64/zaxpy_microk_sandy-2.c +++ b/kernel/x86_64/zaxpy_microk_sandy-2.c @@ -108,9 +108,10 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); return; @@ -185,9 +186,10 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/zaxpy_microk_steamroller-2.c b/kernel/x86_64/zaxpy_microk_steamroller-2.c index 907b1ae00..88e3a680b 100644 --- a/kernel/x86_64/zaxpy_microk_steamroller-2.c +++ b/kernel/x86_64/zaxpy_microk_steamroller-2.c @@ -122,7 +122,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15", @@ -189,9 +189,10 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (alpha), // 4 "r" (mvec) // 5 : "cc", - "%xmm0", "%xmm1", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); diff --git a/kernel/x86_64/zdot.S 
b/kernel/x86_64/zdot.S index 94d1008ff..87c08d7c8 100644 --- a/kernel/x86_64/zdot.S +++ b/kernel/x86_64/zdot.S @@ -55,6 +55,8 @@ PROFCODE #ifdef WINDOWS_ABI + emms + movq 40(%rsp), INCY #endif diff --git a/kernel/x86_64/zdot.c b/kernel/x86_64/zdot.c index ef12569c8..1bc785ac1 100644 --- a/kernel/x86_64/zdot.c +++ b/kernel/x86_64/zdot.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "zdot_microk_bulldozer-2.c" #elif defined(STEAMROLLER) || defined(PILEDRIVER) || defined(EXCAVATOR) #include "zdot_microk_steamroller-2.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "zdot_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "zdot_microk_sandy-2.c" @@ -86,18 +86,26 @@ static void zdot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) #endif -OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) + +#if defined(SMP) +extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, + BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, + void *c, BLASLONG ldc, int (*function)(), int nthreads); +#endif + + + +static void zdot_compute (BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y,OPENBLAS_COMPLEX_FLOAT *result) { BLASLONG i; BLASLONG ix,iy; FLOAT dot[4] = { 0.0, 0.0, 0.0 , 0.0 } ; - + if ( n <= 0 ) { -// CREAL(result) = 0.0 ; -// CIMAG(result) = 0.0 ; - OPENBLAS_COMPLEX_FLOAT result=OPENBLAS_MAKE_COMPLEX_FLOAT(0.0,0.0); - return(result); + OPENBLAS_COMPLEX_FLOAT res=OPENBLAS_MAKE_COMPLEX_FLOAT(0.0,0.0); + *result=res; + return; } @@ -132,8 +140,8 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA i=0; ix=0; iy=0; - inc_x <<= 1; - inc_y <<= 1; + inc_x *= 2; + inc_y *= 2; while(i < n) { @@ -150,18 +158,84 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA } #if !defined(CONJ) - OPENBLAS_COMPLEX_FLOAT result=OPENBLAS_MAKE_COMPLEX_FLOAT(dot[0]-dot[1],dot[2]+dot[3]); -// CREAL(result) = dot[0] - dot[1]; -// CIMAG(result) = dot[2] + dot[3]; + OPENBLAS_COMPLEX_FLOAT res=OPENBLAS_MAKE_COMPLEX_FLOAT(dot[0]-dot[1],dot[2]+dot[3]); #else - OPENBLAS_COMPLEX_FLOAT result=OPENBLAS_MAKE_COMPLEX_FLOAT(dot[0]+dot[1],dot[2]-dot[3]); -// CREAL(result) = dot[0] + dot[1]; -// CIMAG(result) = dot[2] - dot[3]; + OPENBLAS_COMPLEX_FLOAT res=OPENBLAS_MAKE_COMPLEX_FLOAT(dot[0]+dot[1],dot[2]-dot[3]); +#endif + *result=res; + return; +} +#if defined(SMP) +static int zdot_thread_function(BLASLONG n, BLASLONG dummy0, +BLASLONG dummy1, FLOAT dummy2r, FLOAT dummy2i, FLOAT *x, BLASLONG inc_x, FLOAT *y, +BLASLONG inc_y, FLOAT *result, BLASLONG dummy3) +{ + zdot_compute(n, x, inc_x, y, inc_y, (void *)result); + return 0; +} +#endif + +OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; +#if defined(C_PGI) || defined(C_SUN) + FLOAT zdotr=0., zdoti=0.; +#endif #endif + + OPENBLAS_COMPLEX_FLOAT zdot; +#if defined(C_PGI) || defined(C_SUN) + zdot=OPENBLAS_MAKE_COMPLEX_FLOAT(0.0,0.0); +#else + CREAL(zdot) = 0.0; + CIMAG(zdot) = 0.0; +#endif + +#if defined(SMP) + if (inc_x == 0 || inc_y == 0 || n <= 10000) + nthreads = 1; + else + nthreads = num_cpu_avail(1); - return(result); + if (nthreads == 1) { + zdot_compute(n, x, inc_x, y, inc_y, &zdot); + } else { + int mode, i; + char result[MAX_CPU_NUMBER * sizeof(double) * 2]; + 
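/* one slot per thread, sized and strided as sizeof(double)*2 (a double-precision complex) in both single and double builds, matching the pointer walk below */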
OPENBLAS_COMPLEX_FLOAT *ptr; -} +#if !defined(DOUBLE) + mode = BLAS_SINGLE | BLAS_COMPLEX; +#else + mode = BLAS_DOUBLE | BLAS_COMPLEX; +#endif + blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, + x, inc_x, y, inc_y, result, 0, + ( void *)zdot_thread_function, nthreads); + + ptr = (OPENBLAS_COMPLEX_FLOAT *)result; + for (i = 0; i < nthreads; i++) { +#if defined(C_PGI) || defined(C_SUN) + zdotr += CREAL(*ptr); + zdoti += CIMAG(*ptr); +#else + CREAL(zdot) = CREAL(zdot) + CREAL(*ptr); + CIMAG(zdot) = CIMAG(zdot) + CIMAG(*ptr); +#endif + ptr = (void *)(((char *)ptr) + sizeof(double) * 2); + } +#if defined(C_PGI) || defined(C_SUN) + zdot = OPENBLAS_MAKE_COMPLEX_FLOAT(zdotr,zdoti); +#endif + } +#else + zdot_compute(n, x, inc_x, y, inc_y, &zdot); +#endif + + return zdot; +} diff --git a/kernel/x86_64/zdot_microk_haswell-2.c b/kernel/x86_64/zdot_microk_haswell-2.c index 9f2fc2c1d..4eade7bfd 100644 --- a/kernel/x86_64/zdot_microk_haswell-2.c +++ b/kernel/x86_64/zdot_microk_haswell-2.c @@ -66,13 +66,17 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vfmadd231pd %%ymm8 , %%ymm12, %%ymm0 \n\t" // x_r * y_r, x_i * y_i "vfmadd231pd %%ymm9 , %%ymm13, %%ymm1 \n\t" // x_r * y_r, x_i * y_i - "vpermpd $0xb1 , %%ymm12, %%ymm12 \n\t" - "vpermpd $0xb1 , %%ymm13, %%ymm13 \n\t" + "vpermilpd $0x05 , %%ymm12, %%ymm12 \n\t" + "vpermilpd $0x05 , %%ymm13, %%ymm13 \n\t" +// "vpermpd $0xb1 , %%ymm12, %%ymm12 \n\t" +// "vpermpd $0xb1 , %%ymm13, %%ymm13 \n\t" "vfmadd231pd %%ymm10, %%ymm14, %%ymm2 \n\t" // x_r * y_r, x_i * y_i "vfmadd231pd %%ymm11, %%ymm15, %%ymm3 \n\t" // x_r * y_r, x_i * y_i - "vpermpd $0xb1 , %%ymm14, %%ymm14 \n\t" - "vpermpd $0xb1 , %%ymm15, %%ymm15 \n\t" + "vpermilpd $0x05 , %%ymm14, %%ymm14 \n\t" + "vpermilpd $0x05 , %%ymm15, %%ymm15 \n\t" +// "vpermpd $0xb1 , %%ymm14, %%ymm14 \n\t" +// "vpermpd $0xb1 , %%ymm15, %%ymm15 \n\t" "vfmadd231pd %%ymm8 , %%ymm12, %%ymm4 \n\t" // x_r * y_i, x_i * y_r "addq $16 , %0 \n\t" @@ -151,13 +155,17 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vfmadd231pd %%ymm8 , %%ymm12, %%ymm0 \n\t" // x_r * y_r, x_i * y_i "vfmadd231pd %%ymm9 , %%ymm13, %%ymm1 \n\t" // x_r * y_r, x_i * y_i - "vpermpd $0xb1 , %%ymm12, %%ymm12 \n\t" - "vpermpd $0xb1 , %%ymm13, %%ymm13 \n\t" + "vpermilpd $0x05 , %%ymm12, %%ymm12 \n\t" + "vpermilpd $0x05 , %%ymm13, %%ymm13 \n\t" +// "vpermpd $0xb1 , %%ymm12, %%ymm12 \n\t" +// "vpermpd $0xb1 , %%ymm13, %%ymm13 \n\t" "vfmadd231pd %%ymm10, %%ymm14, %%ymm2 \n\t" // x_r * y_r, x_i * y_i "vfmadd231pd %%ymm11, %%ymm15, %%ymm3 \n\t" // x_r * y_r, x_i * y_i - "vpermpd $0xb1 , %%ymm14, %%ymm14 \n\t" - "vpermpd $0xb1 , %%ymm15, %%ymm15 \n\t" + "vpermilpd $0x05 , %%ymm14, %%ymm14 \n\t" + "vpermilpd $0x05 , %%ymm15, %%ymm15 \n\t" +// "vpermpd $0xb1 , %%ymm14, %%ymm14 \n\t" +// "vpermpd $0xb1 , %%ymm15, %%ymm15 \n\t" "vfmadd231pd %%ymm8 , %%ymm12, %%ymm4 \n\t" // x_r * y_i, x_i * y_r "addq $16 , %0 \n\t" diff --git a/kernel/x86_64/zgemm3m_kernel_4x4_haswell.c b/kernel/x86_64/zgemm3m_kernel_4x4_haswell.c new file mode 100644 index 000000000..56bc06c5c --- /dev/null +++ b/kernel/x86_64/zgemm3m_kernel_4x4_haswell.c @@ -0,0 +1,224 @@ +/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store */ +/* r12 = k << 5(const), r13 = k(const), r14 = b_head_pos(const), r15 = tmp */ + +#include "common.h" +#include <stdint.h> + +//recommended settings: GEMM_Q=256, GEMM_P=256 + +/* m = 4 *//* ymm0 for alpha, ymm1-ymm3 for temporary use, ymm4-ymm15 for
accumulators */ +#define KERNEL_k1m4n1 \ + "vmovupd (%0),%%ymm1; addq $32,%0;"\ + "vbroadcastsd (%1),%%ymm2; vfmadd231pd %%ymm1,%%ymm2,%%ymm4;"\ + "addq $8,%1;" +#define KERNEL_h_k1m4n2 \ + "vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2; addq $32,%0;"\ + "vbroadcastf128 (%1),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,%%ymm4; vfmadd231pd %%ymm2,%%ymm3,%%ymm5;" +#define KERNEL_k1m4n2 KERNEL_h_k1m4n2 "addq $16,%1;" +#define KERNEL_h_k1m4n4 \ + KERNEL_h_k1m4n2 "vbroadcastf128 16(%1),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,%%ymm6; vfmadd231pd %%ymm2,%%ymm3,%%ymm7;" +#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $32,%1;" +#define unit_kernel_k1m4n4(c1,c2,c3,c4,off1,off2,...) \ + "vbroadcastf128 "#off1"("#__VA_ARGS__"),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,"#c1"; vfmadd231pd %%ymm2,%%ymm3,"#c2";"\ + "vbroadcastf128 "#off2"("#__VA_ARGS__"),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,"#c3"; vfmadd231pd %%ymm2,%%ymm3,"#c4";" +#define KERNEL_h_k1m4n8 KERNEL_h_k1m4n4 unit_kernel_k1m4n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,0,16,%1,%%r12,1) +#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $32,%1;" +#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n8 unit_kernel_k1m4n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,0,16,%1,%%r12,2) +#define KERNEL_k1m4n12 KERNEL_h_k1m4n12 "addq $32,%1;" +#define KERNEL_k2m4n1 KERNEL_k1m4n1 KERNEL_k1m4n1 +#define KERNEL_k2m4n2 KERNEL_k1m4n2 KERNEL_k1m4n2 +#define KERNEL_k2m4n4 KERNEL_k1m4n4 KERNEL_k1m4n4 +#define KERNEL_k2m4n8 KERNEL_k1m4n8 KERNEL_k1m4n8 +#define KERNEL_k2m4n12 \ + "vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2;"\ + unit_kernel_k1m4n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7,0,16,%1)\ + unit_kernel_k1m4n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,0,16,%1,%%r12,1)\ + unit_kernel_k1m4n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,0,16,%1,%%r12,2)\ + "vmovddup 32(%0),%%ymm1; vmovddup 40(%0),%%ymm2; prefetcht0 512(%0); addq $64,%0;"\ + unit_kernel_k1m4n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7,32,48,%1)\ + unit_kernel_k1m4n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,32,48,%1,%%r12,1)\ + unit_kernel_k1m4n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,32,48,%1,%%r12,2) "addq $64,%1;" +#define INIT_m4n1 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define INIT_m4n2 INIT_m4n1 "vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m4n4 INIT_m4n2 "vpxor %%ymm6,%%ymm6,%%ymm6;vpxor %%ymm7,%%ymm7,%%ymm7;" +#define unit_init_m4n4(c1,c2,c3,c4) \ + "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";" +#define INIT_m4n8 INIT_m4n4 unit_init_m4n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11) +#define INIT_m4n12 INIT_m4n8 unit_init_m4n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15) +#define SAVE_h_m4n1 \ + "vpermpd $216,%%ymm4,%%ymm3; vunpcklpd %%ymm3,%%ymm3,%%ymm1; vunpckhpd %%ymm3,%%ymm3,%%ymm2;"\ + "vfmadd213pd (%2),%%ymm0,%%ymm1; vfmadd213pd 32(%2),%%ymm0,%%ymm2; vmovupd %%ymm1,(%2); vmovupd %%ymm2,32(%2);" +#define unit_save_m4n2(c1,c2) \ + "vperm2f128 $2,"#c1","#c2",%%ymm2; vperm2f128 $19,"#c1","#c2","#c2"; vmovapd %%ymm2,"#c1";"\ + "vunpcklpd "#c1","#c1",%%ymm2; vunpcklpd "#c2","#c2",%%ymm3;"\ + "vfmadd213pd (%5),%%ymm0,%%ymm2; vfmadd213pd 32(%5),%%ymm0,%%ymm3; vmovupd %%ymm2,(%5); vmovupd %%ymm3,32(%5);"\ + "vunpckhpd "#c1","#c1",%%ymm2; vunpckhpd "#c2","#c2",%%ymm3;"\ + "vfmadd213pd (%5,%3,1),%%ymm0,%%ymm2; vfmadd213pd 32(%5,%3,1),%%ymm0,%%ymm3; vmovupd %%ymm2,(%5,%3,1); vmovupd %%ymm3,32(%5,%3,1);"\ + "leaq (%5,%3,2),%5;" +#define SAVE_h_m4n2 "movq %2,%5;" unit_save_m4n2(%%ymm4,%%ymm5) +#define SAVE_h_m4n4 SAVE_h_m4n2 unit_save_m4n2(%%ymm6,%%ymm7) +#define SAVE_h_m4n8 SAVE_h_m4n4 unit_save_m4n2(%%ymm8,%%ymm9) unit_save_m4n2(%%ymm10,%%ymm11) +#define SAVE_h_m4n12 SAVE_h_m4n8 unit_save_m4n2(%%ymm12,%%ymm13) 
unit_save_m4n2(%%ymm14,%%ymm15) +#define SAVE_m4(ndim) SAVE_h_m4n##ndim "addq $64,%2;" +#define COMPUTE_m4(ndim) \ + INIT_m4n##ndim\ + "movq %%r13,%4; movq %%r14,%1; movq %2,%5; xorq %%r15,%%r15;"\ + "cmpq $24,%4; jb "#ndim"004042f;"\ + #ndim"004041:\n\t"\ + "cmpq $126,%%r15; movq $126,%%r15; cmoveq %3,%%r15;"\ + KERNEL_k2m4n##ndim KERNEL_k2m4n##ndim\ + "prefetcht1 (%5); subq $63,%5;"\ + KERNEL_k2m4n##ndim KERNEL_k2m4n##ndim\ + "addq %%r15,%5; prefetcht1 (%8); addq $32,%8;"\ + "subq $8,%4; cmpq $16,%4; jnb "#ndim"004041b;"\ + "movq %2,%5;"\ + #ndim"004042:\n\t"\ + "testq %4,%4; jz "#ndim"004043f;"\ + "prefetcht0 (%5); prefetcht0 63(%5);"\ + KERNEL_k1m4n##ndim\ + "prefetcht0 (%5,%3,4); prefetcht0 63(%5,%3,4); addq %3,%5;"\ + "decq %4; jmp "#ndim"004042b;"\ + #ndim"004043:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14);"\ + SAVE_m4(ndim) + +/* m = 2 *//* vmm0 for alpha, vmm1-vmm3 for temporary use, vmm4-vmm9 for accumulators */ +#define KERNEL_k1m2n1 \ + "vmovupd (%0),%%xmm1; addq $16,%0;"\ + "vmovddup (%1),%%xmm2; vfmadd231pd %%xmm1,%%xmm2,%%xmm4;"\ + "addq $8,%1;" +#define KERNEL_h_k1m2n2 \ + "vmovddup (%0),%%xmm1; vmovddup 8(%0),%%xmm2; addq $16,%0;"\ + "vmovupd (%1),%%xmm3; vfmadd231pd %%xmm1,%%xmm3,%%xmm4; vfmadd231pd %%xmm2,%%xmm3,%%xmm5;" +#define KERNEL_k1m2n2 KERNEL_h_k1m2n2 "addq $16,%1;" +#define unit_kernel_k1m2n4(c1,c2,...) \ + "vmovupd ("#__VA_ARGS__"),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,"#c1"; vfmadd231pd %%ymm2,%%ymm3,"#c2";" +#define KERNEL_h_k1m2n4 \ + "vbroadcastsd (%0),%%ymm1; vbroadcastsd 8(%0),%%ymm2; addq $16,%0;"\ + unit_kernel_k1m2n4(%%ymm4,%%ymm5,%1) +#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $32,%1;" +#define KERNEL_h_k1m2n8 KERNEL_h_k1m2n4 \ + unit_kernel_k1m2n4(%%ymm6,%%ymm7,%1,%%r12,1) +#define KERNEL_k1m2n8 KERNEL_h_k1m2n8 "addq $32,%1;" +#define KERNEL_h_k1m2n12 KERNEL_h_k1m2n8 \ + unit_kernel_k1m2n4(%%ymm8,%%ymm9,%1,%%r12,2) +#define KERNEL_k1m2n12 KERNEL_h_k1m2n12 "addq $32,%1;" +#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;" +#define unit_init_m2n4(c1,c2) "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";" +#define INIT_m2n4 unit_init_m2n4(%%ymm4,%%ymm5) +#define INIT_m2n8 INIT_m2n4 unit_init_m2n4(%%ymm6,%%ymm7) +#define INIT_m2n12 INIT_m2n8 unit_init_m2n4(%%ymm8,%%ymm9) +#define SAVE_h_m2n1 \ + "vinsertf128 $1,%%xmm4,%%ymm4,%%ymm4; vpermilpd $12,%%ymm4,%%ymm4; vfmadd213pd (%2),%%ymm0,%%ymm4; vmovupd %%ymm4,(%2);" +#define SAVE_h_m2n2 \ + "vinsertf128 $1,%%xmm5,%%ymm4,%%ymm4; vunpcklpd %%ymm4,%%ymm4,%%ymm1; vunpckhpd %%ymm4,%%ymm4,%%ymm2;"\ + "vfmadd213pd (%2),%%ymm0,%%ymm1; vmovupd %%ymm1,(%2);"\ + "vfmadd213pd (%2,%3,1),%%ymm0,%%ymm2; vmovupd %%ymm2,(%2,%3,1);" +#define unit_save_m2n4(c1,c2) \ + "vperm2f128 $2,"#c1","#c2",%%ymm1; vunpcklpd %%ymm1,%%ymm1,%%ymm2; vunpckhpd %%ymm1,%%ymm1,%%ymm3;"\ + "vfmadd213pd (%5),%%ymm0,%%ymm2; vfmadd213pd (%5,%3,1),%%ymm0,%%ymm3; vmovupd %%ymm2,(%5); vmovupd %%ymm3,(%5,%3,1); leaq (%5,%3,2),%5;"\ + "vperm2f128 $19,"#c1","#c2",%%ymm1; vunpcklpd %%ymm1,%%ymm1,%%ymm2; vunpckhpd %%ymm1,%%ymm1,%%ymm3;"\ + "vfmadd213pd (%5),%%ymm0,%%ymm2; vfmadd213pd (%5,%3,1),%%ymm0,%%ymm3; vmovupd %%ymm2,(%5); vmovupd %%ymm3,(%5,%3,1); leaq (%5,%3,2),%5;" +#define SAVE_h_m2n4 "movq %2,%5;" unit_save_m2n4(%%ymm4,%%ymm5) +#define SAVE_h_m2n8 SAVE_h_m2n4 unit_save_m2n4(%%ymm6,%%ymm7) +#define SAVE_h_m2n12 SAVE_h_m2n8 unit_save_m2n4(%%ymm8,%%ymm9) +#define SAVE_m2(ndim) SAVE_h_m2n##ndim "addq $32,%2;" +#define COMPUTE_m2(ndim) \ + INIT_m2n##ndim\ + "movq %%r13,%4; movq %%r14,%1;"\ + 
#ndim"002022:\n\t"\ + "testq %4,%4; jz "#ndim"002023f;"\ + KERNEL_k1m2n##ndim\ + "decq %4; jmp "#ndim"002022b;"\ + #ndim"002023:\n\t"\ + SAVE_m2(ndim) + +/* m = 1 *//* vmm0 for alpha, vmm1-vmm3 and vmm10-vmm15 for temporary use, vmm4-vmm6 for accumulators */ +#define KERNEL_k1m1n1 \ + "vmovsd (%0),%%xmm1; addq $8,%0;"\ + "vfmadd231sd (%1),%%xmm1,%%xmm4; addq $8,%1;" +#define KERNEL_k1m1n2 \ + "vmovddup (%0),%%xmm1; addq $8,%0;"\ + "vfmadd231pd (%1),%%xmm1,%%xmm4; addq $16,%1;" +#define unit_kernel_k1m1n4(c1,...) \ + "vmovupd ("#__VA_ARGS__"),%%ymm2; vfmadd231pd %%ymm1,%%ymm2,"#c1";" +#define KERNEL_h_k1m1n4 \ + "vbroadcastsd (%0),%%ymm1; addq $8,%0;"\ + unit_kernel_k1m1n4(%%ymm4,%1) +#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $32,%1;" +#define KERNEL_h_k1m1n8 KERNEL_h_k1m1n4 unit_kernel_k1m1n4(%%ymm5,%1,%%r12,1) +#define KERNEL_k1m1n8 KERNEL_h_k1m1n8 "addq $32,%1;" +#define KERNEL_h_k1m1n12 KERNEL_h_k1m1n8 unit_kernel_k1m1n4(%%ymm6,%1,%%r12,2) +#define KERNEL_k1m1n12 KERNEL_h_k1m1n12 "addq $32,%1;" +#define INIT_m1n1 INIT_m2n1 +#define INIT_m1n2 INIT_m2n1 +#define INIT_m1n4 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define INIT_m1n8 INIT_m1n4 "vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m1n12 INIT_m1n8 "vpxor %%ymm6,%%ymm6,%%ymm6;" +#define SAVE_h_m1n1 \ + "vmovddup %%xmm4,%%xmm4; vfmadd213pd (%2),%%xmm0,%%xmm4; vmovupd %%xmm4,(%2);" +#define SAVE_h_m1n2 \ + "vunpcklpd %%xmm4,%%xmm4,%%xmm1; vunpckhpd %%xmm4,%%xmm4,%%xmm2;"\ + "vfmadd213pd (%2),%%xmm0,%%xmm1; vmovupd %%xmm1,(%2);"\ + "vfmadd213pd (%2,%3,1),%%xmm0,%%xmm2; vmovupd %%xmm2,(%2,%3,1);" +#define unit_save_m1n4(c1) \ + "vunpcklpd "#c1","#c1",%%ymm1; vunpckhpd "#c1","#c1",%%ymm2;"\ + "vmovupd (%5),%%xmm3; vinsertf128 $1,(%5,%3,2),%%ymm3,%%ymm3;"\ + "vfmadd213pd %%ymm3,%%ymm0,%%ymm1; vmovupd %%xmm1,(%5); vextractf128 $1,%%ymm1,(%5,%3,2); addq %3,%5;"\ + "vmovupd (%5),%%xmm3; vinsertf128 $1,(%5,%3,2),%%ymm3,%%ymm3;"\ + "vfmadd213pd %%ymm3,%%ymm0,%%ymm2; vmovupd %%xmm2,(%5); vextractf128 $1,%%ymm2,(%5,%3,2); addq %3,%5; leaq (%5,%3,2),%5;" +#define SAVE_h_m1n4 "movq %2,%5;" unit_save_m1n4(%%ymm4) +#define SAVE_h_m1n8 SAVE_h_m1n4 unit_save_m1n4(%%ymm5) +#define SAVE_h_m1n12 SAVE_h_m1n8 unit_save_m1n4(%%ymm6) +#define SAVE_m1(ndim) SAVE_h_m1n##ndim "addq $16,%2;" +#define COMPUTE_m1(ndim) \ + INIT_m1n##ndim\ + "movq %%r13,%4; movq %%r14,%1;"\ + #ndim"001011:\n\t"\ + "testq %4,%4; jz "#ndim"001012f;"\ + KERNEL_k1m1n##ndim\ + "decq %4; jmp "#ndim"001011b;"\ + #ndim"001012:\n\t"\ + SAVE_m1(ndim) + +#define COMPUTE(ndim) {\ + next_b = b_pointer + ndim * K;\ + __asm__ __volatile__(\ + "vbroadcastf128 (%6),%%ymm0;"\ + "movq %4,%%r13; movq %4,%%r12; salq $5,%%r12; movq %1,%%r14; movq %7,%%r11;"\ + "cmpq $4,%7;jb 33101"#ndim"f;"\ + "33109"#ndim":\n\t"\ + COMPUTE_m4(ndim)\ + "subq $4,%7;cmpq $4,%7;jnb 33109"#ndim"b;"\ + "33101"#ndim":\n\t"\ + "cmpq $2,%7;jb 33104"#ndim"f;"\ + COMPUTE_m2(ndim)\ + "subq $2,%7;"\ + "33104"#ndim":\n\t"\ + "testq %7,%7;jz 33105"#ndim"f;"\ + COMPUTE_m1(ndim)\ + "33105"#ndim":\n\t"\ + "movq %%r13,%4; movq %%r14,%1; movq %%r11,%7;"\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(const_val),"+r"(M),"+r"(next_b)\ + ::"r11","r12","r13","r14","r15","xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14",\ + "xmm15","cc","memory");\ + a_pointer -= M * K; b_pointer += ndim * K; c_pointer += 2*(LDC * ndim - M);\ +} +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alphar, double alphai, double * 
__restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG LDC) +{ + if(m==0||n==0||k==0) return 0; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(double) * 2; + double constval[2]; constval[0] = alphar; constval[1] = alphai; + double *const_val=constval; + int64_t M = (int64_t)m, K = (int64_t)k; + BLASLONG n_count = n; + double *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B; + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) COMPUTE(2) + if(n_count>0) COMPUTE(1) + return 0; +} diff --git a/kernel/x86_64/zgemm_kernel_4x2_haswell.c b/kernel/x86_64/zgemm_kernel_4x2_haswell.c new file mode 100644 index 000000000..917a3fd48 --- /dev/null +++ b/kernel/x86_64/zgemm_kernel_4x2_haswell.c @@ -0,0 +1,240 @@ +#include "common.h" +#include <stdint.h> + +/* recommended settings: GEMM_P = 192, GEMM_Q = 192 */ + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) + #define A_CONJ 0 + #define B_CONJ 0 +#endif +#if defined(RN) || defined(RT) || defined(CN) || defined(CT) + #define A_CONJ 1 + #define B_CONJ 0 +#endif +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) + #define A_CONJ 0 + #define B_CONJ 1 +#endif +#if defined(RR) || defined(RC) || defined(CR) || defined(CC) + #define A_CONJ 1 + #define B_CONJ 1 +#endif + +/* %0 = a_ptr, %1 = b_ptr, %2 = c_ptr, %3 = c_tmp, %4 = ldc(bytes), %5 = k_counter, %6 = &alpha, %7 = m_counter, %8 = b_pref */ +/* r11 = m, r12 = k << 5, r13 = k, r14 = b_head, r15 = temp */ + +/* m=4, ymm 0-3 temp, ymm 4-15 acc */ +#if A_CONJ == B_CONJ + #define acc_m2n1_exp(ar,ai,b2,cl,cr) "vfmadd231pd %%ymm"#ar",%%ymm"#b2",%%ymm"#cl"; vfmadd231pd %%ymm"#ai",%%ymm"#b2",%%ymm"#cr";" + #define acc_m4n1_con(ua,la,b1,uc,lc) "vfmaddsub231pd %%ymm"#ua",%%ymm"#b1",%%ymm"#uc"; vfmaddsub231pd %%ymm"#la",%%ymm"#b1",%%ymm"#lc";" +#else + #define acc_m2n1_exp(ar,ai,b2,cl,cr) "vfmadd231pd %%ymm"#ar",%%ymm"#b2",%%ymm"#cl"; vfnmadd231pd %%ymm"#ai",%%ymm"#b2",%%ymm"#cr";" + #define acc_m4n1_con(ua,la,b1,uc,lc) "vfmsubadd231pd %%ymm"#ua",%%ymm"#b1",%%ymm"#uc"; vfmsubadd231pd %%ymm"#la",%%ymm"#b1",%%ymm"#lc";" +#endif +/* expanded accumulators for m4n1 and m4n2 */ +#define KERNEL_k1m4n1 \ + "vbroadcastf128 (%1),%%ymm0; addq $16,%1;"\ + "vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2;" acc_m2n1_exp(1,2,0,4,5)\ + "vmovddup 32(%0),%%ymm1; vmovddup 40(%0),%%ymm2;" acc_m2n1_exp(1,2,0,6,7)\ + "addq $64,%0;" +#define KERNEL_k1m4n2 \ + "vbroadcastf128 (%1),%%ymm0; vbroadcastf128 16(%1),%%ymm1; addq $32,%1;"\ + "vmovddup (%0),%%ymm2; vmovddup 8(%0),%%ymm3;" acc_m2n1_exp(2,3,0,4,5) acc_m2n1_exp(2,3,1,8,9)\ + "vmovddup 32(%0),%%ymm2; vmovddup 40(%0),%%ymm3;" acc_m2n1_exp(2,3,0,6,7) acc_m2n1_exp(2,3,1,10,11)\ + "addq $64,%0;" +/* contracted accumulators for m4n4 and m4n6 */ +#define acc_m4n2_con(ua,la,luc,llc,ruc,rlc,lboff,rboff,...)
\ + "vbroadcastsd "#lboff"("#__VA_ARGS__"),%%ymm2;" acc_m4n1_con(ua,la,2,luc,llc)\ + "vbroadcastsd "#rboff"("#__VA_ARGS__"),%%ymm3;" acc_m4n1_con(ua,la,3,ruc,rlc) +#define KERNEL_1_k1m4n4 \ + "vmovupd (%0),%%ymm0; vmovupd 32(%0),%%ymm1; prefetcht0 512(%0); addq $64,%0;"\ + acc_m4n2_con(0,1,4,5,6,7,0,16,%1) acc_m4n2_con(0,1,8,9,10,11,0,16,%1,%%r12,1) +#define KERNEL_2_k1m4n4 \ + "vpermilpd $5,-64(%0),%%ymm0; vpermilpd $5,-32(%0),%%ymm1;"\ + acc_m4n2_con(0,1,4,5,6,7,8,24,%1) acc_m4n2_con(0,1,8,9,10,11,8,24,%1,%%r12,1) +#define KERNEL_1_k1m4n6 KERNEL_1_k1m4n4 acc_m4n2_con(0,1,12,13,14,15,0,16,%1,%%r12,2) +#define KERNEL_2_k1m4n6 KERNEL_2_k1m4n4 acc_m4n2_con(0,1,12,13,14,15,8,24,%1,%%r12,2) +#define KERNEL_k1m4n4 KERNEL_1_k1m4n4 KERNEL_2_k1m4n4 "addq $32,%1;" +#define KERNEL_k1m4n6 KERNEL_1_k1m4n6 KERNEL_2_k1m4n6 "addq $32,%1;" +#define zero_4ymm(no1,no2,no3,no4) \ + "vpxor %%ymm"#no1",%%ymm"#no1",%%ymm"#no1"; vpxor %%ymm"#no2",%%ymm"#no2",%%ymm"#no2";"\ + "vpxor %%ymm"#no3",%%ymm"#no3",%%ymm"#no3"; vpxor %%ymm"#no4",%%ymm"#no4",%%ymm"#no4";" +/* initialization and storage macros */ +#define INIT_m4n1 zero_4ymm(4,5,6,7) +#define INIT_m4n2 zero_4ymm(4,5,6,7) zero_4ymm(8,9,10,11) +#define INIT_m4n4 zero_4ymm(4,5,6,7) zero_4ymm(8,9,10,11) +#define INIT_m4n6 INIT_m4n4 zero_4ymm(12,13,14,15) +#if A_CONJ == B_CONJ + #define cont_expacc(cl,cr,dst) "vpermilpd $5,%%ymm"#cr",%%ymm"#cr"; vaddsubpd %%ymm"#cl",%%ymm"#cr",%%ymm"#dst";" +#else + #define cont_expacc(cl,cr,dst) "vpermilpd $5,%%ymm"#cr",%%ymm"#cr"; vaddsubpd %%ymm"#cr",%%ymm"#cl",%%ymm"#dst";" +#endif +#if A_CONJ == 0 + #define save_1ymm(c,tmp,off,alpr,alpi,...) \ + "vpermilpd $5,%%ymm"#c",%%ymm"#tmp"; vfmsubadd213pd "#off"("#__VA_ARGS__"),%%ymm"#alpr",%%ymm"#c";"\ + "vfmsubadd231pd %%ymm"#tmp",%%ymm"#alpi",%%ymm"#c"; vmovupd %%ymm"#c","#off"("#__VA_ARGS__");" +#else + #define save_1ymm(c,tmp,off,alpr,alpi,...) 
\ + "vpermilpd $5,%%ymm"#c",%%ymm"#tmp"; vfmaddsub213pd "#off"("#__VA_ARGS__"),%%ymm"#alpi",%%ymm"#tmp";"\ + "vfmaddsub231pd %%ymm"#c",%%ymm"#alpr",%%ymm"#tmp"; vmovupd %%ymm"#tmp","#off"("#__VA_ARGS__");" +#endif +#define save_init_m4 "movq %2,%3; addq $64,%2; vbroadcastsd (%6),%%ymm0; vbroadcastsd 8(%6),%%ymm1;" +#define SAVE_m4n1 save_init_m4 cont_expacc(4,5,4) cont_expacc(6,7,6) save_1ymm(4,2,0,0,1,%3) save_1ymm(6,3,32,0,1,%3) +#define SAVE_m4n2 SAVE_m4n1\ + cont_expacc(8,9,8) cont_expacc(10,11,10) save_1ymm(8,2,0,0,1,%3,%4,1) save_1ymm(10,3,32,0,1,%3,%4,1) +#define SAVE_m4n4 save_init_m4\ + save_1ymm(4,2,0,0,1,%3) save_1ymm(5,3,32,0,1,%3) save_1ymm(6,2,0,0,1,%3,%4,1) save_1ymm(7,3,32,0,1,%3,%4,1) "leaq (%3,%4,2),%3;"\ + save_1ymm(8,2,0,0,1,%3) save_1ymm(9,3,32,0,1,%3) save_1ymm(10,2,0,0,1,%3,%4,1) save_1ymm(11,3,32,0,1,%3,%4,1) +#define SAVE_m4n6 SAVE_m4n4 "leaq (%3,%4,2),%3;"\ + save_1ymm(12,2,0,0,1,%3) save_1ymm(13,3,32,0,1,%3) save_1ymm(14,2,0,0,1,%3,%4,1) save_1ymm(15,3,32,0,1,%3,%4,1) +#define COMPUTE_m4(ndim) \ + "movq %%r14,%1;" INIT_m4n##ndim "movq %2,%3; movq %%r13,%5;"\ + "testq %5,%5; jz "#ndim"4443f; cmpq $10,%5; jb "#ndim"4442f;"\ + "movq $10,%5; movq $84,%%r15;"\ + #ndim"4441:\n\t"\ + "prefetcht1 (%3); subq $63,%3; addq %%r15,%3;"\ + KERNEL_k1m4n##ndim KERNEL_k1m4n##ndim\ + "testq $12,%5; movq $84,%%r15; cmovz %4,%%r15; prefetcht1 (%8); addq $16,%8;"\ + KERNEL_k1m4n##ndim KERNEL_k1m4n##ndim\ + "addq $4,%5; cmpq %5,%%r13; jnb "#ndim"4441b;"\ + "movq %2,%3; negq %5; leaq 10(%%r13,%5,1),%5; prefetcht0 (%6); prefetcht0 15(%6);"\ + #ndim"4442:\n\t"\ + "prefetcht0 (%3); prefetcht0 63(%3); addq %4,%3;"\ + KERNEL_k1m4n##ndim "decq %5; jnz "#ndim"4442b;"\ + #ndim"4443:\n\t"\ + "prefetcht0 (%%r14); prefetcht0 64(%%r14);" SAVE_m4n##ndim + +/* m=2, ymm 0-3 temp, ymm 4-15 acc, expanded accumulators */ +#define KERNEL_k1m2n1 \ + "vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2; addq $32,%0;"\ + "vbroadcastf128 (%1),%%ymm0;" acc_m2n1_exp(1,2,0,4,5) "addq $16,%1;" +#define acc_m2n2_exp(c1l,c1r,c2l,c2r,...) 
\ + "vbroadcastf128 ("#__VA_ARGS__"),%%ymm2;" acc_m2n1_exp(0,1,2,c1l,c1r)\ + "vbroadcastf128 16("#__VA_ARGS__"),%%ymm3;" acc_m2n1_exp(0,1,3,c2l,c2r) +#define KERNEL_h_k1m2n2 \ + "vmovddup (%0),%%ymm0; vmovddup 8(%0),%%ymm1; addq $32,%0;" acc_m2n2_exp(4,5,6,7,%1) +#define KERNEL_h_k1m2n4 KERNEL_h_k1m2n2 acc_m2n2_exp(8,9,10,11,%1,%%r12,1) +#define KERNEL_h_k1m2n6 KERNEL_h_k1m2n4 acc_m2n2_exp(12,13,14,15,%1,%%r12,2) +#define KERNEL_k1m2n2 KERNEL_h_k1m2n2 "addq $32,%1;" +#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $32,%1;" +#define KERNEL_k1m2n6 KERNEL_h_k1m2n6 "addq $32,%1;" +#define INIT_m2n1 "vpxor %%ymm4,%%ymm4,%%ymm4; vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m2n2 zero_4ymm(4,5,6,7) +#define INIT_m2n4 INIT_m2n2 zero_4ymm(8,9,10,11) +#define INIT_m2n6 INIT_m2n4 zero_4ymm(12,13,14,15) +#define save_init_m2 "movq %2,%3; addq $32,%2; vbroadcastsd (%6),%%ymm0; vbroadcastsd 8(%6),%%ymm1;" +#define SAVE_m2n1 save_init_m2 cont_expacc(4,5,4) save_1ymm(4,2,0,0,1,%3) +#define SAVE_m2n2 SAVE_m2n1 cont_expacc(6,7,6) save_1ymm(6,3,0,0,1,%3,%4,1) +#define SAVE_m2n4 SAVE_m2n2 "leaq (%3,%4,2),%3;"\ + cont_expacc(8,9,8) cont_expacc(10,11,10) save_1ymm(8,2,0,0,1,%3) save_1ymm(10,3,0,0,1,%3,%4,1) +#define SAVE_m2n6 SAVE_m2n4 "leaq (%3,%4,2),%3;"\ + cont_expacc(12,13,12) cont_expacc(14,15,14) save_1ymm(12,2,0,0,1,%3) save_1ymm(14,3,0,0,1,%3,%4,1) +#define COMPUTE_m2(ndim) \ + "movq %%r14,%1;" INIT_m2n##ndim "movq %%r13,%5;"\ + "testq %5,%5; jz "#ndim"2222f;"\ + #ndim"2221:\n\t"\ + KERNEL_k1m2n##ndim\ + "decq %5; jnz "#ndim"2221b;"\ + #ndim"2222:\n\t"\ + SAVE_m2n##ndim + +/* m=1, vmm 0-3 temp, vmm 4-15 acc, expanded accumulators */ +#if A_CONJ == B_CONJ + #define acc_m1n1_exp(ar,ai,b2,cl,cr) "vfmadd231pd %%xmm"#ar",%%xmm"#b2",%%xmm"#cl"; vfmadd231pd %%xmm"#ai",%%xmm"#b2",%%xmm"#cr";" + #define acc_m1n2_exp(arb,aib,b4,cl,cr) "vfmadd231pd %%ymm"#arb",%%ymm"#b4",%%ymm"#cl"; vfmadd231pd %%ymm"#aib",%%ymm"#b4",%%ymm"#cr";" +#else + #define acc_m1n1_exp(ar,ai,b2,cl,cr) "vfmadd231pd %%xmm"#ar",%%xmm"#b2",%%xmm"#cl"; vfnmadd231pd %%xmm"#ai",%%xmm"#b2",%%xmm"#cr";" + #define acc_m1n2_exp(arb,aib,b4,cl,cr) "vfmadd231pd %%ymm"#arb",%%ymm"#b4",%%ymm"#cl"; vfnmadd231pd %%ymm"#aib",%%ymm"#b4",%%ymm"#cr";" +#endif +#define KERNEL_k1m1n1 \ + "vmovddup (%0),%%xmm0; vmovddup 8(%0),%%xmm1; addq $16,%0;"\ + "vmovupd (%1),%%xmm2; addq $16,%1;" acc_m1n1_exp(0,1,2,4,5) +#define KERNEL_h_k1m1n2 \ + "vbroadcastsd (%0),%%ymm0; vbroadcastsd 8(%0),%%ymm1; addq $16,%0;"\ + "vmovupd (%1),%%ymm2;" acc_m1n2_exp(0,1,2,4,5) +#define KERNEL_h_k1m1n4 KERNEL_h_k1m1n2 "vmovupd (%1,%%r12,1),%%ymm2;" acc_m1n2_exp(0,1,2,6,7) +#define KERNEL_h_k1m1n6 KERNEL_h_k1m1n4 "vmovupd (%1,%%r12,2),%%ymm2;" acc_m1n2_exp(0,1,2,8,9) +#define KERNEL_k1m1n2 KERNEL_h_k1m1n2 "addq $32,%1;" +#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $32,%1;" +#define KERNEL_k1m1n6 KERNEL_h_k1m1n6 "addq $32,%1;" +#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4; vpxor %%xmm5,%%xmm5,%%xmm5;" +#define INIT_m1n2 "vpxor %%ymm4,%%ymm4,%%ymm4; vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m1n4 INIT_m1n2 "vpxor %%ymm6,%%ymm6,%%ymm6; vpxor %%ymm7,%%ymm7,%%ymm7;" +#define INIT_m1n6 INIT_m1n4 "vpxor %%ymm8,%%ymm8,%%ymm8; vpxor %%ymm9,%%ymm9,%%ymm9;" +#if A_CONJ == B_CONJ + #define cont_expxmmacc(cl,cr,dst) "vpermilpd $5,%%xmm"#cr",%%xmm"#cr"; vaddsubpd %%xmm"#cl",%%xmm"#cr",%%xmm"#dst";" +#else + #define cont_expxmmacc(cl,cr,dst) "vpermilpd $5,%%xmm"#cr",%%xmm"#cr"; vaddsubpd %%xmm"#cr",%%xmm"#cl",%%xmm"#dst";" +#endif +#if A_CONJ == 0 + #define save_m1n1(c,tmp,alpr,alpi) \ + "vpermilpd 
$5,%%xmm"#c",%%xmm"#tmp"; vfmsubadd213pd (%3),%%xmm"#alpr",%%xmm"#c";"\ + "vfmsubadd231pd %%xmm"#tmp",%%xmm"#alpi",%%xmm"#c"; vmovupd %%xmm"#c",(%3);" + #define save_m1n2(c,tmp1,tmp2,alpr,alpi) \ + "vpermilpd $5,%%ymm"#c",%%ymm"#tmp1"; vmovupd (%3),%%xmm"#tmp2"; vinsertf128 $1,(%3,%4,1),%%ymm"#tmp2",%%ymm"#tmp2";"\ + "vfmsubadd213pd %%ymm"#tmp2",%%ymm"#alpr",%%ymm"#c"; vfmsubadd231pd %%ymm"#tmp1",%%ymm"#alpi",%%ymm"#c";"\ + "vmovupd %%xmm"#c",(%3); vextractf128 $1,%%ymm"#c",(%3,%4,1); leaq (%3,%4,2),%3;" +#else + #define save_m1n1(c,tmp,alpr,alpi) \ + "vpermilpd $5,%%xmm"#c",%%xmm"#tmp"; vfmaddsub213pd (%3),%%xmm"#alpi",%%xmm"#tmp";"\ + "vfmaddsub231pd %%xmm"#c",%%xmm"#alpr",%%xmm"#tmp"; vmovupd %%xmm"#tmp",(%3);" + #define save_m1n2(c,tmp1,tmp2,alpr,alpi) \ + "vpermilpd $5,%%ymm"#c",%%ymm"#tmp1"; vmovupd (%3),%%xmm"#tmp2"; vinsertf128 $1,(%3,%4,1),%%ymm"#tmp2",%%ymm"#tmp2";"\ + "vfmaddsub213pd %%ymm"#tmp2",%%ymm"#alpi",%%ymm"#tmp1"; vfmaddsub231pd %%ymm"#c",%%ymm"#alpr",%%ymm"#tmp1";"\ + "vmovupd %%xmm"#tmp1",(%3); vextractf128 $1,%%ymm"#tmp1",(%3,%4,1); leaq (%3,%4,2),%3;" +#endif +#define save_init_m1 "movq %2,%3; addq $16,%2; vbroadcastsd (%6),%%ymm0; vbroadcastsd 8(%6),%%ymm1;" +#define SAVE_m1n1 save_init_m1 cont_expxmmacc(4,5,4) save_m1n1(4,2,0,1) +#define SAVE_m1n2 save_init_m1 cont_expacc(4,5,4) save_m1n2(4,2,3,0,1) +#define SAVE_m1n4 SAVE_m1n2 cont_expacc(6,7,6) save_m1n2(6,2,3,0,1) +#define SAVE_m1n6 SAVE_m1n4 cont_expacc(8,9,8) save_m1n2(8,2,3,0,1) +#define COMPUTE_m1(ndim) \ + "movq %%r14,%1;" INIT_m1n##ndim "movq %%r13,%5;"\ + "testq %5,%5; jz "#ndim"1112f;"\ + #ndim"1111:\n\t"\ + KERNEL_k1m1n##ndim\ + "decq %5; jnz "#ndim"1111b;"\ + #ndim"1112:\n\t"\ + SAVE_m1n##ndim + +#define COMPUTE(ndim) {\ + b_pref = b_ptr + ndim * K *2;\ + __asm__ __volatile__ (\ + "movq %1,%%r14; movq %5,%%r13; movq %5,%%r12; salq $5,%%r12; movq %7,%%r11;"\ + "cmpq $4,%7; jb "#ndim"9992f;"\ + #ndim"9991:\n\t"\ + COMPUTE_m4(ndim)\ + "subq $4,%7; cmpq $4,%7; jnb "#ndim"9991b;"\ + #ndim"9992:\n\t"\ + "cmpq $2,%7; jb "#ndim"9993f;"\ + COMPUTE_m2(ndim) "subq $2,%7;"\ + #ndim"9993:\n\t"\ + "testq %7,%7; jz "#ndim"9994f;"\ + COMPUTE_m1(ndim)\ + #ndim"9994:\n\t"\ + "movq %%r14,%1; movq %%r13,%5; movq %%r11,%7; vzeroupper;"\ + :"+r"(a_ptr),"+r"(b_ptr),"+r"(c_ptr),"+r"(c_tmp),"+r"(ldc_in_bytes),"+r"(K),"+r"(alp),"+r"(M),"+r"(b_pref)\ + ::"cc","memory","r11","r12","r13","r14","r15","xmm0","xmm1","xmm2","xmm3","xmm4","xmm5",\ + "xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15");\ + a_ptr -= M * K *2; b_ptr += ndim * K *2; c_ptr += (ndim * LDC - M) * 2;\ +} + +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alphar, double alphai, double * __restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG LDC) +{ + if(m==0||n==0||k==0||(alphar==0.0 && alphai==0.0)) return 0; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(double) * 2; +#if A_CONJ == B_CONJ + double const_val[2] = {-alphar, -alphai}; +#else + double const_val[2] = {alphar, alphai}; +#endif + int64_t M = (int64_t)m, K = (int64_t)k; + BLASLONG n_count = n; + double *a_ptr = A,*b_ptr = B,*c_ptr = C,*c_tmp = C,*alp = const_val,*b_pref = B; + for(;n_count>5;n_count-=6) COMPUTE(6) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) COMPUTE(2) + if(n_count>0) COMPUTE(1) + return 0; +} diff --git a/kernel/x86_64/zgemm_kernel_4x2_skylakex.c b/kernel/x86_64/zgemm_kernel_4x2_skylakex.c new file mode 100644 index 000000000..0606a3f7c --- /dev/null +++ 
b/kernel/x86_64/zgemm_kernel_4x2_skylakex.c @@ -0,0 +1,283 @@ +#include "common.h" +#include <stdint.h> + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) + #define ZGEMM_SKX_MODE 0 //no conjugation of a_block or b_block +#endif +#if defined(RN) || defined(RT) || defined(CN) || defined(CT) + #define ZGEMM_SKX_MODE 1 //conjugate a_block but not b_block +#endif +#if defined(NR) || defined(NC) || defined(TR) || defined(TC) + #define ZGEMM_SKX_MODE 2 //conjugate b_block but not a_block +#endif +#if defined(RR) || defined(RC) || defined(CR) || defined(CC) + #define ZGEMM_SKX_MODE 3 //conjugate both a_block and b_block +#endif + +// recommended settings: GEMM_DEFAULT_Q = 128, GEMM_DEFAULT_P = 256 +/* %0=a_pointer, %1=b_pointer, %2=c_pointer, %3=c_store, %4=ldc(bytes), %5=&constval, %6 = k_counter, %7 = m_counter, %8 = b_pref */ +// const double constval[4] = {alpha_r, alpha_i, -1, 1}; +/* r11 = m; r12 = k * 32; r13 = k; r14 = b_head; r15 = %1 + r12 * 3; */ +#define GENERAL_INIT "movq %7,%%r11; movq %1,%%r14; movq %6,%%r13; movq %6,%%r12; salq $5,%%r12;" +#define GENERAL_RECOVER "movq %%r11,%7; movq %%r13,%6; movq %%r14,%1;" +#define CONSTZMM_INIT "vbroadcastsd (%5),%%zmm0; vbroadcastsd 8(%5),%%zmm1; vbroadcastf32x4 16(%5),%%zmm2;" +#define COMPUTE_INIT "movq %%r13,%6; movq %%r14,%1; leaq (%%r14,%%r12,2),%%r15; addq %%r12,%%r15;" + +/* m=4, zmm0=alpha_r, zmm1=alpha_i, zmm2={-1,1,...,-1,1}, zmm3-zmm7 for temporary use, zmm8-zmm31 for accumulators */ +#if ZGEMM_SKX_MODE == 0 || ZGEMM_SKX_MODE == 2 //no conjugation of a_block + #define unit_kernel_k1m4n1(a_r,a_i,b_off,c_le,c_ri,...) \ + "vbroadcastf32x4 "#b_off"("#__VA_ARGS__"),%%zmm3; vfmadd231pd "#a_r",%%zmm3,"#c_le"; vfmadd231pd "#a_i",%%zmm3,"#c_ri";" +#else //conjugate a_block + #define unit_kernel_k1m4n1(a_r,a_i,b_off,c_le,c_ri,...) \ + "vbroadcastf32x4 "#b_off"("#__VA_ARGS__"),%%zmm3; vfmadd231pd "#a_r",%%zmm3,"#c_le"; vfnmadd231pd "#a_i",%%zmm3,"#c_ri";" +#endif +#define KERNEL_h_k1m4n1 \ + "vmovddup (%0),%%zmm4; vmovddup 8(%0),%%zmm5; prefetcht0 512(%0); addq $64,%0;"\ + unit_kernel_k1m4n1(%%zmm4,%%zmm5,0,%%zmm8,%%zmm9,%1) +#define KERNEL_t_k1m4n1 KERNEL_h_k1m4n1 "addq $16,%1;" +#define KERNEL_h_k1m4n2 KERNEL_h_k1m4n1 unit_kernel_k1m4n1(%%zmm4,%%zmm5,16,%%zmm10,%%zmm11,%1) +#define KERNEL_t_k1m4n2 KERNEL_h_k1m4n2 "addq $32,%1;" +#define unit_kernel_k1m4n2(c1le,c1ri,c2le,c2ri,...) \ + unit_kernel_k1m4n1(%%zmm4,%%zmm5,0,c1le,c1ri,__VA_ARGS__)\ + unit_kernel_k1m4n1(%%zmm4,%%zmm5,16,c2le,c2ri,__VA_ARGS__) +#define KERNEL_h_k1m4n4 KERNEL_h_k1m4n2 unit_kernel_k1m4n2(%%zmm12,%%zmm13,%%zmm14,%%zmm15,%1,%%r12,1) +#define KERNEL_t_k1m4n4 KERNEL_h_k1m4n4 "addq $32,%1;" +#define KERNEL_t_k1m4n6 KERNEL_h_k1m4n4 unit_kernel_k1m4n2(%%zmm16,%%zmm17,%%zmm18,%%zmm19,%1,%%r12,2) "addq $32,%1;" +#define KERNEL_h_k1m4n8 KERNEL_t_k1m4n6 unit_kernel_k1m4n2(%%zmm20,%%zmm21,%%zmm22,%%zmm23,%%r15) +#define KERNEL_t_k1m4n8 KERNEL_h_k1m4n8 "addq $32,%%r15;" +#define KERNEL_h_k1m4n10 KERNEL_h_k1m4n8 unit_kernel_k1m4n2(%%zmm24,%%zmm25,%%zmm26,%%zmm27,%%r15,%%r12,1) +#define KERNEL_t_k1m4n10 KERNEL_h_k1m4n10 "addq $32,%%r15;" +#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n10 unit_kernel_k1m4n2(%%zmm28,%%zmm29,%%zmm30,%%zmm31,%%r15,%%r12,2) +#define KERNEL_t_k1m4n12 KERNEL_h_k1m4n12 "addq $32,%%r15;" +#if ZGEMM_SKX_MODE == 0 || ZGEMM_SKX_MODE == 1 //no conjugation of b_block + #define unit_save_m4n1(c_le,c_ri,...)
\ + "vpermilpd $85,"#c_ri","#c_ri"; vfmadd231pd "#c_ri",%%zmm2,"#c_le"; vpermilpd $85,"#c_le",%%zmm4;"\ + "vfmaddsub213pd ("#__VA_ARGS__"),%%zmm1,%%zmm4; vfmaddsub213pd %%zmm4,%%zmm0,"#c_le"; vmovupd "#c_le",("#__VA_ARGS__");" +#else //do conjugation on b_block + #define unit_save_m4n1(c_le,c_ri,...) \ + "vpermilpd $85,"#c_ri","#c_ri"; vfnmadd231pd "#c_ri",%%zmm2,"#c_le"; vpermilpd $85,"#c_le",%%zmm4;"\ + "vfmsubadd213pd ("#__VA_ARGS__"),%%zmm0,"#c_le"; vfmsubadd231pd %%zmm4,%%zmm1,"#c_le"; vmovupd "#c_le",("#__VA_ARGS__");" +#endif +#define SAVE_SETUP_m4 "movq %2,%3; addq $64,%2;" +#define SAVE_m4n1 SAVE_SETUP_m4 unit_save_m4n1(%%zmm8,%%zmm9,%3) +#define SAVE_m4n2 SAVE_m4n1 unit_save_m4n1(%%zmm10,%%zmm11,%3,%4,1) +#define unit_save_m4n2(c1le,c1ri,c2le,c2ri) \ + "leaq (%3,%4,2),%3;" unit_save_m4n1(c1le,c1ri,%3) unit_save_m4n1(c2le,c2ri,%3,%4,1) +#define SAVE_m4n4 SAVE_m4n2 unit_save_m4n2(%%zmm12,%%zmm13,%%zmm14,%%zmm15) +#define SAVE_m4n6 SAVE_m4n4 unit_save_m4n2(%%zmm16,%%zmm17,%%zmm18,%%zmm19) +#define SAVE_m4n8 SAVE_m4n6 unit_save_m4n2(%%zmm20,%%zmm21,%%zmm22,%%zmm23) +#define SAVE_m4n10 SAVE_m4n8 unit_save_m4n2(%%zmm24,%%zmm25,%%zmm26,%%zmm27) +#define SAVE_m4n12 SAVE_m4n10 unit_save_m4n2(%%zmm28,%%zmm29,%%zmm30,%%zmm31) +#define unit_init_m4n1(c_le,c_ri) "vpxorq "#c_le","#c_le","#c_le"; vpxorq "#c_ri","#c_ri","#c_ri";" +#define INIT_m4n1 unit_init_m4n1(%%zmm8,%%zmm9) +#define INIT_m4n2 INIT_m4n1 unit_init_m4n1(%%zmm10,%%zmm11) +#define INIT_m4n4 INIT_m4n2 unit_init_m4n1(%%zmm12,%%zmm13) unit_init_m4n1(%%zmm14,%%zmm15) +#define INIT_m4n6 INIT_m4n4 unit_init_m4n1(%%zmm16,%%zmm17) unit_init_m4n1(%%zmm18,%%zmm19) +#define INIT_m4n8 INIT_m4n6 unit_init_m4n1(%%zmm20,%%zmm21) unit_init_m4n1(%%zmm22,%%zmm23) +#define INIT_m4n10 INIT_m4n8 unit_init_m4n1(%%zmm24,%%zmm25) unit_init_m4n1(%%zmm26,%%zmm27) +#define INIT_m4n12 INIT_m4n10 unit_init_m4n1(%%zmm28,%%zmm29) unit_init_m4n1(%%zmm30,%%zmm31) +#define COMPUTE_m4(ndim) \ + INIT_m4n##ndim\ + COMPUTE_INIT "movq %2,%3;"\ + "cmpq $20,%6; jb "#ndim"88440f;"\ + #ndim"88449:\n\t"\ + KERNEL_t_k1m4n##ndim\ + KERNEL_t_k1m4n##ndim\ + KERNEL_t_k1m4n##ndim\ + "prefetcht1 (%3); prefetcht1 63(%3); addq %4,%3;"\ + KERNEL_t_k1m4n##ndim\ + KERNEL_t_k1m4n##ndim\ + KERNEL_t_k1m4n##ndim\ + "prefetcht1 (%8); addq $24,%8;"\ + "subq $6,%6; cmpq $20,%6; jnb "#ndim"88449b;"\ + "movq %2,%3;"\ + #ndim"88440:\n\t"\ + "testq %6,%6; jz "#ndim"88441f;"\ + "prefetcht0 (%3); prefetcht0 63(%3); addq %4,%3;"\ + KERNEL_t_k1m4n##ndim\ + "decq %6; jmp "#ndim"88440b;"\ + #ndim"88441:\n\t"\ + SAVE_m4n##ndim + +/* m=2, ymm0-ymm3 for temporary use, ymm4-ymm15 for accumulators */ +#if ZGEMM_SKX_MODE == 0 || ZGEMM_SKX_MODE == 3 //conjg_a == conjg_b; ap = permilpd($5,a0) + #define unit_kernel_k1m2n1(a0,ap,b_off_r,b_off_i,c1,...) \ + "vbroadcastsd "#b_off_i"("#__VA_ARGS__"),%%ymm2; vfmaddsub231pd "#ap",%%ymm2,"#c1";"\ + "vbroadcastsd "#b_off_r"("#__VA_ARGS__"),%%ymm2; vfmaddsub231pd "#a0",%%ymm2,"#c1";" +#else //conjg_a != conjg_b + #define unit_kernel_k1m2n1(a0,ap,b_off_r,b_off_i,c1,...) 
\ + "vbroadcastsd "#b_off_i"("#__VA_ARGS__"),%%ymm2; vfmsubadd231pd "#ap",%%ymm2,"#c1";"\ + "vbroadcastsd "#b_off_r"("#__VA_ARGS__"),%%ymm2; vfmsubadd231pd "#a0",%%ymm2,"#c1";" +#endif +#define KERNEL_h_k1m2n1 \ + "vmovupd (%0),%%ymm0; vpermilpd $5,%%ymm0,%%ymm1; addq $32,%0;"\ + unit_kernel_k1m2n1(%%ymm0,%%ymm1,0,8,%%ymm4,%1) +#define KERNEL_t_k1m2n1 KERNEL_h_k1m2n1 "addq $16,%1;" +#define KERNEL_h_k1m2n2 KERNEL_h_k1m2n1 unit_kernel_k1m2n1(%%ymm0,%%ymm1,16,24,%%ymm5,%1) +#define KERNEL_t_k1m2n2 KERNEL_h_k1m2n2 "addq $32,%1;" +#define unit_kernel_k1m2n2(c1,c2,...) \ + unit_kernel_k1m2n1(%%ymm0,%%ymm1,0,8,c1,__VA_ARGS__)\ + unit_kernel_k1m2n1(%%ymm0,%%ymm1,16,24,c2,__VA_ARGS__) +#define KERNEL_h_k1m2n4 KERNEL_h_k1m2n2 unit_kernel_k1m2n2(%%ymm6,%%ymm7,%1,%%r12,1) +#define KERNEL_t_k1m2n4 KERNEL_h_k1m2n4 "addq $32,%1;" +#define KERNEL_t_k1m2n6 KERNEL_h_k1m2n4 unit_kernel_k1m2n2(%%ymm8,%%ymm9,%1,%%r12,2) "addq $32,%1;" +#define KERNEL_h_k1m2n8 KERNEL_t_k1m2n6 unit_kernel_k1m2n2(%%ymm10,%%ymm11,%%r15) +#define KERNEL_t_k1m2n8 KERNEL_h_k1m2n8 "addq $32,%%r15;" +#define KERNEL_h_k1m2n10 KERNEL_h_k1m2n8 unit_kernel_k1m2n2(%%ymm12,%%ymm13,%%r15,%%r12,1) +#define KERNEL_t_k1m2n10 KERNEL_h_k1m2n10 "addq $32,%%r15;" +#define KERNEL_h_k1m2n12 KERNEL_h_k1m2n10 unit_kernel_k1m2n2(%%ymm14,%%ymm15,%%r15,%%r12,2) +#define KERNEL_t_k1m2n12 KERNEL_h_k1m2n12 "addq $32,%%r15;" +#if ZGEMM_SKX_MODE == 0 || ZGEMM_SKX_MODE == 2 //not to do conjugation on a_block + #define unit_save_m2n1(alp_r,alp_i,c1,...) \ + "vpermilpd $5,"#c1",%%ymm3; vfmaddsub213pd ("#__VA_ARGS__"),"#alp_i",%%ymm3;"\ + "vfmaddsub213pd %%ymm3,"#alp_r","#c1";vmovupd "#c1",("#__VA_ARGS__");" +#else //do conjugation on a_block + #define unit_save_m2n1(alp_r,alp_i,c1,...) \ + "vpermilpd $5,"#c1",%%ymm3; vfmsubadd213pd ("#__VA_ARGS__"),"#alp_r","#c1";"\ + "vfmsubadd231pd %%ymm3,"#alp_i","#c1";vmovupd "#c1",("#__VA_ARGS__");" +#endif +#define SAVE_SETUP_m2 "movq %2,%3; addq $32,%2; vbroadcastsd (%5),%%ymm0; vbroadcastsd 8(%5),%%ymm1;" +#define SAVE_m2n1 SAVE_SETUP_m2 unit_save_m2n1(%%ymm0,%%ymm1,%%ymm4,%3) +#define SAVE_m2n2 SAVE_m2n1 unit_save_m2n1(%%ymm0,%%ymm1,%%ymm5,%3,%4,1) +#define unit_save_m2n2(c1,c2) \ + "leaq (%3,%4,2),%3;" unit_save_m2n1(%%ymm0,%%ymm1,c1,%3) unit_save_m2n1(%%ymm0,%%ymm1,c2,%3,%4,1) +#define SAVE_m2n4 SAVE_m2n2 unit_save_m2n2(%%ymm6,%%ymm7) +#define SAVE_m2n6 SAVE_m2n4 unit_save_m2n2(%%ymm8,%%ymm9) +#define SAVE_m2n8 SAVE_m2n6 unit_save_m2n2(%%ymm10,%%ymm11) +#define SAVE_m2n10 SAVE_m2n8 unit_save_m2n2(%%ymm12,%%ymm13) +#define SAVE_m2n12 SAVE_m2n10 unit_save_m2n2(%%ymm14,%%ymm15) +#define INIT_m2n1 "vpxor %%ymm4,%%ymm4,%%ymm4;" +#define unit_init_m2n2(c1,c2) "vpxor "#c1","#c1","#c1"; vpxor "#c2","#c2","#c2";" +#define INIT_m2n2 unit_init_m2n2(%%ymm4,%%ymm5) +#define INIT_m2n4 INIT_m2n2 unit_init_m2n2(%%ymm6,%%ymm7) +#define INIT_m2n6 INIT_m2n4 unit_init_m2n2(%%ymm8,%%ymm9) +#define INIT_m2n8 INIT_m2n6 unit_init_m2n2(%%ymm10,%%ymm11) +#define INIT_m2n10 INIT_m2n8 unit_init_m2n2(%%ymm12,%%ymm13) +#define INIT_m2n12 INIT_m2n10 unit_init_m2n2(%%ymm14,%%ymm15) +#define COMPUTE_m2(ndim) \ + INIT_m2n##ndim\ + COMPUTE_INIT\ + #ndim"88220:\n\t"\ + "testq %6,%6; jz "#ndim"88221f;"\ + KERNEL_t_k1m2n##ndim\ + "decq %6; jmp "#ndim"88220b;"\ + #ndim"88221:\n\t"\ + SAVE_m2n##ndim + +/* m=1, ymm0-ymm3 and ymm10-ymm15 for temporary use, ymm4-ymm9 for accumulators */ +#if ZGEMM_SKX_MODE == 0 || ZGEMM_SKX_MODE == 3 //conjg_a == conjg_b; ap = permilpd($5,a0) + #define unit_kernel_k1m1n1(a0,ap,b_off_r,b_off_i,c1,...) 
\ + "vmovddup "#b_off_i"("#__VA_ARGS__"),%%xmm2; vfmaddsub231pd "#ap",%%xmm2,"#c1";"\ + "vmovddup "#b_off_r"("#__VA_ARGS__"),%%xmm2; vfmaddsub231pd "#a0",%%xmm2,"#c1";" + #define unit_kernel_k1m1n2(a0,ap,b_off_r,b_off_i,c1,...) \ + "vmovddup "#b_off_i"("#__VA_ARGS__"),%%ymm2; vfmaddsub231pd "#ap",%%ymm2,"#c1";"\ + "vmovddup "#b_off_r"("#__VA_ARGS__"),%%ymm2; vfmaddsub231pd "#a0",%%ymm2,"#c1";" +#else //conjg_a != conjg_b + #define unit_kernel_k1m1n1(a0,ap,b_off_r,b_off_i,c1,...) \ + "vmovddup "#b_off_i"("#__VA_ARGS__"),%%xmm2; vfmsubadd231pd "#ap",%%xmm2,"#c1";"\ + "vmovddup "#b_off_r"("#__VA_ARGS__"),%%xmm2; vfmsubadd231pd "#a0",%%xmm2,"#c1";" + #define unit_kernel_k1m1n2(a0,ap,b_off_r,b_off_i,c1,...) \ + "vmovddup "#b_off_i"("#__VA_ARGS__"),%%ymm2; vfmsubadd231pd "#ap",%%ymm2,"#c1";"\ + "vmovddup "#b_off_r"("#__VA_ARGS__"),%%ymm2; vfmsubadd231pd "#a0",%%ymm2,"#c1";" +#endif +#define KERNEL_h_k1m1n1 \ + "vmovupd (%0),%%xmm0; vpermilpd $5,%%xmm0,%%xmm1; addq $16,%0;"\ + unit_kernel_k1m1n1(%%xmm0,%%xmm1,0,8,%%xmm4,%1) +#define KERNEL_t_k1m1n1 KERNEL_h_k1m1n1 "addq $16,%1;" +#define KERNEL_h_k1m1n2 \ + "vbroadcastf128 (%0),%%ymm0; vpermilpd $5,%%ymm0,%%ymm1; addq $16,%0;"\ + unit_kernel_k1m1n2(%%ymm0,%%ymm1,0,8,%%ymm4,%1) +#define KERNEL_t_k1m1n2 KERNEL_h_k1m1n2 "addq $32,%1;" +#define KERNEL_h_k1m1n4 KERNEL_h_k1m1n2 unit_kernel_k1m1n2(%%ymm0,%%ymm1,0,8,%%ymm5,%1,%%r12,1) +#define KERNEL_t_k1m1n4 KERNEL_h_k1m1n4 "addq $32,%1;" +#define KERNEL_t_k1m1n6 KERNEL_h_k1m1n4 unit_kernel_k1m1n2(%%ymm0,%%ymm1,0,8,%%ymm6,%1,%%r12,2) "addq $32,%1;" +#define KERNEL_h_k1m1n8 KERNEL_t_k1m1n6 unit_kernel_k1m1n2(%%ymm0,%%ymm1,0,8,%%ymm7,%%r15) +#define KERNEL_t_k1m1n8 KERNEL_h_k1m1n8 "addq $32,%%r15;" +#define KERNEL_h_k1m1n10 KERNEL_h_k1m1n8 unit_kernel_k1m1n2(%%ymm0,%%ymm1,0,8,%%ymm8,%%r15,%%r12,1) +#define KERNEL_t_k1m1n10 KERNEL_h_k1m1n10 "addq $32,%%r15;" +#define KERNEL_h_k1m1n12 KERNEL_h_k1m1n10 unit_kernel_k1m1n2(%%ymm0,%%ymm1,0,8,%%ymm9,%%r15,%%r12,2) +#define KERNEL_t_k1m1n12 KERNEL_h_k1m1n12 "addq $32,%%r15;" +#if ZGEMM_SKX_MODE == 0 || ZGEMM_SKX_MODE == 2 //not to do conjugation on a_block + #define unit_save_m1n1(alp_r,alp_i,c1,...) \ + "vpermilpd $5,"#c1",%%xmm3; vfmaddsub213pd ("#__VA_ARGS__"),"#alp_i",%%xmm3;"\ + "vfmaddsub213pd %%xmm3,"#alp_r","#c1";vmovupd "#c1",("#__VA_ARGS__");" + #define unit_save_m1n2(alp_r,alp_i,c1) \ + "vpermilpd $5,"#c1",%%ymm3; vmovupd (%3),%%xmm2; vinsertf128 $1,(%3,%4,1),%%ymm2,%%ymm2;"\ + "vfmaddsub213pd %%ymm2,"#alp_i",%%ymm3; vfmaddsub231pd "#c1","#alp_r",%%ymm3;"\ + "vmovupd %%xmm3,(%3); vextractf128 $1,%%ymm3,(%3,%4,1); leaq (%3,%4,2),%3;" +#else //do conjugation on a_block + #define unit_save_m1n1(alp_r,alp_i,c1,...) 
\ + "vpermilpd $5,"#c1",%%xmm3; vfmsubadd213pd ("#__VA_ARGS__"),"#alp_r","#c1";"\ + "vfmsubadd231pd %%xmm3,"#alp_i","#c1";vmovupd "#c1",("#__VA_ARGS__");" + #define unit_save_m1n2(alp_r,alp_i,c1) \ + "vpermilpd $5,"#c1",%%ymm3; vmovupd (%3),%%xmm2; vinsertf128 $1,(%3,%4,1),%%ymm2,%%ymm2;"\ + "vfmsubadd213pd %%ymm2,"#alp_r","#c1"; vfmsubadd213pd "#c1","#alp_i",%%ymm3;"\ + "vmovupd %%xmm3,(%3); vextractf128 $1,%%ymm3,(%3,%4,1); leaq (%3,%4,2),%3;" +#endif +#define SAVE_SETUP_m1 "movq %2,%3; addq $16,%2; vbroadcastsd (%5),%%ymm0; vbroadcastsd 8(%5),%%ymm1;" +#define SAVE_m1n1 SAVE_SETUP_m1 unit_save_m1n1(%%xmm0,%%xmm1,%%xmm4,%3) +#define SAVE_m1n2 SAVE_SETUP_m1 unit_save_m1n2(%%ymm0,%%ymm1,%%ymm4) +#define SAVE_m1n4 SAVE_m1n2 unit_save_m1n2(%%ymm0,%%ymm1,%%ymm5) +#define SAVE_m1n6 SAVE_m1n4 unit_save_m1n2(%%ymm0,%%ymm1,%%ymm6) +#define SAVE_m1n8 SAVE_m1n6 unit_save_m1n2(%%ymm0,%%ymm1,%%ymm7) +#define SAVE_m1n10 SAVE_m1n8 unit_save_m1n2(%%ymm0,%%ymm1,%%ymm8) +#define SAVE_m1n12 SAVE_m1n10 unit_save_m1n2(%%ymm0,%%ymm1,%%ymm9) +#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;" +#define INIT_m1n2 INIT_m2n1 +#define INIT_m1n4 INIT_m1n2 "vpxor %%ymm5,%%ymm5,%%ymm5;" +#define INIT_m1n6 INIT_m1n4 "vpxor %%ymm6,%%ymm6,%%ymm6;" +#define INIT_m1n8 INIT_m1n6 "vpxor %%ymm7,%%ymm7,%%ymm7;" +#define INIT_m1n10 INIT_m1n8 "vpxor %%ymm8,%%ymm8,%%ymm8;" +#define INIT_m1n12 INIT_m1n10 "vpxor %%ymm9,%%ymm9,%%ymm9;" +#define COMPUTE_m1(ndim) \ + INIT_m1n##ndim\ + COMPUTE_INIT\ + #ndim"88110:\n\t"\ + "testq %6,%6; jz "#ndim"88111f;"\ + KERNEL_t_k1m1n##ndim\ + "decq %6; jmp "#ndim"88110b;"\ + #ndim"88111:\n\t"\ + SAVE_m1n##ndim + +#define COMPUTE(ndim) {\ + b_pref = b_pointer + ndim * K * 2;\ + __asm__ __volatile__(\ + GENERAL_INIT\ + CONSTZMM_INIT\ + "cmpq $4,%7;jb 33101"#ndim"f;"\ + "33109"#ndim":\n\t"\ + COMPUTE_m4(ndim)\ + "subq $4,%7;cmpq $4,%7;jnb 33109"#ndim"b;"\ + "33101"#ndim":\n\t"\ + "cmpq $2,%7;jb 33102"#ndim"f;"\ + COMPUTE_m2(ndim)\ + "subq $2,%7;"\ + "33102"#ndim":\n\t"\ + "testq %7,%7;jz 33103"#ndim"f;"\ + COMPUTE_m1(ndim)\ + "33103"#ndim":\n\t"\ + GENERAL_RECOVER\ + :"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(c_store),"+r"(ldc_in_bytes),"+r"(constval),"+r"(K),"+r"(M),"+r"(b_pref)\ + ::"r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14",\ + "zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31",\ + "cc","memory");\ + a_pointer -= M * K * 2; b_pointer += ndim * K * 2; c_pointer += (LDC * ndim - M) * 2;\ +} + +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alphar, double alphai, double * __restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG LDC) +{ + if(m==0||n==0||k==0) return 0; + int64_t ldc_in_bytes = (int64_t)LDC * sizeof(double) * 2; double const_val[4] = {alphar, alphai, -1, 1}; + int64_t M = (int64_t)m, K = (int64_t)k; + BLASLONG n_count = n; + double *a_pointer = A,*b_pointer = B,*c_pointer = C,*c_store = C,*constval = const_val,*b_pref = B; + for(;n_count>11;n_count-=12) COMPUTE(12) + for(;n_count>9;n_count-=10) COMPUTE(10) + for(;n_count>7;n_count-=8) COMPUTE(8) + for(;n_count>5;n_count-=6) COMPUTE(6) + for(;n_count>3;n_count-=4) COMPUTE(4) + for(;n_count>1;n_count-=2) COMPUTE(2) + if(n_count>0) COMPUTE(1) + return 0; +} diff --git a/kernel/x86_64/zgemv_n_4.c b/kernel/x86_64/zgemv_n_4.c index 0fedc496b..1f9d41859 100644 --- a/kernel/x86_64/zgemv_n_4.c +++ 
b/kernel/x86_64/zgemv_n_4.c @@ -30,7 +30,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "zgemv_n_microk_haswell-4.c" #elif defined(SANDYBRIDGE) #include "zgemv_n_microk_sandy-4.c" diff --git a/kernel/x86_64/zgemv_t_4.c b/kernel/x86_64/zgemv_t_4.c index 2ab7a671b..34f28b224 100644 --- a/kernel/x86_64/zgemv_t_4.c +++ b/kernel/x86_64/zgemv_t_4.c @@ -31,7 +31,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "zgemv_t_microk_bulldozer-4.c" -#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "zgemv_t_microk_haswell-4.c" #endif @@ -235,9 +235,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, if ( m < 1 ) return(0); if ( n < 1 ) return(0); - inc_x <<= 1; - inc_y <<= 1; - lda <<= 1; + inc_x *= 2; + inc_y *= 2; + lda <<= 1; lda4 = lda << 2; xbuffer = buffer; diff --git a/kernel/x86_64/znrm2.S b/kernel/x86_64/znrm2.S index 4115eab1d..0d2aa3480 100644 --- a/kernel/x86_64/znrm2.S +++ b/kernel/x86_64/znrm2.S @@ -50,6 +50,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + fldz testq M, M jle .L999 diff --git a/kernel/x86_64/zscal.S b/kernel/x86_64/zscal.S index 5282e0f72..5ed4c4576 100644 --- a/kernel/x86_64/zscal.S +++ b/kernel/x86_64/zscal.S @@ -50,6 +50,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + salq $ZBASE_SHIFT, INCX FLD 8(%rsp) diff --git a/kernel/x86_64/zscal.c b/kernel/x86_64/zscal.c index 2a6d0e4c7..09a702a81 100644 --- a/kernel/x86_64/zscal.c +++ b/kernel/x86_64/zscal.c @@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" -#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #include "zscal_microk_haswell-2.c" #elif defined(BULLDOZER) || defined(PILEDRIVER) #include "zscal_microk_bulldozer-2.c" diff --git a/kernel/x86_64/zsum.S b/kernel/x86_64/zsum.S index 45e0ddff5..aa02637e4 100644 --- a/kernel/x86_64/zsum.S +++ b/kernel/x86_64/zsum.S @@ -50,6 +50,10 @@ PROLOGUE PROFCODE +#ifdef WINDOWS_ABI + emms +#endif + fldz testq M, M jle .L999 diff --git a/kernel/x86_64/zsymv_L_sse.S b/kernel/x86_64/zsymv_L_sse.S index e44bd7550..83ed41ba1 100644 --- a/kernel/x86_64/zsymv_L_sse.S +++ b/kernel/x86_64/zsymv_L_sse.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 24) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/x86_64/zsymv_L_sse2.S b/kernel/x86_64/zsymv_L_sse2.S index e9f330c36..7ed2faf0f 100644 --- a/kernel/x86_64/zsymv_L_sse2.S +++ b/kernel/x86_64/zsymv_L_sse2.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 24) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/x86_64/zsymv_U_sse.S b/kernel/x86_64/zsymv_U_sse.S index 9f0dead18..5945f3f81 100644 --- a/kernel/x86_64/zsymv_U_sse.S +++ b/kernel/x86_64/zsymv_U_sse.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 24) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/x86_64/zsymv_U_sse2.S b/kernel/x86_64/zsymv_U_sse2.S index b6106a37d..484d74f14 100644 --- a/kernel/x86_64/zsymv_U_sse2.S +++ b/kernel/x86_64/zsymv_U_sse2.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 24) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) || defined (COOPERLAKE) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/zarch/KERNEL.Z13 b/kernel/zarch/KERNEL.Z13 index b1ffd3c54..3bcc32197 100644 --- a/kernel/zarch/KERNEL.Z13 +++ b/kernel/zarch/KERNEL.Z13 @@ -96,10 +96,10 @@ SGEMMINCOPY = ../generic/gemm_ncopy_8.c SGEMMITCOPY = ../generic/gemm_tcopy_8.c SGEMMONCOPY = ../generic/gemm_ncopy_4.c SGEMMOTCOPY = ../generic/gemm_tcopy_4.c -SGEMMINCOPYOBJ = sgemm_incopy.o -SGEMMITCOPYOBJ = sgemm_itcopy.o -SGEMMONCOPYOBJ = sgemm_oncopy.o -SGEMMOTCOPYOBJ = sgemm_otcopy.o +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) @@ -108,16 +108,16 @@ DGEMMINCOPY = ../generic/gemm_ncopy_8.c DGEMMITCOPY = ../generic/gemm_tcopy_8.c DGEMMONCOPY = ../generic/gemm_ncopy_4.c 
DGEMMOTCOPY = ../generic/gemm_tcopy_4.c -DGEMMINCOPYOBJ = dgemm_incopy.o -DGEMMITCOPYOBJ = dgemm_itcopy.o -DGEMMONCOPYOBJ = dgemm_oncopy.o -DGEMMOTCOPYOBJ = dgemm_otcopy.o +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CGEMMKERNEL = ctrmm4x4V.S CGEMMONCOPY = ../generic/zgemm_ncopy_4.c CGEMMOTCOPY = ../generic/zgemm_tcopy_4.c -CGEMMONCOPYOBJ = cgemm_oncopy.o -CGEMMOTCOPYOBJ = cgemm_otcopy.o +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZGEMMKERNEL = ztrmm4x4V.S ZGEMMONCOPY = ../generic/zgemm_ncopy_4.c diff --git a/kernel/zarch/KERNEL.Z14 b/kernel/zarch/KERNEL.Z14 index 971896c2d..3510938a7 100644 --- a/kernel/zarch/KERNEL.Z14 +++ b/kernel/zarch/KERNEL.Z14 @@ -86,38 +86,38 @@ DGEMVTKERNEL = dgemv_t_4.c CGEMVTKERNEL = cgemv_t_4.c ZGEMVTKERNEL = zgemv_t_4.c -STRMMKERNEL = strmm8x4V.S -DTRMMKERNEL = trmm8x4V.S +STRMMKERNEL = gemm_vec.c +DTRMMKERNEL = gemm_vec.c CTRMMKERNEL = ctrmm4x4V.S ZTRMMKERNEL = ztrmm4x4V.S -SGEMMKERNEL = strmm8x4V.S -SGEMMINCOPY = ../generic/gemm_ncopy_8.c -SGEMMITCOPY = ../generic/gemm_tcopy_8.c -SGEMMONCOPY = ../generic/gemm_ncopy_4.c -SGEMMOTCOPY = ../generic/gemm_tcopy_4.c -SGEMMINCOPYOBJ = sgemm_incopy.o -SGEMMITCOPYOBJ = sgemm_itcopy.o -SGEMMONCOPYOBJ = sgemm_oncopy.o -SGEMMOTCOPYOBJ = sgemm_otcopy.o - - - -DGEMMKERNEL = gemm8x4V.S +SGEMMKERNEL = gemm_vec.c +ifneq ($(SGEMM_UNROLL_M),$(SGEMM_UNROLL_N)) +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = gemm_vec.c DGEMMINCOPY = ../generic/gemm_ncopy_8.c DGEMMITCOPY = ../generic/gemm_tcopy_8.c DGEMMONCOPY = ../generic/gemm_ncopy_4.c DGEMMOTCOPY = ../generic/gemm_tcopy_4.c -DGEMMINCOPYOBJ = dgemm_incopy.o -DGEMMITCOPYOBJ = dgemm_itcopy.o -DGEMMONCOPYOBJ = dgemm_oncopy.o -DGEMMOTCOPYOBJ = dgemm_otcopy.o +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CGEMMKERNEL = ctrmm4x4V.S CGEMMONCOPY = ../generic/zgemm_ncopy_4.c CGEMMOTCOPY = ../generic/zgemm_tcopy_4.c -CGEMMONCOPYOBJ = cgemm_oncopy.o -CGEMMOTCOPYOBJ = cgemm_otcopy.o +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZGEMMKERNEL = ztrmm4x4V.S ZGEMMONCOPY = ../generic/zgemm_ncopy_4.c @@ -145,7 +145,3 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - - - - diff --git a/kernel/zarch/KERNEL.ZARCH_GENERIC b/kernel/zarch/KERNEL.ZARCH_GENERIC index 3bbeb9155..33850d0f7 100644 --- a/kernel/zarch/KERNEL.ZARCH_GENERIC +++ b/kernel/zarch/KERNEL.ZARCH_GENERIC @@ -94,26 +94,26 @@ ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c SGEMMKERNEL = ../generic/gemmkernel_2x2.c SGEMMONCOPY = ../generic/gemm_ncopy_2.c SGEMMOTCOPY = ../generic/gemm_tcopy_2.c -SGEMMONCOPYOBJ = sgemm_oncopy.o -SGEMMOTCOPYOBJ = sgemm_otcopy.o +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = 
sgemm_otcopy$(TSUFFIX).$(SUFFIX) DGEMMKERNEL = ../generic/gemmkernel_2x2.c DGEMMONCOPY = ../generic/gemm_ncopy_2.c DGEMMOTCOPY = ../generic/gemm_tcopy_2.c -DGEMMONCOPYOBJ = dgemm_oncopy.o -DGEMMOTCOPYOBJ = dgemm_otcopy.o +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CGEMMKERNEL = ../generic/zgemmkernel_2x2.c CGEMMONCOPY = ../generic/zgemm_ncopy_2.c CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -CGEMMONCOPYOBJ = cgemm_oncopy.o -CGEMMOTCOPYOBJ = cgemm_otcopy.o +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -ZGEMMONCOPYOBJ = zgemm_oncopy.o -ZGEMMOTCOPYOBJ = zgemm_otcopy.o +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c diff --git a/kernel/zarch/camax.c b/kernel/zarch/camax.c index b10ca4752..018a9a9c0 100644 --- a/kernel/zarch/camax.c +++ b/kernel/zarch/camax.c @@ -136,7 +136,7 @@ static FLOAT camax_kernel_32(BLASLONG n, FLOAT *x) { "wfmaxsb %%v0,%%v0,%%v16,0\n\t" "ler %[amax],%%f0" : [amax] "=f"(amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/camin.c b/kernel/zarch/camin.c index 40945fae8..7b3b36630 100644 --- a/kernel/zarch/camin.c +++ b/kernel/zarch/camin.c @@ -136,7 +136,7 @@ static FLOAT camin_kernel_32(BLASLONG n, FLOAT *x) { "wfminsb %%v0,%%v0,%%v16,0\n\t" "ler %[amin],%%f0" : [amin] "=f"(amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/casum.c b/kernel/zarch/casum.c index e28f2018c..f3b9ed628 100644 --- a/kernel/zarch/casum.c +++ b/kernel/zarch/casum.c @@ -108,7 +108,7 @@ static FLOAT casum_kernel_32(BLASLONG n, FLOAT *x) { "vfasb %%v24,%%v24,%%v25\n\t" "vstef %%v24,%[asum],0" : [asum] "=Q"(asum),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/caxpy.c b/kernel/zarch/caxpy.c index 14a124ae2..c0a7a71f4 100644 --- a/kernel/zarch/caxpy.c +++ b/kernel/zarch/caxpy.c @@ -99,9 +99,9 @@ static void caxpy_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) { "vst %%v19,112(%%r1,%[y])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[2]; } *) alpha),[alpha] "a"(alpha) + : "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[2]) alpha),[alpha] "a"(alpha) : "cc", "r1", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/ccopy.c 
b/kernel/zarch/ccopy.c index 0a5e03992..9e08edc3b 100644 --- a/kernel/zarch/ccopy.c +++ b/kernel/zarch/ccopy.c @@ -36,9 +36,9 @@ static void ccopy_kernel_32(BLASLONG n, FLOAT *x, FLOAT *y) { "la %[x],256(%[x])\n\t" "la %[y],256(%[y])\n\t" "brctg %[n],0b" - : "=m"(*(struct { FLOAT x[n * 2]; } *) y),[x] "+&a"(x),[y] "+&a"(y), + : "=m"(*(FLOAT (*)[n * 2]) y),[x] "+&a"(x),[y] "+&a"(y), [n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x) + : "m"(*(const FLOAT (*)[n * 2]) x) : "cc"); } diff --git a/kernel/zarch/cdot.c b/kernel/zarch/cdot.c index d90f9c871..0d6dfbeb1 100644 --- a/kernel/zarch/cdot.c +++ b/kernel/zarch/cdot.c @@ -97,9 +97,9 @@ static void cdot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) { "vstef %%v24,4(%[d]),1\n\t" "vstef %%v25,8(%[d]),1\n\t" "vstef %%v25,12(%[d]),0" - : "=m"(*(struct { FLOAT x[4]; } *) d),[n] "+&r"(n) - : [d] "a"(d), "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[n * 2]; } *) y),[y] "a"(y) + : "=m"(*(FLOAT (*)[4]) d),[n] "+&r"(n) + : [d] "a"(d), "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[n * 2]) y),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/cgemv_n_4.c b/kernel/zarch/cgemv_n_4.c index 5c36bc338..5fdf7717e 100644 --- a/kernel/zarch/cgemv_n_4.c +++ b/kernel/zarch/cgemv_n_4.c @@ -146,12 +146,12 @@ static void cgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) { "vst %%v0,0(%%r1,%[y])\n\t" "agfi %%r1,16\n\t" "brctg %[n],0b\n\t" - : "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap2),[ap2] "a"(ap2), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap3),[ap3] "a"(ap3), - "m"(*(const struct { FLOAT x[8]; } *) x),[x] "a"(x) + : "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n * 2]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n * 2]) ap2),[ap2] "a"(ap2), + "m"(*(const FLOAT (*)[n * 2]) ap3),[ap3] "a"(ap3), + "m"(*(const FLOAT (*)[8]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -238,10 +238,10 @@ static void cgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) { "vst %%v0,0(%%r1,%[y])\n\t" "agfi %%r1,16\n\t" "brctg %[n],0b\n\t" - : "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[4]; } *) x),[x] "a"(x) + : "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n * 2]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[4]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } @@ -307,9 +307,9 @@ static void cgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y) { "vst %%v0,0(%%r1,%[y])\n\t" "agfi %%r1,16\n\t" "brctg %[n],0b\n\t" - : "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap),[ap] "a"(ap), - "m"(*(const struct { FLOAT x[2]; } *) x),[x] "a"(x) + : "+m"(*(FLOAT (*)[n * 2]) 
y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap),[ap] "a"(ap), + "m"(*(const FLOAT (*)[2]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v16", "v17", "v18", "v19"); } @@ -350,8 +350,8 @@ static void add_y_4(BLASLONG n, FLOAT *src, FLOAT *dest, FLOAT alpha_r, "vst %%v23,16(%%r1,%[dest])\n\t" "agfi %%r1,32\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) dest),[n] "+&r"(n) - : [dest] "a"(dest), "m"(*(const struct { FLOAT x[n * 2]; } *) src), + : "+m"(*(FLOAT (*)[n * 2]) dest),[n] "+&r"(n) + : [dest] "a"(dest), "m"(*(const FLOAT (*)[n * 2]) src), [src] "a"(src),[alpha_r] "Q"(alpha_r),[alpha_i] "Q"(alpha_i) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); diff --git a/kernel/zarch/cgemv_t_4.c b/kernel/zarch/cgemv_t_4.c index e10edfab0..2bdac9ea1 100644 --- a/kernel/zarch/cgemv_t_4.c +++ b/kernel/zarch/cgemv_t_4.c @@ -159,13 +159,13 @@ static void cgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, "vfmasb %%v23,%%v19,%%v21,%%v23\n\t" "vst %%v22,0(%[y])\n\t" "vst %%v23,16(%[y])" - : "+m"(*(struct { FLOAT x[8]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap2),[ap2] "a"(ap2), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap3),[ap3] "a"(ap3), - "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[2]; } *) alpha),[alpha] "a"(alpha) + : "+m"(*(FLOAT (*)[8]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n * 2]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n * 2]) ap2),[ap2] "a"(ap2), + "m"(*(const FLOAT (*)[n * 2]) ap3),[ap3] "a"(ap3), + "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[2]) alpha),[alpha] "a"(alpha) : "cc", "r1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -271,11 +271,11 @@ static void cgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, "vfmasb %%v20,%%v16,%%v18,%%v20\n\t" "vfmasb %%v20,%%v17,%%v19,%%v20\n\t" "vst %%v20,0(%[y])" - : "+m"(*(struct { FLOAT x[4]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[2]; } *) alpha),[alpha] "a"(alpha) + : "+m"(*(FLOAT (*)[4]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n * 2]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[2]) alpha),[alpha] "a"(alpha) : "cc", "r1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } @@ -361,10 +361,10 @@ static void cgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, "vfmasb %%v0,%%v16,%%v18,%%v0\n\t" "vfmasb %%v0,%%v17,%%v19,%%v0\n\t" "vsteg %%v0,0(%[y]),0" - : "+m"(*(struct { FLOAT x[2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap),[ap] "a"(ap), - "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[2]; } *) alpha),[alpha] "a"(alpha) + : "+m"(*(FLOAT (*)[2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap),[ap] "a"(ap), + "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[2]) alpha),[alpha] "a"(alpha) : 
"cc", "r1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19"); } diff --git a/kernel/zarch/crot.c b/kernel/zarch/crot.c index aab155f8b..5a0990f3d 100644 --- a/kernel/zarch/crot.c +++ b/kernel/zarch/crot.c @@ -169,8 +169,8 @@ static void crot_kernel_32(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) { "vst %%v23, 240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x), - "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) + : "+m"(*(FLOAT (*)[n * 2]) x), + "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) : [x] "a"(x),[y] "a"(y),[c] "Q"(*c),[s] "Q"(*s) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", diff --git a/kernel/zarch/cscal.c b/kernel/zarch/cscal.c index 9fc54cf29..57bb89c0a 100644 --- a/kernel/zarch/cscal.c +++ b/kernel/zarch/cscal.c @@ -25,67 +25,35 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +/* + * Avoid contraction of floating point operations, specifically fused + * multiply-add, because they can cause unexpected results in complex + * multiplication. + */ +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC optimize ("fp-contract=off") +#endif + +#if defined(__clang__) +#pragma clang fp contract(off) +#endif + #include "common.h" +#include "vector-common.h" -static void cscal_kernel_16(BLASLONG n, FLOAT *alpha, FLOAT *x) { - __asm__("vlrepf %%v0,0(%[alpha])\n\t" - "vlef %%v1,4(%[alpha]),0\n\t" - "vlef %%v1,4(%[alpha]),2\n\t" - "vflcsb %%v1,%%v1\n\t" - "vlef %%v1,4(%[alpha]),1\n\t" - "vlef %%v1,4(%[alpha]),3\n\t" - "srlg %[n],%[n],4\n\t" - "xgr %%r1,%%r1\n\t" - "0:\n\t" - "pfd 2, 1024(%%r1,%[x])\n\t" - "vl %%v16,0(%%r1,%[x])\n\t" - "vl %%v17,16(%%r1,%[x])\n\t" - "vl %%v18,32(%%r1,%[x])\n\t" - "vl %%v19,48(%%r1,%[x])\n\t" - "vl %%v20,64(%%r1,%[x])\n\t" - "vl %%v21,80(%%r1,%[x])\n\t" - "vl %%v22,96(%%r1,%[x])\n\t" - "vl %%v23,112(%%r1,%[x])\n\t" - "verllg %%v24,%%v16,32\n\t" - "verllg %%v25,%%v17,32\n\t" - "verllg %%v26,%%v18,32\n\t" - "verllg %%v27,%%v19,32\n\t" - "verllg %%v28,%%v20,32\n\t" - "verllg %%v29,%%v21,32\n\t" - "verllg %%v30,%%v22,32\n\t" - "verllg %%v31,%%v23,32\n\t" - "vfmsb %%v16,%%v16,%%v0\n\t" - "vfmsb %%v17,%%v17,%%v0\n\t" - "vfmsb %%v18,%%v18,%%v0\n\t" - "vfmsb %%v19,%%v19,%%v0\n\t" - "vfmsb %%v20,%%v20,%%v0\n\t" - "vfmsb %%v21,%%v21,%%v0\n\t" - "vfmsb %%v22,%%v22,%%v0\n\t" - "vfmsb %%v23,%%v23,%%v0\n\t" - "vfmasb %%v16,%%v24,%%v1,%%v16\n\t" - "vfmasb %%v17,%%v25,%%v1,%%v17\n\t" - "vfmasb %%v18,%%v26,%%v1,%%v18\n\t" - "vfmasb %%v19,%%v27,%%v1,%%v19\n\t" - "vfmasb %%v20,%%v28,%%v1,%%v20\n\t" - "vfmasb %%v21,%%v29,%%v1,%%v21\n\t" - "vfmasb %%v22,%%v30,%%v1,%%v22\n\t" - "vfmasb %%v23,%%v31,%%v1,%%v23\n\t" - "vst %%v16,0(%%r1,%[x])\n\t" - "vst %%v17,16(%%r1,%[x])\n\t" - "vst %%v18,32(%%r1,%[x])\n\t" - "vst %%v19,48(%%r1,%[x])\n\t" - "vst %%v20,64(%%r1,%[x])\n\t" - "vst %%v21,80(%%r1,%[x])\n\t" - "vst %%v22,96(%%r1,%[x])\n\t" - "vst %%v23,112(%%r1,%[x])\n\t" - "agfi %%r1,128\n\t" - "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x),[n] "+&r"(n) - : [x] "a"(x), "m"(*(const struct { FLOAT x[2]; } *) alpha), - [alpha] "a"(alpha) - : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", - "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", - "v31"); +static void cscal_kernel_16(BLASLONG n, FLOAT da_r, FLOAT da_i, FLOAT *x) { + 
vector_float da_r_vec = vec_splats(da_r); + vector_float da_i_vec = { -da_i, da_i, -da_i, da_i }; + + vector_float *x_vec_ptr = (vector_float *)x; + +#pragma GCC unroll 16 + for (size_t i = 0; i < n/2; i++) { + vector_float x_vec = vec_load_hinted(x + i * VLEN_FLOATS); + vector_float x_swapped = {x_vec[1], x_vec[0], x_vec[3], x_vec[2]}; + + x_vec_ptr[i] = x_vec * da_r_vec + x_swapped * da_i_vec; + } } static void cscal_kernel_16_zero_r(BLASLONG n, FLOAT *alpha, FLOAT *x) { @@ -132,8 +100,8 @@ static void cscal_kernel_16_zero_r(BLASLONG n, FLOAT *alpha, FLOAT *x) { "vst %%v23,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x),[n] "+&r"(n) - : [x] "a"(x), "m"(*(const struct { FLOAT x[2]; } *) alpha), + : "+m"(*(FLOAT (*)[n * 2]) x),[n] "+&r"(n) + : [x] "a"(x), "m"(*(const FLOAT (*)[2]) alpha), [alpha] "a"(alpha) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); @@ -171,8 +139,8 @@ static void cscal_kernel_16_zero_i(BLASLONG n, FLOAT *alpha, FLOAT *x) { "vst %%v23,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x),[n] "+&r"(n) - : [x] "a"(x), "m"(*(const struct { FLOAT x[2]; } *) alpha), + : "+m"(*(FLOAT (*)[n * 2]) x),[n] "+&r"(n) + : [x] "a"(x), "m"(*(const FLOAT (*)[2]) alpha), [alpha] "a"(alpha) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); @@ -194,19 +162,17 @@ static void cscal_kernel_16_zero(BLASLONG n, FLOAT *x) { "vst %%v0,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "=m"(*(struct { FLOAT x[n * 2]; } *) x),[n] "+&r"(n) + : "=m"(*(FLOAT (*)[n * 2]) x),[n] "+&r"(n) : [x] "a"(x) : "cc", "r1", "v0"); } -static void cscal_kernel_inc_8(BLASLONG n, FLOAT *alpha, FLOAT *x, +static void cscal_kernel_inc_8(BLASLONG n, FLOAT da_r, FLOAT da_i, FLOAT *x, BLASLONG inc_x) { BLASLONG i; BLASLONG inc_x2 = 2 * inc_x; BLASLONG inc_x3 = inc_x2 + inc_x; FLOAT t0, t1, t2, t3; - FLOAT da_r = alpha[0]; - FLOAT da_i = alpha[1]; for (i = 0; i < n; i += 4) { t0 = da_r * x[0] - da_i * x[1]; @@ -324,9 +290,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, BLASLONG n1 = n & -8; if (n1 > 0) { - alpha[0] = da_r; - alpha[1] = da_i; - cscal_kernel_inc_8(n1, alpha, x, inc_x); + cscal_kernel_inc_8(n1, da_r, da_i, x, inc_x); j = n1; i = n1 * inc_x; } @@ -362,7 +326,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, else if (da_i == 0) cscal_kernel_16_zero_i(n1, alpha, x); else - cscal_kernel_16(n1, alpha, x); + cscal_kernel_16(n1, da_r, da_i, x); i = n1 << 1; j = n1; diff --git a/kernel/zarch/csum.c b/kernel/zarch/csum.c index c0b8c6371..b076501aa 100644 --- a/kernel/zarch/csum.c +++ b/kernel/zarch/csum.c @@ -88,9 +88,9 @@ static FLOAT csum_kernel_32(BLASLONG n, FLOAT *x) { "vfasb %%v24,%%v24,%%v25\n\t" "vrepf %%v25,%%v24,2\n\t" "vfasb %%v24,%%v24,%%v25\n\t" - "vstef %%v24,%[asum],0" + "vstef %%v24,%[sum],0" : [sum] "=Q"(sum),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/cswap.c b/kernel/zarch/cswap.c index 198994e18..f3ab77ab5 100644 --- a/kernel/zarch/cswap.c +++ b/kernel/zarch/cswap.c @@ -99,8 +99,8 @@ static void cswap_kernel_32(BLASLONG n, FLOAT *x, FLOAT *y) { "vst %%v31, 240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT 
x[n * 2]; } *) x), - "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) + : "+m"(*(FLOAT (*)[n * 2]) x), + "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) : [x] "a"(x),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", diff --git a/kernel/zarch/ctrmm4x4V.S b/kernel/zarch/ctrmm4x4V.S index c0e4df17d..123f2ead0 100644 --- a/kernel/zarch/ctrmm4x4V.S +++ b/kernel/zarch/ctrmm4x4V.S @@ -198,7 +198,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,4 nill LOCAL_VAR1,3 #else - la LOCAL_VAR1,3(0,0) + lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x4_BK_Store @@ -254,7 +254,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,4 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L2x4_BK_Store @@ -305,7 +305,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,4 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x4_BK_Store @@ -385,7 +385,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x2_BK_Store @@ -442,7 +442,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L2x2_BK_Store @@ -492,7 +492,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x2_BK_Store @@ -568,7 +568,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x1_BK_Store @@ -620,7 +620,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L2x1_BK_Store @@ -670,7 +670,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x1_BK_Store diff --git a/kernel/zarch/damax.c b/kernel/zarch/damax.c index caacb50dc..d19181cbe 100644 --- a/kernel/zarch/damax.c +++ b/kernel/zarch/damax.c @@ -76,7 +76,7 @@ static FLOAT damax_kernel_32(BLASLONG n, FLOAT *x) { "wfmaxdb %%v0,%%v0,%%v16,8\n\t" "lpdr %[amax],%%f0" : [amax] "=f"(amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/damax_z13.c b/kernel/zarch/damax_z13.c index f3db4c108..5bc0d1721 100644 --- a/kernel/zarch/damax_z13.c +++ b/kernel/zarch/damax_z13.c @@ -110,7 +110,7 @@ static FLOAT damax_kernel_32(BLASLONG n, FLOAT *x) { "vsel %%v0,%%v0,%%v16,%%v17\n\t" "ldr %[amax],%%f0" : [amax] "=f"(amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/damin.c b/kernel/zarch/damin.c index 0163a144b..4e0558af4 100644 --- a/kernel/zarch/damin.c +++ b/kernel/zarch/damin.c @@ -76,7 +76,7 @@ static FLOAT damin_kernel_32(BLASLONG n, FLOAT *x) { "wfmindb %%v0,%%v0,%%v16,8\n\t" "lpdr %[amin],%%f0" : [amin] "=f"(amin),[n] "+&r"(n) - : 
"m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/damin_z13.c b/kernel/zarch/damin_z13.c index 4196b2e15..a7efd4b26 100644 --- a/kernel/zarch/damin_z13.c +++ b/kernel/zarch/damin_z13.c @@ -110,7 +110,7 @@ static FLOAT damin_kernel_32(BLASLONG n, FLOAT *x) { "vsel %%v0,%%v0,%%v16,%%v17\n\t" "ldr %[amin],%%f0" : [amin] "=f"(amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/dasum.c b/kernel/zarch/dasum.c index aa1382b10..9703cd3be 100644 --- a/kernel/zarch/dasum.c +++ b/kernel/zarch/dasum.c @@ -106,7 +106,7 @@ static FLOAT dasum_kernel_32(BLASLONG n, FLOAT *x) { "vfadb %%v24,%%v24,%%v25\n\t" "vsteg %%v24,%[asum],0" : [asum] "=Q"(asum),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/daxpy.c b/kernel/zarch/daxpy.c index 5b0208c20..4e59ef7c6 100644 --- a/kernel/zarch/daxpy.c +++ b/kernel/zarch/daxpy.c @@ -100,8 +100,8 @@ static void daxpy_kernel_32(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) { "vst %%v27,240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x), + : "+m"(*(FLOAT (*)[n]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) x),[x] "a"(x), [alpha] "Q"(*alpha) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/dcopy.c b/kernel/zarch/dcopy.c index 691b90c64..3c546568f 100644 --- a/kernel/zarch/dcopy.c +++ b/kernel/zarch/dcopy.c @@ -36,8 +36,8 @@ static void dcopy_kernel_32(BLASLONG n, FLOAT *x, FLOAT *y) { "la %[x],256(%[x])\n\t" "la %[y],256(%[y])\n\t" "brctg %[n],0b" - : "=m"(*(struct { FLOAT x[n]; } *) y),[x] "+&a"(x),[y] "+&a"(y),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x) + : "=m"(*(FLOAT (*)[n]) y),[x] "+&a"(x),[y] "+&a"(y),[n] "+&r"(n) + : "m"(*(const FLOAT (*)[n]) x) : "cc"); } diff --git a/kernel/zarch/ddot.c b/kernel/zarch/ddot.c index 9cad68f4b..c0ed8b72e 100644 --- a/kernel/zarch/ddot.c +++ b/kernel/zarch/ddot.c @@ -80,8 +80,8 @@ static FLOAT ddot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y) { "adbr %%f0,%%f1\n\t" "ldr %[dot],%%f0" : [dot] "=f"(dot),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[n]; } *) y),[y] "a"(y) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[n]) y),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/dgemv_n_4.c b/kernel/zarch/dgemv_n_4.c index 502ba837e..e1c5c4472 100644 --- a/kernel/zarch/dgemv_n_4.c +++ b/kernel/zarch/dgemv_n_4.c @@ -169,13 +169,13 @@ static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, "agfi %%r1,32\n\t" "brctg %%r0,2b\n\t" "3:\n\t" - "nop" - : "+m"(*(struct { FLOAT x[n]; } *) y) - : [y] "a"(y), 
"m"(*(const struct { FLOAT x[n]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n]; } *) ap2),[ap2] "a"(ap2), - "m"(*(const struct { FLOAT x[n]; } *) ap3),[ap3] "a"(ap3), - "m"(*(const struct { FLOAT x[4]; } *) x),[x] "a"(x),[alpha] "Q"(*alpha), + "nop 0" + : "+m"(*(FLOAT (*)[n]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n]) ap2),[ap2] "a"(ap2), + "m"(*(const FLOAT (*)[n]) ap3),[ap3] "a"(ap3), + "m"(*(const FLOAT (*)[4]) x),[x] "a"(x),[alpha] "Q"(*alpha), [n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", @@ -274,11 +274,11 @@ static void dgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, "agfi %%r1,32\n\t" "brctg %%r0,2b\n\t" "3:\n\t" - "nop" - : "+m"(*(struct { FLOAT x[n]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[2]; } *) x),[x] "a"(x),[alpha] "Q"(*alpha), + "nop 0" + : "+m"(*(FLOAT (*)[n]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[2]) x),[x] "a"(x),[alpha] "Q"(*alpha), [n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", @@ -351,9 +351,9 @@ static void dgemv_kernel_4x1(BLASLONG n, FLOAT *a0, FLOAT *x, FLOAT *y, "agfi %%r1,32\n\t" "brctg %%r0,2b\n\t" "3:\n\t" - "nop" - : "+m"(*(struct { FLOAT x[n]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) a0),[a0] "a"(a0), + "nop 0" + : "+m"(*(FLOAT (*)[n]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) a0),[a0] "a"(a0), "m"(*(const FLOAT (*)[1]) x),[x] "a"(x),[alpha] "Q"(*alpha), [n] "r"(n) : "cc", "r0", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", diff --git a/kernel/zarch/dgemv_t_4.c b/kernel/zarch/dgemv_t_4.c index de72a1798..513cffe5a 100644 --- a/kernel/zarch/dgemv_t_4.c +++ b/kernel/zarch/dgemv_t_4.c @@ -173,12 +173,12 @@ static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) { "vrepg %%v4,%%v3,1\n\t" "adbr %%f3,%%f4\n\t" "std %%f3,24(%[y])" - : "=m"(*(struct { FLOAT x[4]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n]; } *) ap2),[ap2] "a"(ap2), - "m"(*(const struct { FLOAT x[n]; } *) ap3),[ap3] "a"(ap3), - "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x),[n] "r"(n) + : "=m"(*(FLOAT (*)[4]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n]) ap2),[ap2] "a"(ap2), + "m"(*(const FLOAT (*)[n]) ap3),[ap3] "a"(ap3), + "m"(*(const FLOAT (*)[n]) x),[x] "a"(x),[n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -280,10 +280,10 @@ static void dgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) { "vrepg %%v2,%%v1,1\n\t" "adbr %%f1,%%f2\n\t" "std %%f1,8(%[y])" - : "=m"(*(struct { FLOAT x[2]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { 
FLOAT x[n]; } *) x),[x] "a"(x),[n] "r"(n) + : "=m"(*(FLOAT (*)[2]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n]) x),[x] "a"(x),[n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -360,8 +360,8 @@ static void dgemv_kernel_4x1(BLASLONG n, FLOAT *a0, FLOAT *x, FLOAT *y) { "adbr %%f0,%%f1\n\t" "std %%f0,0(%[y])" : "=m"(*(FLOAT (*)[1]) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) a0),[a0] "a"(a0), - "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x),[n] "r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) a0),[a0] "a"(a0), + "m"(*(const FLOAT (*)[n]) x),[x] "a"(x),[n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -438,9 +438,9 @@ static void add_y_kernel_4(BLASLONG n, FLOAT da, FLOAT *src, FLOAT *dest) { "agfi %%r1,32\n\t" "brctg %%r0,2b\n\t" "3:\n\t" - "nop" - : "+m"(*(struct { FLOAT x[n]; } *) dest) - : [dest] "a"(dest),[da] "Q"(da), "m"(*(const struct { FLOAT x[n]; } *) src), + "nop 0" + : "+m"(*(FLOAT (*)[n]) dest) + : [dest] "a"(dest),[da] "Q"(da), "m"(*(const FLOAT (*)[n]) src), [src] "a"(src),[n] "r"(n) : "cc", "r0", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", diff --git a/kernel/zarch/dmax.c b/kernel/zarch/dmax.c index cdc8d5d08..4b76e0dd6 100644 --- a/kernel/zarch/dmax.c +++ b/kernel/zarch/dmax.c @@ -73,7 +73,7 @@ static FLOAT dmax_kernel_32(BLASLONG n, FLOAT *x) { "wfmaxdb %%v0,%%v0,%%v16,0\n\t" "ldr %[max],%%f0" : [max] "=f"(max),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/dmax_z13.c b/kernel/zarch/dmax_z13.c index c4e8d91f8..93acee2db 100644 --- a/kernel/zarch/dmax_z13.c +++ b/kernel/zarch/dmax_z13.c @@ -90,7 +90,7 @@ static FLOAT dmax_kernel_32(BLASLONG n, FLOAT *x) { "vsel %%v0,%%v0,%%v16,%%v17\n\t" "ldr %[max],%%f0" : [max] "=f"(max),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/dmin.c b/kernel/zarch/dmin.c index f9b129cbd..21d55f323 100644 --- a/kernel/zarch/dmin.c +++ b/kernel/zarch/dmin.c @@ -73,7 +73,7 @@ static FLOAT dmin_kernel_32(BLASLONG n, FLOAT *x) { "wfmindb %%v0,%%v0,%%v16,0\n\t" "ldr %[min],%%f0" : [min] "=f"(min),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/dmin_z13.c b/kernel/zarch/dmin_z13.c index 77f021c1d..7d2dae3fb 100644 --- a/kernel/zarch/dmin_z13.c +++ b/kernel/zarch/dmin_z13.c @@ -90,7 +90,7 @@ static FLOAT dmin_kernel_32(BLASLONG n, FLOAT *x) { "vsel %%v0,%%v0,%%v16,%%v17\n\t" "ldr %[min],%%f0" : [min] "=f"(min),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", 
"r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/drot.c b/kernel/zarch/drot.c index 11fbe15b6..9d6d1a80d 100644 --- a/kernel/zarch/drot.c +++ b/kernel/zarch/drot.c @@ -169,7 +169,7 @@ static void drot_kernel_32(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) { "vst %%v23, 240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n]; } *) x), "+m"(*(struct { FLOAT x[n]; } *) y), + : "+m"(*(FLOAT (*)[n]) x), "+m"(*(FLOAT (*)[n]) y), [n] "+&r"(n) : [x] "a"(x),[y] "a"(y),[c] "Q"(*c),[s] "Q"(*s) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", diff --git a/kernel/zarch/dscal.c b/kernel/zarch/dscal.c index 2961eff20..a5a5e3468 100644 --- a/kernel/zarch/dscal.c +++ b/kernel/zarch/dscal.c @@ -59,7 +59,7 @@ static void dscal_kernel_16(BLASLONG n, FLOAT da, FLOAT *x) { "vst %%v31,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n]; } *) x),[n] "+&r"(n) + : "+m"(*(FLOAT (*)[n]) x),[n] "+&r"(n) : [x] "a"(x),[da] "Q"(da) : "cc", "r1", "v0", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -81,7 +81,7 @@ static void dscal_kernel_16_zero(BLASLONG n, FLOAT *x) { "vst %%v0,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "=m"(*(struct { FLOAT x[n]; } *) x),[n] "+&r"(n) + : "=m"(*(FLOAT (*)[n]) x),[n] "+&r"(n) : [x] "a"(x) : "cc", "r1", "v0"); } diff --git a/kernel/zarch/dsdot.c b/kernel/zarch/dsdot.c index 5fa88c3b9..2952bcf42 100644 --- a/kernel/zarch/dsdot.c +++ b/kernel/zarch/dsdot.c @@ -112,8 +112,8 @@ static double dsdot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y) { "adbr %%f0,%%f1\n\t" "ldr %[dot],%%f0" : [dot] "=f"(dot),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[n]; } *) y),[y] "a"(y) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[n]) y),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/dsum.c b/kernel/zarch/dsum.c index 178bc3462..69b9f9b41 100644 --- a/kernel/zarch/dsum.c +++ b/kernel/zarch/dsum.c @@ -86,9 +86,9 @@ static FLOAT dsum_kernel_32(BLASLONG n, FLOAT *x) { "vfadb %%v24,%%v24,%%v31\n\t" "vrepg %%v25,%%v24,1\n\t" "vfadb %%v24,%%v24,%%v25\n\t" - "vsteg %%v24,%[asum],0" + "vsteg %%v24,%[sum],0" : [sum] "=Q"(sum),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/dswap.c b/kernel/zarch/dswap.c index f0c9ded51..46cbbba23 100644 --- a/kernel/zarch/dswap.c +++ b/kernel/zarch/dswap.c @@ -99,7 +99,7 @@ static void dswap_kernel_32(BLASLONG n, FLOAT *x, FLOAT *y) { "vst %%v31, 240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n]; } *) x), "+m"(*(struct { FLOAT x[n]; } *) y), + : "+m"(*(FLOAT (*)[n]) x), "+m"(*(FLOAT (*)[n]) y), [n] "+&r"(n) : [x] "a"(x),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", diff --git a/kernel/zarch/gemm8x4V.S b/kernel/zarch/gemm8x4V.S index 27fd5f57b..633e60ea6 100644 --- a/kernel/zarch/gemm8x4V.S +++ b/kernel/zarch/gemm8x4V.S @@ -147,7 +147,7 @@ brctg LOCAL_VAR1,.L8x4_4_BK ALIGN_4 .L8x4_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK 
/*refresh BK*/ jz .L8x4_BK_Store @@ -183,7 +183,7 @@ brctg LOCAL_VAR1,.L4x4_4_BK ALIGN_4 .L4x4_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L4x4_BK_Store @@ -217,7 +217,7 @@ brctg LOCAL_VAR1,.L2x4_4_BK ALIGN_4 .L2x4_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L2x4_BK_Store @@ -252,7 +252,7 @@ brctg LOCAL_VAR1,.L1x4_4_BK ALIGN_4 .L1x4_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L1x4_BK_Store @@ -309,7 +309,7 @@ brctg LOCAL_VAR1,.L8x2_4_BK ALIGN_4 .L8x2_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L8x2_BK_Store @@ -346,7 +346,7 @@ brctg LOCAL_VAR1,.L4x2_4_BK ALIGN_4 .L4x2_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L4x2_BK_Store @@ -380,7 +380,7 @@ brctg LOCAL_VAR1,.L2x2_4_BK ALIGN_4 .L2x2_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L2x2_BK_Store @@ -415,7 +415,7 @@ brctg LOCAL_VAR1,.L1x2_4_BK ALIGN_4 .L1x2_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L1x2_BK_Store @@ -471,7 +471,7 @@ brctg LOCAL_VAR1,.L8x1_4_BK ALIGN_4 .L8x1_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L8x1_BK_Store @@ -508,7 +508,7 @@ brctg LOCAL_VAR1,.L4x1_4_BK ALIGN_4 .L4x1_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L4x1_BK_Store @@ -542,7 +542,7 @@ brctg LOCAL_VAR1,.L2x1_4_BK ALIGN_4 .L2x1_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L2x1_BK_Store @@ -577,7 +577,7 @@ brctg LOCAL_VAR1,.L1x1_4_BK ALIGN_4 .L1x1_mod: -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ jz .L1x1_BK_Store diff --git a/kernel/zarch/gemm_vec.c b/kernel/zarch/gemm_vec.c new file mode 100644 index 000000000..30f3171d2 --- /dev/null +++ b/kernel/zarch/gemm_vec.c @@ -0,0 +1,680 @@ +/* + * Copyright (c) IBM Corporation 2020. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+#include "common.h"
+#include "vector-common.h"
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+
+#ifdef COMPLEX
+#error "Handling for complex numbers is not supported in this kernel"
+#endif
+
+#ifdef DOUBLE
+#define UNROLL_M DGEMM_DEFAULT_UNROLL_M
+#define UNROLL_N DGEMM_DEFAULT_UNROLL_N
+#else
+#define UNROLL_M SGEMM_DEFAULT_UNROLL_M
+#define UNROLL_N SGEMM_DEFAULT_UNROLL_N
+#endif
+
+static const size_t unroll_m = UNROLL_M;
+static const size_t unroll_n = UNROLL_N;
+
+/* Handling of triangular matrices */
+#ifdef TRMMKERNEL
+static const bool trmm = true;
+static const bool left =
+#ifdef LEFT
+	true;
+#else
+	false;
+#endif
+
+static const bool backwards =
+#if defined(LEFT) != defined(TRANSA)
+	true;
+#else
+	false;
+#endif
+
+#else
+static const bool trmm = false;
+static const bool left = false;
+static const bool backwards = false;
+#endif /* TRMMKERNEL */
+
+/*
+ * Background:
+ *
+ * The algorithm of GotoBLAS / OpenBLAS breaks down the matrix multiplication
+ * problem by splitting all matrices into partitions multiple times, so that the
+ * submatrices fit into the L1 or L2 caches. As a result, each multiplication of
+ * submatrices can stream data fast from L1 and L2 caches. In between, it copies
+ * and rearranges the submatrices to enable contiguous memory accesses to
+ * improve locality in both caches and TLBs.
+ *
+ * At the heart of the algorithm is this kernel, which multiplies a "Block
+ * matrix" A (small dimensions) with a "Panel matrix" B (number of rows is
+ * small) and adds the result into a "Panel matrix" C; GotoBLAS calls this
+ * operation GEBP. This kernel further partitions GEBP twice, such that (1)
+ * submatrices of C and B fit into the L1 caches (GEBP_column_block) and (2) a
+ * block of C fits into the registers, while multiplying panels from A and B
+ * streamed from the L2 and L1 cache, respectively (GEBP_block).
+ *
+ *
+ * Algorithm GEBP(A, B, C, m, n, k, alpha):
+ *
+ * The problem is calculating C += alpha * (A * B)
+ * C is an m x n matrix, A is an m x k matrix, B is a k x n matrix.
+ *
+ * - C is in column-major-order, with an offset of ldc to the element in the
+ *   next column (same row).
+ * - A is in row-major-order yet stores SGEMM_UNROLL_M elements of each column
+ *   contiguously while walking along rows.
+ * - B is in column-major-order but packs SGEMM_UNROLL_N elements of a row
+ *   contiguously.
+ * If the numbers of rows and columns are not multiples of SGEMM_UNROLL_M or
+ * SGEMM_UNROLL_N, the remaining elements are arranged in blocks with power-of-2
+ * dimensions (e.g., 5 remaining columns would be in a block-of-4 and a
+ * block-of-1).
+ *
+ * Note that packing A and B into that form is taken care of by the caller in
+ * driver/level3/level3.c (actually done by "copy kernels").
+ *
+ * Steps:
+ * - Partition C and B into blocks of n_r (SGEMM_UNROLL_N) columns, C_j and B_j.
+ *   Now, B_j should fit into the L1 cache.
+ * - For each partition, calculate C_j += alpha * (A * B_j) by
+ *     (1) Calculate C_aux := A * B_j (see below)
+ *     (2) unpack C_j = C_j + alpha * C_aux
+ *
+ *
+ * Algorithm for Calculating C_aux:
+ *
+ * - Further partition C_aux and A into groups of m_r (SGEMM_UNROLL_M) rows,
+ *   such that the m_r x n_r-submatrix of C_aux can be held in registers. Each
+ *   submatrix of C_aux can be calculated independently, and the registers are
+ *   added back into C_j.
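+ *
+ *   In scalar form (an editorial sketch added here, not part of the
+ *   original patch), one such register-held block of ROWS x COLS elements
+ *   computes, using the packed layouts of A and B described above:
+ *
+ *       for (BLASLONG j = 0; j < COLS; j++)
+ *           for (BLASLONG i = 0; i < ROWS; i++) {
+ *               FLOAT acc = 0;
+ *               for (BLASLONG kk = 0; kk < k; kk++)
+ *                   acc += A[i + kk * ROWS] * B[j + kk * COLS];
+ *               C[i + j * ldc] += alpha * acc;
+ *           }
+ *
+ *   (compare the SCALAR_BLOCK implementation below).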
+ *
+ * - For each row-block of C_aux:
+ *   (uses a row block of A and full B_j)
+ *   - stream over all columns of A, multiply with elements from B and
+ *     accumulate in registers. (use different inner-kernels to exploit
+ *     vectorization for varying block sizes)
+ *   - add alpha * row block of C_aux back into C_j.
+ *
+ * Note that there are additional mechanics for handling triangular matrices,
+ * calculating B := alpha (A * B) where either of the matrices A or B can be
+ * triangular. In case of A, the macro "LEFT" is defined. In addition, A can
+ * optionally be transposed.
+ * The code effectively skips an "offset" number of columns in A and rows of B
+ * in each block, to save unnecessary work by exploiting the triangular nature.
+ * To handle all cases, the code discerns (1) a "left" mode when A is triangular
+ * and (2) "forward" / "backwards" modes where only the first "offset"
+ * columns/rows of A/B are used or where the first "offset" columns/rows are
+ * skipped, respectively.
+ *
+ * Reference:
+ *
+ * The summary above is based on staring at various kernel implementations and:
+ * K. Goto and R. A. Van de Geijn, Anatomy of High-Performance Matrix
+ * Multiplication, in ACM Transactions on Mathematical Software, Vol. 34, No.
+ * 3, May 2008.
+ */
+
+/**
+ * Calculate for a row-block in C_i of size ROWSxCOLS using vector intrinsics.
+ *
+ * @param[in] A	Pointer to current block of input matrix A.
+ * @param[in] bk	Number of columns in A.
+ * @param[in] B	Pointer to current block of input matrix B.
+ * @param[inout] C	Pointer to current block of output matrix C.
+ * @param[in] ldc	Offset between elements in adjacent columns in C.
+ * @param[in] alpha	Scalar factor.
+ */
+#define VECTOR_BLOCK(ROWS, COLS) \
+	static inline void GEBP_block_##ROWS##_##COLS( \
+	    FLOAT const *restrict A, BLASLONG bk, FLOAT const *restrict B, \
+	    FLOAT *restrict C, BLASLONG ldc, FLOAT alpha) { \
+		_Static_assert( \
+		    ROWS % VLEN_FLOATS == 0, \
+		    "rows in block must be multiples of vector length"); \
+		vector_float Caux[ROWS / VLEN_FLOATS][COLS]; \
+		\
+		for (BLASLONG i = 0; i < ROWS / VLEN_FLOATS; i++) { \
+			vector_float A0 = \
+			    vec_load_hinted(A + i * VLEN_FLOATS); \
+			for (BLASLONG j = 0; j < COLS; j++) \
+				Caux[i][j] = A0 * B[j]; \
+		} \
+		\
+		/* \
+		 * Stream over the row-block of A, which is packed \
+		 * column-by-column, multiply by coefficients in B and add up \
+		 * into temporaries Caux (which the compiler will hold in \
+		 * registers). Vectorization: Multiply column vectors from A \
+		 * with scalars from B and add up in column vectors of Caux. \
+		 * That equates to unrolling the loop over rows (in i) and \
+		 * executing each unrolled iteration as a vector element. \
+		 */ \
+		for (BLASLONG k = 1; k < bk; k++) { \
+			for (BLASLONG i = 0; i < ROWS / VLEN_FLOATS; i++) { \
+				vector_float Ak = vec_load_hinted( \
+				    A + i * VLEN_FLOATS + k * ROWS); \
+				\
+				for (BLASLONG j = 0; j < COLS; j++) \
+					Caux[i][j] += Ak * B[j + k * COLS]; \
+			} \
+		} \
+		\
+		/* \
+		 * Unpack row-block of C_aux into outer C_i, multiply by \
+		 * alpha and add up. \
\ + */ \ + for (BLASLONG j = 0; j < COLS; j++) { \ + for (BLASLONG i = 0; i < ROWS / VLEN_FLOATS; i++) { \ + vector_float *C_ij = \ + (vector_float *)(C + i * VLEN_FLOATS + \ + j * ldc); \ + if (trmm) { \ + *C_ij = alpha * Caux[i][j]; \ + } else { \ + *C_ij += alpha * Caux[i][j]; \ + } \ + } \ + } \ + } + + +#if UNROLL_M == 16 +VECTOR_BLOCK(16, 2) +VECTOR_BLOCK(16, 1) +#endif +#if UNROLL_N == 8 +VECTOR_BLOCK(8, 8) +VECTOR_BLOCK(4, 8) +#endif +#ifndef DOUBLE +VECTOR_BLOCK(8, 4) +#endif +VECTOR_BLOCK(8, 2) +VECTOR_BLOCK(8, 1) +VECTOR_BLOCK(4, 4) +VECTOR_BLOCK(4, 2) +VECTOR_BLOCK(4, 1) + +/** + * Calculate for a row-block in C_i of size ROWSxCOLS using scalar operations. + * Simple implementation for smaller block sizes + * + * @param[in] A Pointer current block of input matrix A. + * @param[in] k Number of columns in A. + * @param[in] B Pointer current block of input matrix B. + * @param[inout] C Pointer current block of output matrix C. + * @param[in] ldc Offset between elements in adjacent columns in C. + * @param[in] alpha Scalar factor. + */ +#define SCALAR_BLOCK(ROWS, COLS) \ + static inline void GEBP_block_##ROWS##_##COLS( \ + FLOAT const *restrict A, BLASLONG k, FLOAT const *restrict B, \ + FLOAT *restrict C, BLASLONG ldc, FLOAT alpha) { \ + FLOAT Caux[ROWS][COLS] __attribute__((aligned(16))); \ + \ + /* \ + * Peel off first iteration (i.e., column of A) for \ + * initializing Caux \ + */ \ + for (BLASLONG i = 0; i < ROWS; i++) \ + for (BLASLONG j = 0; j < COLS; j++) Caux[i][j] = A[i] * B[j]; \ + \ + for (BLASLONG kk = 1; kk < k; kk++) \ + for (BLASLONG i = 0; i < ROWS; i++) \ + for (BLASLONG j = 0; j < COLS; j++) \ + Caux[i][j] += A[i + kk * ROWS] * B[j + kk * COLS]; \ + \ + for (BLASLONG i = 0; i < ROWS; i++) \ + for (BLASLONG j = 0; j < COLS; j++) \ + if (trmm) { \ + C[i + j * ldc] = alpha * Caux[i][j]; \ + } else { \ + C[i + j * ldc] += alpha * Caux[i][j]; \ + } \ + } + +#ifdef DOUBLE +VECTOR_BLOCK(2, 4) +VECTOR_BLOCK(2, 2) +VECTOR_BLOCK(2, 1) +#else +SCALAR_BLOCK(2, 4) +SCALAR_BLOCK(2, 2) +SCALAR_BLOCK(2, 1) +#endif + +SCALAR_BLOCK(1, 4) +SCALAR_BLOCK(1, 2) +SCALAR_BLOCK(1, 1) + + +/** + * Calculate a row-block that fits 4x4 vector registers using a loop + * unrolled-by-2 with explicit interleaving to better overlap loads and + * computation. + * This function fits 16x4 blocks for SGEMM and 8x4 blocks for DGEMM. + */ +#ifdef DOUBLE +static inline void GEBP_block_8_4( +#else // float +static inline void GEBP_block_16_4( +#endif + FLOAT const *restrict A, BLASLONG bk, FLOAT const *restrict B, + FLOAT *restrict C, BLASLONG ldc, FLOAT alpha) { +#define VEC_ROWS 4 +#define VEC_COLS 4 +#define ROWS VEC_ROWS * VLEN_FLOATS +#define COLS (VEC_COLS) + + /* + * Hold intermediate results in vector registers. + * Since we need to force the compiler's hand in places, we need to use + * individual variables in contrast to the generic implementation's + * arrays. 
+ */ +#define INIT_ROW_OF_C(ROW) \ + vector_float A##ROW = vec_load_hinted(A + ROW * VLEN_FLOATS); \ + vector_float C_##ROW##_0 = A##ROW * B[0]; \ + vector_float C_##ROW##_1 = A##ROW * B[1]; \ + vector_float C_##ROW##_2 = A##ROW * B[2]; \ + vector_float C_##ROW##_3 = A##ROW * B[3]; + + INIT_ROW_OF_C(0) + INIT_ROW_OF_C(1) + INIT_ROW_OF_C(2) + INIT_ROW_OF_C(3) +#undef INIT_ROW_OF_C + + if (bk > 1) { + BLASLONG k = 1; + vector_float Ak[VEC_ROWS], Aknext[VEC_ROWS]; + vector_float Bk[VEC_COLS], Bknext[VEC_COLS]; + + /* + * Note that in several places, we enforce an instruction + * sequence that we identified empirically by utilizing dummy + * asm statements. + */ + + for (BLASLONG j = 0; j < VEC_COLS; j++) + Bk[j] = vec_splats(B[j + k * COLS]); + asm(""); + + for (BLASLONG i = 0; i < VEC_ROWS; i++) + Ak[i] = vec_load_hinted(A + i * VLEN_FLOATS + k * ROWS); + + for (; k < (bk - 2); k += 2) { + /* + * Load inputs for (k+1) into registers. + * Loading from B first is advantageous. + */ + for (BLASLONG j = 0; j < VEC_COLS; j++) + Bknext[j] = vec_splats(B[j + (k + 1) * COLS]); + asm(""); + for (BLASLONG i = 0; i < VEC_ROWS; i++) + Aknext[i] = vec_load_hinted(A + i * VLEN_FLOATS + + (k + 1) * ROWS); + + /* + * To achieve better instruction-level parallelism, + * make sure to first load input data for (k+1) before + * initiating compute for k. We enforce that ordering + * with a pseudo asm statement. + * Note that we need to massage this particular "barrier" + * depending on the gcc version. + */ +#if __GNUC__ > 7 || defined(__clang__) +#define BARRIER_READ_BEFORE_COMPUTE(SUFFIX) \ + do { \ + asm("" \ + : "+v"(C_0_0), "+v"(C_0_1), "+v"(C_0_2), "+v"(C_0_3), "+v"(C_1_0), \ + "+v"(C_1_1), "+v"(C_1_2), "+v"(C_1_3) \ + : "v"(B##SUFFIX[0]), "v"(B##SUFFIX[1]), "v"(B##SUFFIX[2]), \ + "v"(B##SUFFIX[3]), "v"(A##SUFFIX[0]), "v"(A##SUFFIX[1]), \ + "v"(A##SUFFIX[2]), "v"(A##SUFFIX[3])); \ + asm("" \ + : "+v"(C_2_0), "+v"(C_2_1), "+v"(C_2_2), "+v"(C_2_3), "+v"(C_3_0), \ + "+v"(C_3_1), "+v"(C_3_2), "+v"(C_3_3) \ + : "v"(B##SUFFIX[0]), "v"(B##SUFFIX[1]), "v"(B##SUFFIX[2]), \ + "v"(B##SUFFIX[3]), "v"(A##SUFFIX[0]), "v"(A##SUFFIX[1]), \ + "v"(A##SUFFIX[2]), "v"(A##SUFFIX[3])); \ + } while (0) +#else // __GNUC__ <= 7 +#define BARRIER_READ_BEFORE_COMPUTE(SUFFIX) \ + do { \ + asm(""); \ + } while (0) +#endif + + BARRIER_READ_BEFORE_COMPUTE(knext); + + /* Compute for (k) */ + C_0_0 += Ak[0] * Bk[0]; + C_1_0 += Ak[1] * Bk[0]; + C_2_0 += Ak[2] * Bk[0]; + C_3_0 += Ak[3] * Bk[0]; + + C_0_1 += Ak[0] * Bk[1]; + C_1_1 += Ak[1] * Bk[1]; + C_2_1 += Ak[2] * Bk[1]; + C_3_1 += Ak[3] * Bk[1]; + + C_0_2 += Ak[0] * Bk[2]; + C_1_2 += Ak[1] * Bk[2]; + C_2_2 += Ak[2] * Bk[2]; + C_3_2 += Ak[3] * Bk[2]; + + C_0_3 += Ak[0] * Bk[3]; + C_1_3 += Ak[1] * Bk[3]; + C_2_3 += Ak[2] * Bk[3]; + C_3_3 += Ak[3] * Bk[3]; + + asm(""); + + /* + * Load inputs for (k+2) into registers. + * First load from B. + */ + for (BLASLONG j = 0; j < VEC_COLS; j++) + Bk[j] = vec_splats(B[j + (k + 2) * COLS]); + asm(""); + for (BLASLONG i = 0; i < VEC_ROWS; i++) + Ak[i] = vec_load_hinted(A + i * VLEN_FLOATS + (k + 2) * ROWS); + + /* + * As above, make sure to first schedule the loads for (k+2) + * before compute for (k+1). 
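+			 *
+			 * Editorial note (added annotation, not part of the
+			 * original patch): the barrier emits no instructions.
+			 * Listing the accumulators as "+v" operands and the
+			 * freshly loaded A/B vectors as "v" inputs makes the
+			 * empty asm appear to consume the loads and modify the
+			 * accumulators, so the compiler can neither sink the
+			 * loads below it nor hoist the dependent FMAs above it.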
+			 */
+			BARRIER_READ_BEFORE_COMPUTE(k);
+
+			/* Compute on (k+1) */
+			C_0_0 += Aknext[0] * Bknext[0];
+			C_1_0 += Aknext[1] * Bknext[0];
+			C_2_0 += Aknext[2] * Bknext[0];
+			C_3_0 += Aknext[3] * Bknext[0];
+
+			C_0_1 += Aknext[0] * Bknext[1];
+			C_1_1 += Aknext[1] * Bknext[1];
+			C_2_1 += Aknext[2] * Bknext[1];
+			C_3_1 += Aknext[3] * Bknext[1];
+
+			C_0_2 += Aknext[0] * Bknext[2];
+			C_1_2 += Aknext[1] * Bknext[2];
+			C_2_2 += Aknext[2] * Bknext[2];
+			C_3_2 += Aknext[3] * Bknext[2];
+
+			C_0_3 += Aknext[0] * Bknext[3];
+			C_1_3 += Aknext[1] * Bknext[3];
+			C_2_3 += Aknext[2] * Bknext[3];
+			C_3_3 += Aknext[3] * Bknext[3];
+		}
+
+		/* Wrap up remaining k's */
+		for (; k < bk; k++) {
+			vector_float Ak;
+
+#define COMPUTE_WRAPUP_ROW(ROW) \
+	Ak = vec_load_hinted(A + ROW * VLEN_FLOATS + k * ROWS); \
+	C_##ROW##_0 += Ak * B[0 + k * COLS]; \
+	C_##ROW##_1 += Ak * B[1 + k * COLS]; \
+	C_##ROW##_2 += Ak * B[2 + k * COLS]; \
+	C_##ROW##_3 += Ak * B[3 + k * COLS];
+
+			COMPUTE_WRAPUP_ROW(0)
+			COMPUTE_WRAPUP_ROW(1)
+			COMPUTE_WRAPUP_ROW(2)
+			COMPUTE_WRAPUP_ROW(3)
+#undef COMPUTE_WRAPUP_ROW
+		}
+	}
+
+	/*
+	 * Unpack row-block of C_aux into outer C_i, multiply by
+	 * alpha and add up (or assign for TRMM).
+	 */
+#define WRITE_BACK_C(ROW, COL) \
+	do { \
+		vector_float *Cij = \
+		    (vector_float *)(C + ROW * VLEN_FLOATS + COL * ldc); \
+		if (trmm) { \
+			*Cij = alpha * C_##ROW##_##COL; \
+		} else { \
+			*Cij += alpha * C_##ROW##_##COL; \
+		} \
+	} while (0)
+
+	WRITE_BACK_C(0, 0); WRITE_BACK_C(0, 1); WRITE_BACK_C(0, 2); WRITE_BACK_C(0, 3);
+	WRITE_BACK_C(1, 0); WRITE_BACK_C(1, 1); WRITE_BACK_C(1, 2); WRITE_BACK_C(1, 3);
+	WRITE_BACK_C(2, 0); WRITE_BACK_C(2, 1); WRITE_BACK_C(2, 2); WRITE_BACK_C(2, 3);
+	WRITE_BACK_C(3, 0); WRITE_BACK_C(3, 1); WRITE_BACK_C(3, 2); WRITE_BACK_C(3, 3);
+#undef WRITE_BACK_C
+
+#undef ROWS
+#undef VEC_ROWS
+#undef COLS
+#undef VEC_COLS
+#undef BARRIER_READ_BEFORE_COMPUTE
+}
+
+/**
+ * Handle calculation for row blocks in C_i of any size by dispatching into
+ * macro-defined (inline) functions or by deferring to a simple generic
+ * implementation. Note that the compiler can remove this awkward-looking
+ * dispatching code while inlining.
+ *
+ * @param[in] m		Number of rows in block C_i.
+ * @param[in] n		Number of columns in block C_i.
+ * @param[in] first_row	Index of first row of the block C_i (relative to C).
+ * @param[in] A		Pointer to input matrix A (note: all of it).
+ * @param[in] k		Number of columns in A and rows in B.
+ * @param[in] B		Pointer to current column block (panel) of input matrix B.
+ * @param[inout] C	Pointer to current column block (panel) of output matrix C.
+ * @param[in] ldc	Offset between elements in adjacent columns in C.
+ * @param[in] alpha	Scalar factor.
+ * @param[in] offset	Number of columns of A and rows of B to skip (for triangular matrices).
+ * @param[in] off	Running offset for handling triangular matrices.
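+ *
+ * (Editorial note, not part of the original patch: each BLOCK(bm, bn)
+ * invocation below expands to
+ *
+ *     if (m == bm && n == bn) {
+ *             GEBP_block_bm_bn(A, k, B, C, ldc, alpha);
+ *             return;
+ *     }
+ *
+ * so the function body is a linear dispatch on the runtime block
+ * dimensions m and n.)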
+ */ +static inline void GEBP_block(BLASLONG m, BLASLONG n, + BLASLONG first_row, + const FLOAT * restrict A, BLASLONG k, + const FLOAT * restrict B, + FLOAT *restrict C, BLASLONG ldc, + FLOAT alpha, + BLASLONG offset, BLASLONG off) +{ + if (trmm && left) + off = offset + first_row; + + A += first_row * k; + C += first_row; + + if (trmm) { + if (backwards) { + A += off * m; + B += off * n; + k -= off; + } else { + if (left) { + k = off + m; + } else { + k = off + n; + } + } + } + + /* Dispatch into the implementation for each block size: */ + +#define BLOCK(bm, bn) \ + if (m == bm && n == bn) { \ + GEBP_block_##bm##_##bn(A, k, B, C, ldc, alpha); \ + return; \ + } + +#if UNROLL_M == 16 + BLOCK(16, 4); BLOCK(16, 2); BLOCK(16, 1); +#endif +#if UNROLL_N == 8 + BLOCK(8, 8); BLOCK(4, 8); +#endif + BLOCK(8, 4); BLOCK(8, 2); BLOCK(8, 1); + BLOCK(4, 4); BLOCK(4, 2); BLOCK(4, 1); + + BLOCK(2, 4); BLOCK(2, 2); BLOCK(2, 1); + + BLOCK(1, 4); BLOCK(1, 2); BLOCK(1, 1); + +#undef BLOCK +} + +/** + * Handle a column block (panel) of C and B while calculating C += alpha(A * B). + * + * @param[in] num_cols Number of columns in the block (in C and B). + * @param[in] first_col First column of the current block (in C and B). + * @param[in] A Pointer to input matrix A. + * @param[in] bk Number of columns in A and rows in B. + * @param[in] B Pointer to input matrix B (note: all of it). + * @param[in] bm Number of rows in C and A. + * @param[inout] C Pointer to output matrix C (note: all of it). + * @param[in] ldc Offset between elements in adjacent columns in C. + * @param[in] alpha Scalar factor. + * @param[in] offset Number of columns of A and rows of B to skip (for triangular matrices). + */ +static inline void GEBP_column_block(BLASLONG num_cols, BLASLONG first_col, + const FLOAT *restrict A, BLASLONG bk, + const FLOAT *restrict B, BLASLONG bm, + FLOAT *restrict C, BLASLONG ldc, + FLOAT alpha, + BLASLONG const offset) { + + FLOAT *restrict C_i = C + first_col * ldc; + /* + * B is in column-order with n_r packed row elements, which does + * not matter -- we always move in full such blocks of + * column*pack + */ + const FLOAT *restrict B_i = B + first_col * bk; + + BLASLONG off = 0; + if (trmm) { + if (left) { + off = offset; + } else { + off = -offset + first_col; + } + } + + /* + * Calculate C_aux := A * B_j + * then unpack C_i += alpha * C_aux. + * + * For that purpose, further partition C_aux and A into blocks + * of m_r (unroll_m) rows, or powers-of-2 if smaller. + */ + BLASLONG row = 0; + for (BLASLONG block_size = unroll_m; block_size > 0; block_size /= 2) + for (; bm - row >= block_size; row += block_size) + GEBP_block(block_size, num_cols, row, A, bk, B_i, C_i, + ldc, alpha, offset, off); +} + +/** + * Inner kernel for matrix-matrix multiplication. C += alpha (A * B) + * where C is an m-by-n matrix, A is m-by-k and B is k-by-n. Note that A, B, and + * C are pointers to submatrices of the actual matrices. + * + * For triangular matrix multiplication, calculate B := alpha (A * B) where A + * or B can be triangular (in case of A, the macro LEFT will be defined). + * + * @param[in] bm Number of rows in C and A. + * @param[in] bn Number of columns in C and B. + * @param[in] bk Number of columns in A and rows in B. + * @param[in] alpha Scalar factor. + * @param[in] ba Pointer to input matrix A. + * @param[in] bb Pointer to input matrix B. + * @param[inout] C Pointer to output matrix C. + * @param[in] ldc Offset between elements in adjacent columns in C. 
+ * @param[in] offset Number of columns of A and rows of B to skip (for triangular matrices). + * @returns 0 on success. + */ +int CNAME(BLASLONG bm, BLASLONG bn, BLASLONG bk, FLOAT alpha, + FLOAT *restrict ba, FLOAT *restrict bb, + FLOAT *restrict C, BLASLONG ldc +#ifdef TRMMKERNEL + , BLASLONG offset +#endif + ) +{ + if ( (bm == 0) || (bn == 0) || (bk == 0) || (alpha == ZERO)) + return 0; + + /* + * interface code allocates buffers for ba and bb at page + * granularity (i.e., using mmap(MAP_ANONYMOUS), so enable the compiler + * to make use of the fact in vector load operations. + */ + ba = __builtin_assume_aligned(ba, 16); + bb = __builtin_assume_aligned(bb, 16); + + /* + * Use offset and off even when compiled as SGEMMKERNEL to simplify + * function signatures and function calls. + */ +#ifndef TRMMKERNEL + BLASLONG const offset = 0; +#endif + + /* + * Partition B and C into blocks of n_r (unroll_n) columns, called B_i + * and C_i. For each partition, calculate C_i += alpha * (A * B_j). + * + * For remaining columns that do not fill up a block of n_r, iteratively + * use smaller block sizes of powers of 2. + */ + BLASLONG col = 0; + for (BLASLONG block_size = unroll_n; block_size > 0; block_size /= 2) + for (; bn - col >= block_size; col += block_size) + GEBP_column_block(block_size, col, ba, bk, bb, bm, C, ldc, alpha, offset); + + return 0; +} diff --git a/kernel/zarch/icamax.c b/kernel/zarch/icamax.c index a2546b812..459196d00 100644 --- a/kernel/zarch/icamax.c +++ b/kernel/zarch/icamax.c @@ -213,9 +213,9 @@ static BLASLONG icamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *amax) { "ste %%f0,%[amax]\n\t" "vlgvg %[iamax],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [iamax] "=r"(iamax),[amax] "=Q"(*amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/icamin.c b/kernel/zarch/icamin.c index 09654b742..9bcf3646b 100644 --- a/kernel/zarch/icamin.c +++ b/kernel/zarch/icamin.c @@ -213,9 +213,9 @@ static BLASLONG icamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *amin) { "ste %%f0,%[amin]\n\t" "vlgvg %[iamin],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [iamin] "=r"(iamin),[amin] "=Q"(*amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/idamax.c b/kernel/zarch/idamax.c index b292c1d15..0f53488d3 100644 --- a/kernel/zarch/idamax.c +++ b/kernel/zarch/idamax.c @@ -160,9 +160,9 @@ static BLASLONG idamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *amax) { "std %%f0,%[amax]\n\t" "vlgvg %[iamax],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [iamax] "=r"(iamax),[amax] "=Q"(*amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/idamin.c b/kernel/zarch/idamin.c index f9a8119e1..f48bde894 100644 --- a/kernel/zarch/idamin.c +++ b/kernel/zarch/idamin.c @@ -160,9 +160,9 @@ static BLASLONG 
idamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *amin) { "std %%f0,%[amin]\n\t" "vlgvg %[iamin],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [iamin] "=r"(iamin),[amin] "=Q"(*amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/idmax.c b/kernel/zarch/idmax.c index 8f283bc17..1fdf1fa02 100644 --- a/kernel/zarch/idmax.c +++ b/kernel/zarch/idmax.c @@ -140,9 +140,9 @@ static BLASLONG idmax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *max) { "std %%f0,%[max]\n\t" "vlgvg %[imax],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [imax] "=r"(imax),[max] "=Q"(*max),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/idmin.c b/kernel/zarch/idmin.c index e4b7bb4fe..282f26bbd 100644 --- a/kernel/zarch/idmin.c +++ b/kernel/zarch/idmin.c @@ -140,9 +140,9 @@ static BLASLONG idmin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *min) { "std %%f0,%[min]\n\t" "vlgvg %[imin],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [imin] "=r"(imin),[min] "=Q"(*min),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/isamax.c b/kernel/zarch/isamax.c index ac86435d7..a30a96412 100644 --- a/kernel/zarch/isamax.c +++ b/kernel/zarch/isamax.c @@ -204,9 +204,9 @@ static BLASLONG isamax_kernel_64(BLASLONG n, FLOAT *x, FLOAT *amax) { "ste %%f0,%[amax]\n\t" "vlgvg %[iamax],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [iamax] "=r"(iamax),[amax] "=Q"(*amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT(*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/isamin.c b/kernel/zarch/isamin.c index 3f2d039eb..b29027ff4 100644 --- a/kernel/zarch/isamin.c +++ b/kernel/zarch/isamin.c @@ -204,9 +204,9 @@ static BLASLONG isamin_kernel_64(BLASLONG n, FLOAT *x, FLOAT *amin) { "ste %%f0,%[amin]\n\t" "vlgvg %[iamin],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [iamin] "=r"(iamin),[amin] "=Q"(*amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/ismax.c b/kernel/zarch/ismax.c index 41172c1bd..3d751ff6b 100644 --- a/kernel/zarch/ismax.c +++ b/kernel/zarch/ismax.c @@ -184,9 +184,9 @@ static BLASLONG ismax_kernel_64(BLASLONG n, FLOAT *x, FLOAT *max) { "ste %%f0,%[max]\n\t" "vlgvg %[imax],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [imax] "=r"(imax),[max] "=Q"(*max),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v4", "v5", "v6", "v7", "v8", "v16", "v17", 
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/ismin.c b/kernel/zarch/ismin.c index e2684df41..e57c0bfa6 100644 --- a/kernel/zarch/ismin.c +++ b/kernel/zarch/ismin.c @@ -184,9 +184,9 @@ static BLASLONG ismin_kernel_64(BLASLONG n, FLOAT *x, FLOAT *min) { "ste %%f0,%[min]\n\t" "vlgvg %[imin],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [imin] "=r"(imin),[min] "=Q"(*min),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/izamax.c b/kernel/zarch/izamax.c index daca1d6f7..fda76f471 100644 --- a/kernel/zarch/izamax.c +++ b/kernel/zarch/izamax.c @@ -157,9 +157,9 @@ static BLASLONG izamax_kernel_16(BLASLONG n, FLOAT *x, FLOAT *amax) { "std %%f0,%[amax]\n\t" "vlgvg %[iamax],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [iamax] "=r"(iamax),[amax] "=Q"(*amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); diff --git a/kernel/zarch/izamin.c b/kernel/zarch/izamin.c index 9ababb91f..412ab15ca 100644 --- a/kernel/zarch/izamin.c +++ b/kernel/zarch/izamin.c @@ -157,9 +157,9 @@ static BLASLONG izamin_kernel_16(BLASLONG n, FLOAT *x, FLOAT *amin) { "std %%f0,%[amin]\n\t" "vlgvg %[iamin],%%v1,0\n\t" "2:\n\t" - "nop" + "nop 0" : [iamin] "=r"(iamin),[amin] "=Q"(*amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); diff --git a/kernel/zarch/samax.c b/kernel/zarch/samax.c index fdda6dd32..20da4406a 100644 --- a/kernel/zarch/samax.c +++ b/kernel/zarch/samax.c @@ -78,7 +78,7 @@ static FLOAT samax_kernel_64(BLASLONG n, FLOAT *x) { "wfmaxsb %%v0,%%v0,%%v16,8\n\t" "lper %[amax],%%f0" : [amax] "=f"(amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/samin.c b/kernel/zarch/samin.c index f05e851f9..e7e4fd9b7 100644 --- a/kernel/zarch/samin.c +++ b/kernel/zarch/samin.c @@ -78,7 +78,7 @@ static FLOAT samin_kernel_64(BLASLONG n, FLOAT *x) { "wfminsb %%v0,%%v0,%%v16,8\n\t" "lper %[amin],%%f0" : [amin] "=f"(amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/sasum.c b/kernel/zarch/sasum.c index d56f2697b..4cf74f351 100644 --- a/kernel/zarch/sasum.c +++ b/kernel/zarch/sasum.c @@ -108,7 +108,7 @@ static FLOAT sasum_kernel_64(BLASLONG n, FLOAT *x) { "vfasb %%v24,%%v24,%%v25\n\t" "vstef %%v24,%[asum],0" : [asum] "=Q"(asum),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", 
"v31"); diff --git a/kernel/zarch/saxpy.c b/kernel/zarch/saxpy.c index ca34a47ff..8bcb1a61b 100644 --- a/kernel/zarch/saxpy.c +++ b/kernel/zarch/saxpy.c @@ -100,8 +100,8 @@ static void saxpy_kernel_64(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) { "vst %%v27,240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x), + : "+m"(*(FLOAT (*)[n]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) x),[x] "a"(x), [alpha] "Q"(*alpha) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/scopy.c b/kernel/zarch/scopy.c index 5c453cfbb..631c9f929 100644 --- a/kernel/zarch/scopy.c +++ b/kernel/zarch/scopy.c @@ -36,8 +36,8 @@ static void scopy_kernel_64(BLASLONG n, FLOAT *x, FLOAT *y) { "la %[x],256(%[x])\n\t" "la %[y],256(%[y])\n\t" "brctg %[n],0b" - : "=m"(*(struct { FLOAT x[n]; } *) y),[x] "+&a"(x),[y] "+&a"(y),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x) + : "=m"(*(FLOAT (*)[n]) y),[x] "+&a"(x),[y] "+&a"(y),[n] "+&r"(n) + : "m"(*(const FLOAT (*)[n]) x) : "cc"); } diff --git a/kernel/zarch/sdot.c b/kernel/zarch/sdot.c index d870b30f0..d27c17162 100644 --- a/kernel/zarch/sdot.c +++ b/kernel/zarch/sdot.c @@ -84,8 +84,8 @@ static FLOAT sdot_kernel_32(BLASLONG n, FLOAT *x, FLOAT *y) { "aebr %%f0,%%f3\n\t" "ler %[dot],%%f0" : [dot] "=f"(dot),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[n]; } *) y),[y] "a"(y) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[n]) y),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/sgemv_n_4.c b/kernel/zarch/sgemv_n_4.c index a1efef373..b4cfb61de 100644 --- a/kernel/zarch/sgemv_n_4.c +++ b/kernel/zarch/sgemv_n_4.c @@ -159,13 +159,13 @@ static void sgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, "agfi %%r1,16\n\t" "brctg %%r0,2b\n\t" "3:\n\t" - "nop" - : "+m"(*(struct { FLOAT x[n]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n]; } *) ap2),[ap2] "a"(ap2), - "m"(*(const struct { FLOAT x[n]; } *) ap3),[ap3] "a"(ap3), - "m"(*(const struct { FLOAT x[4]; } *) x),[x] "a"(x),[alpha] "Q"(*alpha), + "nop 0" + : "+m"(*(FLOAT (*)[n]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n]) ap2),[ap2] "a"(ap2), + "m"(*(const FLOAT (*)[n]) ap3),[ap3] "a"(ap3), + "m"(*(const FLOAT (*)[4]) x),[x] "a"(x),[alpha] "Q"(*alpha), [n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", @@ -258,11 +258,11 @@ static void sgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, "agfi %%r1,16\n\t" "brctg %%r0,2b\n\t" "3:\n\t" - "nop" - : "+m"(*(struct { FLOAT x[n]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[2]; } *) x),[x] "a"(x),[alpha] "Q"(*alpha), + "nop 0" + : "+m"(*(FLOAT (*)[n]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n]) 
ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[2]) x),[x] "a"(x),[alpha] "Q"(*alpha), [n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", @@ -331,9 +331,9 @@ static void sgemv_kernel_4x1(BLASLONG n, FLOAT *a0, FLOAT *x, FLOAT *y, "agfi %%r1,16\n\t" "brctg %%r0,2b\n\t" "3:\n\t" - "nop" - : "+m"(*(struct { FLOAT x[n]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) a0),[a0] "a"(a0), + "nop 0" + : "+m"(*(FLOAT (*)[n]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) a0),[a0] "a"(a0), "m"(*(const FLOAT (*)[1]) x),[x] "a"(x),[alpha] "Q"(*alpha), [n] "r"(n) : "cc", "r0", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", diff --git a/kernel/zarch/sgemv_t_4.c b/kernel/zarch/sgemv_t_4.c index 81d7c9fe7..3c708200c 100644 --- a/kernel/zarch/sgemv_t_4.c +++ b/kernel/zarch/sgemv_t_4.c @@ -172,12 +172,12 @@ static void sgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) { "vrepg %%v4,%%v3,1\n\t" "aebr %%f3,%%f4\n\t" "ste %%f3,12(%[y])" - : "=m"(*(struct { FLOAT x[4]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n]; } *) ap2),[ap2] "a"(ap2), - "m"(*(const struct { FLOAT x[n]; } *) ap3),[ap3] "a"(ap3), - "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x),[n] "r"(n) + : "=m"(*(FLOAT (*)[4]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n]) ap2),[ap2] "a"(ap2), + "m"(*(const FLOAT (*)[n]) ap3),[ap3] "a"(ap3), + "m"(*(const FLOAT (*)[n]) x),[x] "a"(x),[n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -278,10 +278,10 @@ static void sgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) { "vrepg %%v2,%%v1,1\n\t" "aebr %%f1,%%f2\n\t" "ste %%f1,4(%[y])" - : "=m"(*(struct { FLOAT x[2]; } *) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x),[n] "r"(n) + : "=m"(*(FLOAT (*)[2]) y) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n]) x),[x] "a"(x),[n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -357,8 +357,8 @@ static void sgemv_kernel_4x1(BLASLONG n, FLOAT *a0, FLOAT *x, FLOAT *y) { "aebr %%f0,%%f1\n\t" "ste %%f0,0(%[y])" : "=m"(*(FLOAT (*)[1]) y) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n]; } *) a0),[a0] "a"(a0), - "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x),[n] "r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n]) a0),[a0] "a"(a0), + "m"(*(const FLOAT (*)[n]) x),[x] "a"(x),[n] "r"(n) : "cc", "r0", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -431,9 +431,9 @@ static void add_y_kernel_4(BLASLONG n, FLOAT da, FLOAT *src, FLOAT *dest) { "agfi %%r1,16\n\t" "brctg %%r0,2b\n\t" "3:\n\t" - "nop" - : "+m"(*(struct { FLOAT x[n]; } *) dest) - : [dest] "a"(dest),[da] "Q"(da), "m"(*(const struct { FLOAT x[n]; } *) src), + "nop 0" + : "+m"(*(FLOAT 
(*)[n]) dest) + : [dest] "a"(dest),[da] "Q"(da), "m"(*(const FLOAT (*)[n]) src), [src] "a"(src),[n] "r"(n) : "cc", "r0", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", diff --git a/kernel/zarch/smax.c b/kernel/zarch/smax.c index 7015aaa1d..0c7433cbc 100644 --- a/kernel/zarch/smax.c +++ b/kernel/zarch/smax.c @@ -75,7 +75,7 @@ static FLOAT smax_kernel_64(BLASLONG n, FLOAT *x) { "wfmaxsb %%v0,%%v0,%%v16,0\n\t" "ler %[max],%%f0" : [max] "=f"(max),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT(*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/smin.c b/kernel/zarch/smin.c index b6875c5c6..5e0f3860d 100644 --- a/kernel/zarch/smin.c +++ b/kernel/zarch/smin.c @@ -75,7 +75,7 @@ static FLOAT smin_kernel_64(BLASLONG n, FLOAT *x) { "wfminsb %%v0,%%v0,%%v16,0\n\t" "ler %[min],%%f0" : [min] "=f"(min),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/srot.c b/kernel/zarch/srot.c index 4f471d866..c235adcbe 100644 --- a/kernel/zarch/srot.c +++ b/kernel/zarch/srot.c @@ -169,7 +169,7 @@ static void srot_kernel_64(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) { "vst %%v23, 240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n]; } *) x), "+m"(*(struct { FLOAT x[n]; } *) y), + : "+m"(*(FLOAT (*)[n]) x), "+m"(*(FLOAT (*)[n]) y), [n] "+&r"(n) : [x] "a"(x),[y] "a"(y),[c] "Q"(*c),[s] "Q"(*s) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", diff --git a/kernel/zarch/sscal.c b/kernel/zarch/sscal.c index 9b9930dc8..da2f49eaf 100644 --- a/kernel/zarch/sscal.c +++ b/kernel/zarch/sscal.c @@ -59,7 +59,7 @@ static void sscal_kernel_32(BLASLONG n, FLOAT da, FLOAT *x) { "vst %%v31,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n]; } *) x),[n] "+&r"(n) + : "+m"(*(FLOAT (*)[n]) x),[n] "+&r"(n) : [x] "a"(x),[da] "Q"(da) : "cc", "r1", "v0", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -81,7 +81,7 @@ static void sscal_kernel_32_zero(BLASLONG n, FLOAT *x) { "vst %%v0,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "=m"(*(struct { FLOAT x[n]; } *) x),[n] "+&r"(n) + : "=m"(*(FLOAT (*)[n]) x),[n] "+&r"(n) : [x] "a"(x) : "cc", "r1", "v0"); } diff --git a/kernel/zarch/ssum.c b/kernel/zarch/ssum.c index a433ab592..02aabdff6 100644 --- a/kernel/zarch/ssum.c +++ b/kernel/zarch/ssum.c @@ -89,9 +89,9 @@ static FLOAT ssum_kernel_64(BLASLONG n, FLOAT *x) { "vfasb %%v24,%%v24,%%v25\n\t" "vrepf %%v25,%%v24,2\n\t" "vfasb %%v24,%%v24,%%v25\n\t" - "vstef %%v24,%[asum],0" + "vstef %%v24,%[sum],0" : [sum] "=Q"(sum),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n]) x),[x] "a"(x) : "cc", "r1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/sswap.c b/kernel/zarch/sswap.c index 0c62f189d..ec860765a 100644 --- a/kernel/zarch/sswap.c +++ b/kernel/zarch/sswap.c @@ -99,7 +99,7 @@ static void sswap_kernel_64(BLASLONG n, FLOAT *x, FLOAT *y) { "vst %%v31, 240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n]; } *) 
x), "+m"(*(struct { FLOAT x[n]; } *) y), + : "+m"(*(FLOAT (*)[n]) x), "+m"(*(FLOAT (*)[n]) y), [n] "+&r"(n) : [x] "a"(x),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", diff --git a/kernel/zarch/strmm8x4V.S b/kernel/zarch/strmm8x4V.S index f8e748167..e34a7a05a 100644 --- a/kernel/zarch/strmm8x4V.S +++ b/kernel/zarch/strmm8x4V.S @@ -186,7 +186,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,8,4 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L8x4_BK_Store @@ -239,7 +239,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,4 nill LOCAL_VAR1,3 #else - la LOCAL_VAR1,3(0,0) + lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x4_BK_Store @@ -290,7 +290,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,4 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L2x4_BK_Store @@ -341,7 +341,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,4 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x4_BK_Store @@ -423,7 +423,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,8,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L8x2_BK_Store @@ -475,7 +475,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x2_BK_Store @@ -525,7 +525,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L2x2_BK_Store @@ -575,7 +575,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x2_BK_Store @@ -655,7 +655,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,8,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L8x1_BK_Store @@ -708,7 +708,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x1_BK_Store @@ -757,7 +757,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L2x1_BK_Store @@ -807,7 +807,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x1_BK_Store diff --git a/kernel/zarch/vector-common.h b/kernel/zarch/vector-common.h new file mode 100644 index 000000000..140d39d7b --- /dev/null +++ b/kernel/zarch/vector-common.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) IBM Corporation 2020. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. 
Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <vecintrin.h> + +#define VLEN_BYTES 16 +#define VLEN_FLOATS (VLEN_BYTES / sizeof(FLOAT)) + +typedef FLOAT vector_float __attribute__ ((vector_size (VLEN_BYTES))); + +/** + * Load a vector into a register, hinting at 8-byte alignment to improve + * performance. gcc-9 and newer create these hints by themselves. For older + * compiler versions, use inline assembly to explicitly express the hint. + * Provide explicit hex encoding to cater for binutils versions that do not yet + * know about vector loads with alignment hints. + * + * Note that, for block sizes where we apply vectorization, vectors in A will + * always be 8-byte aligned. + */ +static inline vector_float vec_load_hinted(FLOAT const *restrict a) { + vector_float const *restrict addr = (vector_float const *restrict)a; + vector_float y; + +#if __GNUC__ < 9 && !defined(__clang__) + // hex-encode vl %[out],%[addr],3 + asm(".insn vrx,0xe70000003006,%[out],%[addr],3" + : [ out ] "=v"(y) + : [ addr ] "R"(*addr)); +#else + y = *addr; +#endif + + return y; +} diff --git a/kernel/zarch/zamax.c b/kernel/zarch/zamax.c index aa04ab91f..98e40d073 100644 --- a/kernel/zarch/zamax.c +++ b/kernel/zarch/zamax.c @@ -114,7 +114,7 @@ static FLOAT zamax_kernel_16(BLASLONG n, FLOAT *x) { "wfmaxdb %%v0,%%v0,%%v16,0\n\t" "ldr %[amax],%%f0" : [amax] "=f"(amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/zamax_z13.c b/kernel/zarch/zamax_z13.c index 37278d6db..f727ad67a 100644 --- a/kernel/zarch/zamax_z13.c +++ b/kernel/zarch/zamax_z13.c @@ -123,7 +123,7 @@ static FLOAT zamax_kernel_16(BLASLONG n, FLOAT *x) { "vsel %%v0,%%v0,%%v16,%%v17\n\t" "ldr %[amax],%%f0" : [amax] "=f"(amax),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); diff --git a/kernel/zarch/zamin.c b/kernel/zarch/zamin.c index 0b5402853..2e43fefd9 100644 --- a/kernel/zarch/zamin.c +++ b/kernel/zarch/zamin.c @@ -114,7 +114,7 @@ static FLOAT zamin_kernel_16(BLASLONG n, FLOAT *x) { "wfmindb %%v0,%%v0,%%v16,0\n\t" "ldr %[amin],%%f0" : [amin] "=f"(amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18",
"v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/zamin_z13.c b/kernel/zarch/zamin_z13.c index e37bb2236..e52802595 100644 --- a/kernel/zarch/zamin_z13.c +++ b/kernel/zarch/zamin_z13.c @@ -123,7 +123,7 @@ static FLOAT zamin_kernel_16(BLASLONG n, FLOAT *x) { "vsel %%v0,%%v0,%%v16,%%v17\n\t" "ldr %[amin],%%f0" : [amin] "=f"(amin),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); diff --git a/kernel/zarch/zasum.c b/kernel/zarch/zasum.c index aeef8d77e..0003f38a5 100644 --- a/kernel/zarch/zasum.c +++ b/kernel/zarch/zasum.c @@ -106,7 +106,7 @@ static FLOAT zasum_kernel_16(BLASLONG n, FLOAT *x) { "vfadb %%v24,%%v24,%%v25\n\t" "vsteg %%v24,%[asum],0" : [asum] "=Q"(asum),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/zaxpy.c b/kernel/zarch/zaxpy.c index 9363ec32d..f2c115597 100644 --- a/kernel/zarch/zaxpy.c +++ b/kernel/zarch/zaxpy.c @@ -95,9 +95,9 @@ static void zaxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) { "vst %%v19,112(%%r1,%[y])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[2]; } *) alpha),[alpha] "a"(alpha) + : "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[2]) alpha),[alpha] "a"(alpha) : "cc", "r1", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/zcopy.c b/kernel/zarch/zcopy.c index 5a46aec1c..d91d9f367 100644 --- a/kernel/zarch/zcopy.c +++ b/kernel/zarch/zcopy.c @@ -36,9 +36,9 @@ static void zcopy_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y) { "la %[x],256(%[x])\n\t" "la %[y],256(%[y])\n\t" "brctg %[n],0b" - : "=m"(*(struct { FLOAT x[n * 2]; } *) y),[x] "+&a"(x),[y] "+&a"(y), + : "=m"(*(FLOAT (*)[n * 2]) y),[x] "+&a"(x),[y] "+&a"(y), [n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x) + : "m"(*(const FLOAT (*)[n * 2]) x) : "cc"); } diff --git a/kernel/zarch/zdot.c b/kernel/zarch/zdot.c index ac6e69c23..6b7144101 100644 --- a/kernel/zarch/zdot.c +++ b/kernel/zarch/zdot.c @@ -93,9 +93,9 @@ static void zdot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) { "vsteg %%v24,8(%[d]),1\n\t" "vsteg %%v25,16(%[d]),1\n\t" "vsteg %%v25,24(%[d]),0" - : "=m"(*(struct { FLOAT x[4]; } *) d),[n] "+&r"(n) - : [d] "a"(d), "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[n * 2]; } *) y),[y] "a"(y) + : "=m"(*(FLOAT (*)[4]) d),[n] "+&r"(n) + : [d] "a"(d), "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[n * 2]) y),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/zgemv_n_4.c b/kernel/zarch/zgemv_n_4.c index 13045a359..2ef9b4de8 100644 --- a/kernel/zarch/zgemv_n_4.c +++ b/kernel/zarch/zgemv_n_4.c @@ -112,12 +112,12 @@ static void 
zgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) { "vst %%v1,16(%%r1,%[y])\n\t" "agfi %%r1,32\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap2),[ap2] "a"(ap2), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap3),[ap3] "a"(ap3), - "m"(*(const struct { FLOAT x[8]; } *) x),[x] "a"(x) + : "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n * 2]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n * 2]) ap2),[ap2] "a"(ap2), + "m"(*(const FLOAT (*)[n * 2]) ap3),[ap3] "a"(ap3), + "m"(*(const FLOAT (*)[8]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -172,10 +172,10 @@ static void zgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) { "vst %%v1,16(%%r1,%[y])\n\t" "agfi %%r1,32\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[4]; } *) x),[x] "a"(x) + : "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n * 2]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[4]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } @@ -210,9 +210,9 @@ static void zgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y) { "vst %%v1,16(%%r1,%[y])\n\t" "agfi %%r1,32\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap),[ap] "a"(ap), - "m"(*(const struct { FLOAT x[2]; } *) x),[x] "a"(x) + : "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap),[ap] "a"(ap), + "m"(*(const FLOAT (*)[2]) x),[x] "a"(x) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21"); } @@ -261,8 +261,8 @@ static void add_y_4(BLASLONG n, FLOAT *src, FLOAT *dest, FLOAT alpha_r, "vst %%v31,48(%%r1,%[dest])\n\t" "agfi %%r1,64\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) dest),[n] "+&r"(n) - : [dest] "a"(dest), "m"(*(const struct { FLOAT x[n * 2]; } *) src), + : "+m"(*(FLOAT (*)[n * 2]) dest),[n] "+&r"(n) + : [dest] "a"(dest), "m"(*(const FLOAT (*)[n * 2]) src), [src] "a"(src),[alpha_r] "Q"(alpha_r),[alpha_i] "Q"(alpha_i) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", diff --git a/kernel/zarch/zgemv_t_4.c b/kernel/zarch/zgemv_t_4.c index 031c31e29..c10769266 100644 --- a/kernel/zarch/zgemv_t_4.c +++ b/kernel/zarch/zgemv_t_4.c @@ -141,13 +141,13 @@ static void zgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, "vst %%v27,16(%[y])\n\t" "vst %%v28,32(%[y])\n\t" "vst %%v29,48(%[y])" - : "+m"(*(struct { FLOAT x[8]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap2),[ap2] "a"(ap2), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap3),[ap3] "a"(ap3), - "m"(*(const struct { FLOAT x[n * 2]; } *) 
x),[x] "a"(x), - "m"(*(const struct { FLOAT x[2]; } *) alpha),[alpha] "a"(alpha) + : "+m"(*(FLOAT (*)[8]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n * 2]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n * 2]) ap2),[ap2] "a"(ap2), + "m"(*(const FLOAT (*)[n * 2]) ap3),[ap3] "a"(ap3), + "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[2]) alpha),[alpha] "a"(alpha) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); @@ -229,11 +229,11 @@ static void zgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, "vfmadb %%v23,%%v19,%%v21,%%v23\n\t" "vst %%v22,0(%[y])\n\t" "vst %%v23,16(%[y])\n\t" - : "+m"(*(struct { FLOAT x[4]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap0),[ap0] "a"(ap0), - "m"(*(const struct { FLOAT x[n * 2]; } *) ap1),[ap1] "a"(ap1), - "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[2]; } *) alpha),[alpha] "a"(alpha) + : "+m"(*(FLOAT (*)[4]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap0),[ap0] "a"(ap0), + "m"(*(const FLOAT (*)[n * 2]) ap1),[ap1] "a"(ap1), + "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[2]) alpha),[alpha] "a"(alpha) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } @@ -294,10 +294,10 @@ static void zgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, "vfmadb %%v0,%%v16,%%v18,%%v0\n\t" "vfmadb %%v0,%%v17,%%v19,%%v0\n\t" "vst %%v0,0(%[y])\n\t" - : "+m"(*(struct { FLOAT x[2]; } *) y),[n] "+&r"(n) - : [y] "a"(y), "m"(*(const struct { FLOAT x[n * 2]; } *) ap),[ap] "a"(ap), - "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x), - "m"(*(const struct { FLOAT x[2]; } *) alpha),[alpha] "a"(alpha) + : "+m"(*(FLOAT (*)[2]) y),[n] "+&r"(n) + : [y] "a"(y), "m"(*(const FLOAT (*)[n * 2]) ap),[ap] "a"(ap), + "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x), + "m"(*(const FLOAT (*)[2]) alpha),[alpha] "a"(alpha) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19"); } diff --git a/kernel/zarch/zrot.c b/kernel/zarch/zrot.c index 6284d5a47..3b87e356a 100644 --- a/kernel/zarch/zrot.c +++ b/kernel/zarch/zrot.c @@ -169,8 +169,8 @@ static void zrot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) { "vst %%v23, 240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x), - "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) + : "+m"(*(FLOAT (*)[n * 2]) x), + "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) : [x] "a"(x),[y] "a"(y),[c] "Q"(*c),[s] "Q"(*s) : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", diff --git a/kernel/zarch/zscal.c b/kernel/zarch/zscal.c index e497a6d7b..d39b8447e 100644 --- a/kernel/zarch/zscal.c +++ b/kernel/zarch/zscal.c @@ -25,65 +25,35 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +/* + * Avoid contraction of floating point operations, specifically fused + * multiply-add, because they can cause unexpected results in complex + * multiplication. 
+ */ +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC optimize ("fp-contract=off") +#endif + +#if defined(__clang__) +#pragma clang fp contract(off) +#endif + #include "common.h" +#include "vector-common.h" -static void zscal_kernel_8(BLASLONG n, FLOAT *alpha, FLOAT *x) { - __asm__("vlrepg %%v0,0(%[alpha])\n\t" - "vleg %%v1,8(%[alpha]),0\n\t" - "wflcdb %%v1,%%v1\n\t" - "vleg %%v1,8(%[alpha]),1\n\t" - "srlg %[n],%[n],3\n\t" - "xgr %%r1,%%r1\n\t" - "0:\n\t" - "pfd 2, 1024(%%r1,%[x])\n\t" - "vl %%v16,0(%%r1,%[x])\n\t" - "vl %%v17,16(%%r1,%[x])\n\t" - "vl %%v18,32(%%r1,%[x])\n\t" - "vl %%v19,48(%%r1,%[x])\n\t" - "vl %%v20,64(%%r1,%[x])\n\t" - "vl %%v21,80(%%r1,%[x])\n\t" - "vl %%v22,96(%%r1,%[x])\n\t" - "vl %%v23,112(%%r1,%[x])\n\t" - "vpdi %%v24,%%v16,%%v16,4\n\t" - "vpdi %%v25,%%v17,%%v17,4\n\t" - "vpdi %%v26,%%v18,%%v18,4\n\t" - "vpdi %%v27,%%v19,%%v19,4\n\t" - "vpdi %%v28,%%v20,%%v20,4\n\t" - "vpdi %%v29,%%v21,%%v21,4\n\t" - "vpdi %%v30,%%v22,%%v22,4\n\t" - "vpdi %%v31,%%v23,%%v23,4\n\t" - "vfmdb %%v16,%%v16,%%v0\n\t" - "vfmdb %%v17,%%v17,%%v0\n\t" - "vfmdb %%v18,%%v18,%%v0\n\t" - "vfmdb %%v19,%%v19,%%v0\n\t" - "vfmdb %%v20,%%v20,%%v0\n\t" - "vfmdb %%v21,%%v21,%%v0\n\t" - "vfmdb %%v22,%%v22,%%v0\n\t" - "vfmdb %%v23,%%v23,%%v0\n\t" - "vfmadb %%v16,%%v24,%%v1,%%v16\n\t" - "vfmadb %%v17,%%v25,%%v1,%%v17\n\t" - "vfmadb %%v18,%%v26,%%v1,%%v18\n\t" - "vfmadb %%v19,%%v27,%%v1,%%v19\n\t" - "vfmadb %%v20,%%v28,%%v1,%%v20\n\t" - "vfmadb %%v21,%%v29,%%v1,%%v21\n\t" - "vfmadb %%v22,%%v30,%%v1,%%v22\n\t" - "vfmadb %%v23,%%v31,%%v1,%%v23\n\t" - "vst %%v16,0(%%r1,%[x])\n\t" - "vst %%v17,16(%%r1,%[x])\n\t" - "vst %%v18,32(%%r1,%[x])\n\t" - "vst %%v19,48(%%r1,%[x])\n\t" - "vst %%v20,64(%%r1,%[x])\n\t" - "vst %%v21,80(%%r1,%[x])\n\t" - "vst %%v22,96(%%r1,%[x])\n\t" - "vst %%v23,112(%%r1,%[x])\n\t" - "agfi %%r1,128\n\t" - "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x),[n] "+&r"(n) - : [x] "a"(x), "m"(*(const struct { FLOAT x[2]; } *) alpha), - [alpha] "a"(alpha) - : "cc", "r1", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", - "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", - "v31"); +static void zscal_kernel_8(BLASLONG n, FLOAT da_r, FLOAT da_i, FLOAT *x) { + vector_float da_r_vec = vec_splats(da_r); + vector_float da_i_vec = { -da_i, da_i }; + + vector_float * x_vec_ptr = (vector_float *)x; + +#pragma GCC unroll 16 + for (size_t i = 0; i < n; i++) { + vector_float x_vec = vec_load_hinted(x + i * VLEN_FLOATS); + vector_float x_swapped = {x_vec[1], x_vec[0]}; + + x_vec_ptr[i] = x_vec * da_r_vec + x_swapped * da_i_vec; + } } static void zscal_kernel_8_zero_r(BLASLONG n, FLOAT *alpha, FLOAT *x) { @@ -128,8 +98,8 @@ static void zscal_kernel_8_zero_r(BLASLONG n, FLOAT *alpha, FLOAT *x) { "vst %%v23,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x),[n] "+&r"(n) - : [x] "a"(x), "m"(*(const struct { FLOAT x[2]; } *) alpha), + : "+m"(*(FLOAT (*)[n * 2]) x),[n] "+&r"(n) + : [x] "a"(x), "m"(*(const FLOAT (*)[2]) alpha), [alpha] "a"(alpha) : "cc", "r1", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); @@ -167,8 +137,8 @@ static void zscal_kernel_8_zero_i(BLASLONG n, FLOAT *alpha, FLOAT *x) { "vst %%v23,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x),[n] "+&r"(n) - : [x] "a"(x), "m"(*(const struct { FLOAT x[2]; } *) alpha), + : "+m"(*(FLOAT (*)[n * 2]) x),[n] "+&r"(n) + : [x] "a"(x), "m"(*(const FLOAT (*)[2]) alpha), [alpha] "a"(alpha) : "cc", "r1", "v0", "v16", 
"v17", "v18", "v19", "v20", "v21", "v22", "v23"); @@ -190,19 +160,17 @@ static void zscal_kernel_8_zero(BLASLONG n, FLOAT *x) { "vst %%v0,112(%%r1,%[x])\n\t" "agfi %%r1,128\n\t" "brctg %[n],0b" - : "=m"(*(struct { FLOAT x[n * 2]; } *) x),[n] "+&r"(n) + : "=m"(*(FLOAT (*)[n * 2]) x),[n] "+&r"(n) : [x] "a"(x) : "cc", "r1", "v0"); } -static void zscal_kernel_inc_8(BLASLONG n, FLOAT *alpha, FLOAT *x, +static void zscal_kernel_inc_8(BLASLONG n, FLOAT da_r, FLOAT da_i, FLOAT *x, BLASLONG inc_x) { BLASLONG i; BLASLONG inc_x2 = 2 * inc_x; BLASLONG inc_x3 = inc_x2 + inc_x; FLOAT t0, t1, t2, t3; - FLOAT da_r = alpha[0]; - FLOAT da_i = alpha[1]; for (i = 0; i < n; i += 4) { t0 = da_r * x[0] - da_i * x[1]; @@ -320,9 +288,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, BLASLONG n1 = n & -8; if (n1 > 0) { - alpha[0] = da_r; - alpha[1] = da_i; - zscal_kernel_inc_8(n1, alpha, x, inc_x); + zscal_kernel_inc_8(n1, da_r, da_i, x, inc_x); j = n1; i = n1 * inc_x; } @@ -358,7 +324,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, else if (da_i == 0) zscal_kernel_8_zero_i(n1, alpha, x); else - zscal_kernel_8(n1, alpha, x); + zscal_kernel_8(n1, da_r, da_i, x); i = n1 << 1; j = n1; diff --git a/kernel/zarch/zsum.c b/kernel/zarch/zsum.c index 7cfc1f17f..b35832af8 100644 --- a/kernel/zarch/zsum.c +++ b/kernel/zarch/zsum.c @@ -87,9 +87,9 @@ static FLOAT zsum_kernel_16(BLASLONG n, FLOAT *x) { "vfadb %%v24,%%v24,%%v31\n\t" "vrepg %%v25,%%v24,1\n\t" "vfadb %%v24,%%v24,%%v25\n\t" - "vsteg %%v24,%[asum],0" + "vsteg %%v24,%[sum],0" : [sum] "=Q"(sum),[n] "+&r"(n) - : "m"(*(const struct { FLOAT x[n * 2]; } *) x),[x] "a"(x) + : "m"(*(const FLOAT (*)[n * 2]) x),[x] "a"(x) : "cc", "r1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); diff --git a/kernel/zarch/zswap.c b/kernel/zarch/zswap.c index bc466866c..7a2d1f882 100644 --- a/kernel/zarch/zswap.c +++ b/kernel/zarch/zswap.c @@ -99,8 +99,8 @@ static void zswap_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y) { "vst %%v31, 240(%%r1,%[y])\n\t" "agfi %%r1,256\n\t" "brctg %[n],0b" - : "+m"(*(struct { FLOAT x[n * 2]; } *) x), - "+m"(*(struct { FLOAT x[n * 2]; } *) y),[n] "+&r"(n) + : "+m"(*(FLOAT (*)[n * 2]) x), + "+m"(*(FLOAT (*)[n * 2]) y),[n] "+&r"(n) : [x] "a"(x),[y] "a"(y) : "cc", "r1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", diff --git a/kernel/zarch/ztrmm4x4V.S b/kernel/zarch/ztrmm4x4V.S index 52ee15f06..6fd7f2509 100644 --- a/kernel/zarch/ztrmm4x4V.S +++ b/kernel/zarch/ztrmm4x4V.S @@ -196,7 +196,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,4 nill LOCAL_VAR1,3 #else - la LOCAL_VAR1,3(0,0) + lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x4_BK_Store @@ -256,7 +256,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,4 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L2x4_BK_Store @@ -307,7 +307,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,4 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x4_BK_Store @@ -390,7 +390,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x2_BK_Store @@ -447,7 +447,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK 
/*refresh BK*/ #endif jz .L2x2_BK_Store @@ -497,7 +497,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,2 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x2_BK_Store @@ -573,7 +573,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,4,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L4x1_BK_Store @@ -625,7 +625,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,2,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L2x1_BK_Store @@ -675,7 +675,7 @@ ALIGN_4 RefreshTempBk LOCAL_VAR1,BK,OFF,1,1 nill LOCAL_VAR1,3 #else -la LOCAL_VAR1,3(0,0) +lghi LOCAL_VAR1,3 NGR LOCAL_VAR1,BK /*refresh BK*/ #endif jz .L1x1_BK_Store diff --git a/lapack-netlib/.appveyor.yml b/lapack-netlib/.appveyor.yml new file mode 100644 index 000000000..0c16dcf7b --- /dev/null +++ b/lapack-netlib/.appveyor.yml @@ -0,0 +1,38 @@ +image: +- Visual Studio 2017 + +configuration: Release +clone_depth: 3 + +matrix: + fast_finish: false + +skip_commits: +# Add [av skip] to commit messages + message: /\[av skip\]/ + +cache: + - '%APPVEYOR_BUILD_FOLDER%\build' + +environment: + global: + CONDA_INSTALL_LOCN: C:\\Miniconda36-x64 + +install: + - call %CONDA_INSTALL_LOCN%\Scripts\activate.bat + - conda config --add channels conda-forge --force + - conda install --yes --quiet flang jom + - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64 + - set "LIB=%CONDA_INSTALL_LOCN%\Library\lib;%LIB%" + - set "CPATH=%CONDA_INSTALL_LOCN%\Library\include;%CPATH%" + +before_build: + - ps: if (-Not (Test-Path .\build)) { mkdir build } + - cd build + - cmake -G "NMake Makefiles JOM" -DCMAKE_Fortran_COMPILER=flang -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=ON .. + +build_script: + - cmake --build . 
+ +test_script: + - ctest -j2 diff --git a/lapack-netlib/.gitignore b/lapack-netlib/.gitignore index 4ac90962e..015f09d77 100644 --- a/lapack-netlib/.gitignore +++ b/lapack-netlib/.gitignore @@ -35,3 +35,9 @@ LAPACKE/example/xexample* # SED SRC/*-e LAPACKE/src/*-e +build* + +# DOCS documentation +DOCS/man +DOCS/explore-html +output_err diff --git a/lapack-netlib/.travis.yml b/lapack-netlib/.travis.yml index 68cfa607a..04369dafb 100644 --- a/lapack-netlib/.travis.yml +++ b/lapack-netlib/.travis.yml @@ -1,33 +1,32 @@ -language: cpp +language: c +dist: xenial +group: travis_latest + +git: + depth: 3 + quiet: true addons: apt: - sources: - - george-edison55-precise-backports # cmake packages: - - cmake - - cmake-data - - gfortran - -os: - - linux - - osx - -env: - - CMAKE_BUILD_TYPE=Release - - CMAKE_BUILD_TYPE=Coverage + - gfortran -install: - - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; - then - for pkg in gcc cmake; do - if brew list -1 | grep -q "^${pkg}\$"; then - brew outdated $pkg || brew upgrade $pkg; - else - brew install $pkg; - fi - done - fi +matrix: + include: + - os: linux + env: CMAKE_BUILD_TYPE=Release + - os: linux + env: CMAKE_BUILD_TYPE=Coverage + - os: osx + env: CMAKE_BUILD_TYPE=Release + before_install: + - brew update > /dev/null + - brew install gcc > /dev/null + - os: osx + env: CMAKE_BUILD_TYPE=Coverage + before_install: + - brew update > /dev/null + - brew install gcc > /dev/null script: - export PR=https://api.github.com/repos/$TRAVIS_REPO_SLUG/pulls/$TRAVIS_PULL_REQUEST diff --git a/lapack-netlib/BLAS/CMakeLists.txt b/lapack-netlib/BLAS/CMakeLists.txt index e122b2b33..ee5676fc6 100644 --- a/lapack-netlib/BLAS/CMakeLists.txt +++ b/lapack-netlib/BLAS/CMakeLists.txt @@ -6,4 +6,5 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/blas.pc.in ${CMAKE_CURRENT_BINARY_DIR install(FILES ${CMAKE_CURRENT_BINARY_DIR}/blas.pc DESTINATION ${PKG_CONFIG_DIR} + COMPONENT Development ) diff --git a/lapack-netlib/BLAS/Makefile b/lapack-netlib/BLAS/Makefile index f9c4b534c..088ea5d50 100644 --- a/lapack-netlib/BLAS/Makefile +++ b/lapack-netlib/BLAS/Makefile @@ -1,13 +1,18 @@ -include ../make.inc +TOPSRCDIR = .. +include $(TOPSRCDIR)/make.inc +.PHONY: all all: blas +.PHONY: blas blas: $(MAKE) -C SRC +.PHONY: blas_testing blas_testing: blas $(MAKE) -C TESTING run +.PHONY: clean cleanobj cleanlib cleanexe cleantest clean: $(MAKE) -C SRC clean $(MAKE) -C TESTING clean diff --git a/lapack-netlib/BLAS/SRC/Makefile b/lapack-netlib/BLAS/SRC/Makefile index a436365aa..66bb96421 100644 --- a/lapack-netlib/BLAS/SRC/Makefile +++ b/lapack-netlib/BLAS/SRC/Makefile @@ -1,5 +1,3 @@ -include ../../make.inc - ####################################################################### # This is the makefile to create a library for the BLAS. # The files are grouped as follows: @@ -55,6 +53,10 @@ include ../../make.inc # ####################################################################### +TOPSRCDIR = ../.. 
+include $(TOPSRCDIR)/make.inc + +.PHONY: all all: $(BLASLIB) #--------------------------------------------------------- @@ -138,33 +140,32 @@ ALLOBJ = $(SBLAS1) $(SBLAS2) $(SBLAS3) $(DBLAS1) $(DBLAS2) $(DBLAS3) \ $(ZBLAS2) $(ZBLAS3) $(ALLBLAS) $(BLASLIB): $(ALLOBJ) - $(ARCH) $(ARCHFLAGS) $@ $^ + $(AR) $(ARFLAGS) $@ $^ $(RANLIB) $@ +.PHONY: single double complex complex16 single: $(SBLAS1) $(ALLBLAS) $(SBLAS2) $(SBLAS3) - $(ARCH) $(ARCHFLAGS) $(BLASLIB) $^ + $(AR) $(ARFLAGS) $(BLASLIB) $^ $(RANLIB) $(BLASLIB) double: $(DBLAS1) $(ALLBLAS) $(DBLAS2) $(DBLAS3) - $(ARCH) $(ARCHFLAGS) $(BLASLIB) $^ + $(AR) $(ARFLAGS) $(BLASLIB) $^ $(RANLIB) $(BLASLIB) complex: $(CBLAS1) $(CB1AUX) $(ALLBLAS) $(CBLAS2) $(CBLAS3) - $(ARCH) $(ARCHFLAGS) $(BLASLIB) $^ + $(AR) $(ARFLAGS) $(BLASLIB) $^ $(RANLIB) $(BLASLIB) complex16: $(ZBLAS1) $(ZB1AUX) $(ALLBLAS) $(ZBLAS2) $(ZBLAS3) - $(ARCH) $(ARCHFLAGS) $(BLASLIB) $^ + $(AR) $(ARFLAGS) $(BLASLIB) $^ $(RANLIB) $(BLASLIB) FRC: @FRC=$(FRC) +.PHONY: clean cleanobj cleanlib clean: cleanobj cleanlib cleanobj: rm -f *.o cleanlib: #rm -f $(BLASLIB) # May point to a system lib, e.g. -lblas - -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< diff --git a/lapack-netlib/BLAS/SRC/icamax.f b/lapack-netlib/BLAS/SRC/icamax.f index 8057ab095..02bc90ae4 100644 --- a/lapack-netlib/BLAS/SRC/icamax.f +++ b/lapack-netlib/BLAS/SRC/icamax.f @@ -43,7 +43,7 @@ *> \param[in] INCX *> \verbatim *> INCX is INTEGER -*> storage spacing between elements of SX +*> storage spacing between elements of CX *> \endverbatim * * Authors: diff --git a/lapack-netlib/BLAS/SRC/idamax.f b/lapack-netlib/BLAS/SRC/idamax.f index 7268534db..1578ea950 100644 --- a/lapack-netlib/BLAS/SRC/idamax.f +++ b/lapack-netlib/BLAS/SRC/idamax.f @@ -43,7 +43,7 @@ *> \param[in] INCX *> \verbatim *> INCX is INTEGER -*> storage spacing between elements of SX +*> storage spacing between elements of DX *> \endverbatim * * Authors: diff --git a/lapack-netlib/BLAS/SRC/izamax.f b/lapack-netlib/BLAS/SRC/izamax.f index 63d8e97de..c0aabb7bc 100644 --- a/lapack-netlib/BLAS/SRC/izamax.f +++ b/lapack-netlib/BLAS/SRC/izamax.f @@ -43,7 +43,7 @@ *> \param[in] INCX *> \verbatim *> INCX is INTEGER -*> storage spacing between elements of SX +*> storage spacing between elements of ZX *> \endverbatim * * Authors: diff --git a/lapack-netlib/BLAS/SRC/meson.build b/lapack-netlib/BLAS/SRC/meson.build new file mode 100644 index 000000000..8d96f2acd --- /dev/null +++ b/lapack-netlib/BLAS/SRC/meson.build @@ -0,0 +1,29 @@ +SBLAS1 = files('isamax.f', 'sasum.f', 'saxpy.f', 'scopy.f', 'sdot.f', 'snrm2.f', 'srot.f', 'srotg.f', 'sscal.f', 'sswap.f', 'sdsdot.f', 'srotmg.f', 'srotm.f') + +CBLAS1 = files('scabs1.f', 'scasum.f', 'scnrm2.f', 'icamax.f', 'caxpy.f', 'ccopy.f', 'cdotc.f', 'cdotu.f', 'csscal.f', 'crotg.f', 'cscal.f', 'cswap.f', 'csrot.f') + +DBLAS1 = files('idamax.f', 'dasum.f', 'daxpy.f', 'dcopy.f', 'ddot.f', 'dnrm2.f', 'drot.f', 'drotg.f', 'dscal.f', 'dsdot.f', 'dswap.f', 'drotmg.f', 'drotm.f') + +ZBLAS1 = files('dcabs1.f', 'dzasum.f', 'dznrm2.f', 'izamax.f', 'zaxpy.f', 'zcopy.f', 'zdotc.f', 'zdotu.f', 'zdscal.f', 'zrotg.f', 'zscal.f', 'zswap.f', 'zdrot.f') + +CB1AUX = files('isamax.f', 'sasum.f', 'saxpy.f', 'scopy.f', 'snrm2.f', 'sscal.f') + +ZB1AUX = files('idamax.f', 'dasum.f', 'daxpy.f', 'dcopy.f', 'dnrm2.f', 'dscal.f') + +ALLBLAS = files('lsame.f', 'xerbla.f', 'xerbla_array.f') + +SBLAS2 = files('sgemv.f', 'sgbmv.f', 'ssymv.f', 'ssbmv.f', 'sspmv.f', 'strmv.f', 'stbmv.f', 'stpmv.f', 'strsv.f', 'stbsv.f', 'stpsv.f', 'sger.f', 'ssyr.f', 'sspr.f', 'ssyr2.f', 
'sspr2.f') + +CBLAS2 = files('cgemv.f', 'cgbmv.f', 'chemv.f', 'chbmv.f', 'chpmv.f', 'ctrmv.f', 'ctbmv.f', 'ctpmv.f', 'ctrsv.f', 'ctbsv.f', 'ctpsv.f', 'cgerc.f', 'cgeru.f', 'cher.f', 'chpr.f', 'cher2.f', 'chpr2.f') + +DBLAS2 = files('dgemv.f', 'dgbmv.f', 'dsymv.f', 'dsbmv.f', 'dspmv.f', 'dtrmv.f', 'dtbmv.f', 'dtpmv.f', 'dtrsv.f', 'dtbsv.f', 'dtpsv.f', 'dger.f', 'dsyr.f', 'dspr.f', 'dsyr2.f', 'dspr2.f') + +ZBLAS2 = files('zgemv.f', 'zgbmv.f', 'zhemv.f', 'zhbmv.f', 'zhpmv.f', 'ztrmv.f', 'ztbmv.f', 'ztpmv.f', 'ztrsv.f', 'ztbsv.f', 'ztpsv.f', 'zgerc.f', 'zgeru.f', 'zher.f', 'zhpr.f', 'zher2.f', 'zhpr2.f') + +SBLAS3 = files('sgemm.f', 'ssymm.f', 'ssyrk.f', 'ssyr2k.f', 'strmm.f', 'strsm.f') + +CBLAS3 = files('cgemm.f', 'csymm.f', 'csyrk.f', 'csyr2k.f', 'ctrmm.f', 'ctrsm.f', 'chemm.f', 'cherk.f', 'cher2k.f') + +DBLAS3 = files('dgemm.f', 'dsymm.f', 'dsyrk.f', 'dsyr2k.f', 'dtrmm.f', 'dtrsm.f') + +ZBLAS3 = files('zgemm.f', 'zsymm.f', 'zsyrk.f', 'zsyr2k.f', 'ztrmm.f', 'ztrsm.f', 'zhemm.f', 'zherk.f', 'zher2k.f') diff --git a/lapack-netlib/BLAS/SRC/sdsdot.f b/lapack-netlib/BLAS/SRC/sdsdot.f index a0ec32b6f..a491e6982 100644 --- a/lapack-netlib/BLAS/SRC/sdsdot.f +++ b/lapack-netlib/BLAS/SRC/sdsdot.f @@ -23,13 +23,13 @@ *> *> \verbatim *> -* Compute the inner product of two vectors with extended -* precision accumulation. -* -* Returns S.P. result with dot product accumulated in D.P. -* SDSDOT = SB + sum for I = 0 to N-1 of SX(LX+I*INCX)*SY(LY+I*INCY), -* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is -* defined in a similar way using INCY. +*> Compute the inner product of two vectors with extended +*> precision accumulation. +*> +*> Returns S.P. result with dot product accumulated in D.P. +*> SDSDOT = SB + sum for I = 0 to N-1 of SX(LX+I*INCX)*SY(LY+I*INCY), +*> where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is +*> defined in a similar way using INCY. *> \endverbatim * * Arguments: @@ -77,7 +77,14 @@ *> \author Lawson, C. L., (JPL), Hanson, R. J., (SNLA), *> \author Kincaid, D. R., (U. of Texas), Krogh, F. T., (JPL) * -*> \ingroup complex_blas_level1 +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2017 +* +*> \ingroup single_blas_level1 * *> \par Further Details: * ===================== @@ -102,65 +109,7 @@ *> 920501 Reformatted the REFERENCES section. (WRB) *> 070118 Reformat to LAPACK coding style *> \endverbatim -* -* ===================================================================== -* -* .. Local Scalars .. -* DOUBLE PRECISION DSDOT -* INTEGER I,KX,KY,NS -* .. -* .. Intrinsic Functions .. -* INTRINSIC DBLE -* .. -* DSDOT = SB -* IF (N.LE.0) THEN -* SDSDOT = DSDOT -* RETURN -* END IF -* IF (INCX.EQ.INCY .AND. INCX.GT.0) THEN -* -* Code for equal and positive increments. -* -* NS = N*INCX -* DO I = 1,NS,INCX -* DSDOT = DSDOT + DBLE(SX(I))*DBLE(SY(I)) -* END DO -* ELSE -* -* Code for unequal or nonpositive increments. -* -* KX = 1 -* KY = 1 -* IF (INCX.LT.0) KX = 1 + (1-N)*INCX -* IF (INCY.LT.0) KY = 1 + (1-N)*INCY -* DO I = 1,N -* DSDOT = DSDOT + DBLE(SX(KX))*DBLE(SY(KY)) -* KX = KX + INCX -* KY = KY + INCY -* END DO -* END IF -* SDSDOT = DSDOT -* RETURN -* END -* -*> \par Purpose: -* ============= *> -*> \verbatim -*> \endverbatim -* -* Authors: -* ======== -* -*> \author Univ. of Tennessee -*> \author Univ. of California Berkeley -*> \author Univ. of Colorado Denver -*> \author NAG Ltd. 
-* -*> \date November 2017 -* -*> \ingroup single_blas_level1 -* * ===================================================================== REAL FUNCTION SDSDOT(N,SB,SX,INCX,SY,INCY) * @@ -175,71 +124,6 @@ * .. * .. Array Arguments .. REAL SX(*),SY(*) -* .. -* -* PURPOSE -* ======= -* -* Compute the inner product of two vectors with extended -* precision accumulation. -* -* Returns S.P. result with dot product accumulated in D.P. -* SDSDOT = SB + sum for I = 0 to N-1 of SX(LX+I*INCX)*SY(LY+I*INCY), -* where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is -* defined in a similar way using INCY. -* -* AUTHOR -* ====== -* Lawson, C. L., (JPL), Hanson, R. J., (SNLA), -* Kincaid, D. R., (U. of Texas), Krogh, F. T., (JPL) -* -* ARGUMENTS -* ========= -* -* N (input) INTEGER -* number of elements in input vector(s) -* -* SB (input) REAL -* single precision scalar to be added to inner product -* -* SX (input) REAL array, dimension (N) -* single precision vector with N elements -* -* INCX (input) INTEGER -* storage spacing between elements of SX -* -* SY (input) REAL array, dimension (N) -* single precision vector with N elements -* -* INCY (input) INTEGER -* storage spacing between elements of SY -* -* SDSDOT (output) REAL -* single precision dot product (SB if N .LE. 0) -* -* Further Details -* =============== -* -* REFERENCES -* -* C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. -* Krogh, Basic linear algebra subprograms for Fortran -* usage, Algorithm No. 539, Transactions on Mathematical -* Software 5, 3 (September 1979), pp. 308-323. -* -* REVISION HISTORY (YYMMDD) -* -* 791001 DATE WRITTEN -* 890531 Changed all specific intrinsics to generic. (WRB) -* 890831 Modified array declarations. (WRB) -* 890831 REVISION DATE from Version 3.2 -* 891214 Prologue converted to Version 4.0 format. (BAB) -* 920310 Corrected definition of LX in DESCRIPTION. (WRB) -* 920501 Reformatted the REFERENCES section. (WRB) -* 070118 Reformat to LAPACK coding style -* -* ===================================================================== -* * .. Local Scalars .. DOUBLE PRECISION DSDOT INTEGER I,KX,KY,NS diff --git a/lapack-netlib/BLAS/TESTING/Makefile b/lapack-netlib/BLAS/TESTING/Makefile index 97150b1a3..5b3b0d6ee 100644 --- a/lapack-netlib/BLAS/TESTING/Makefile +++ b/lapack-netlib/BLAS/TESTING/Makefile @@ -1,5 +1,7 @@ -include ../../make.inc +TOPSRCDIR = ../.. 
+include $(TOPSRCDIR)/make.inc +.PHONY: all single double complex complex16 all: single double complex complex16 single: xblat1s xblat2s xblat3s double: xblat1d xblat2d xblat3d @@ -7,32 +9,33 @@ complex: xblat1c xblat2c xblat3c complex16: xblat1z xblat2z xblat3z xblat1s: sblat1.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat1d: dblat1.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat1c: cblat1.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat1z: zblat1.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat2s: sblat2.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat2d: dblat2.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat2c: cblat2.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat2z: zblat2.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat3s: sblat3.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat3d: dblat3.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat3c: cblat3.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xblat3z: zblat3.o $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ +.PHONY: run run: all ./xblat1s > sblat1.out ./xblat1d > dblat1.out @@ -47,6 +50,7 @@ run: all ./xblat3c < cblat3.in ./xblat3z < zblat3.in +.PHONY: clean cleanobj cleanexe cleantest clean: cleanobj cleanexe cleantest cleanobj: rm -f *.o @@ -54,6 +58,3 @@ cleanexe: rm -f xblat* cleantest: rm -f *.out core - -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< diff --git a/lapack-netlib/BLAS/TESTING/cblat1.f b/lapack-netlib/BLAS/TESTING/cblat1.f index 036dca3e0..ecf2a44cb 100644 --- a/lapack-netlib/BLAS/TESTING/cblat1.f +++ b/lapack-netlib/BLAS/TESTING/cblat1.f @@ -619,7 +619,7 @@ SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** * -* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. * diff --git a/lapack-netlib/BLAS/TESTING/dblat1.f b/lapack-netlib/BLAS/TESTING/dblat1.f index f3255fef4..28af121cd 100644 --- a/lapack-netlib/BLAS/TESTING/dblat1.f +++ b/lapack-netlib/BLAS/TESTING/dblat1.f @@ -991,7 +991,7 @@ SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** * -* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. * diff --git a/lapack-netlib/BLAS/TESTING/sblat1.f b/lapack-netlib/BLAS/TESTING/sblat1.f index a5c1c6af6..fe05bbe87 100644 --- a/lapack-netlib/BLAS/TESTING/sblat1.f +++ b/lapack-netlib/BLAS/TESTING/sblat1.f @@ -946,7 +946,7 @@ SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** * -* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. 
* diff --git a/lapack-netlib/BLAS/TESTING/zblat1.f b/lapack-netlib/BLAS/TESTING/zblat1.f index 4b0bcf884..2d7b88490 100644 --- a/lapack-netlib/BLAS/TESTING/zblat1.f +++ b/lapack-netlib/BLAS/TESTING/zblat1.f @@ -619,7 +619,7 @@ SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** * -* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. * diff --git a/lapack-netlib/CBLAS/CMakeLists.txt b/lapack-netlib/CBLAS/CMakeLists.txt index d9fa24530..04c5ab795 100644 --- a/lapack-netlib/CBLAS/CMakeLists.txt +++ b/lapack-netlib/CBLAS/CMakeLists.txt @@ -12,8 +12,10 @@ FortranCInterface_HEADER(${LAPACK_BINARY_DIR}/include/cblas_mangling.h SYMBOL_NAMESPACE "F77_") if(NOT FortranCInterface_GLOBAL_FOUND OR NOT FortranCInterface_MODULE_FOUND) message(WARNING "Reverting to pre-defined include/lapacke_mangling.h") - configure_file(include/lapacke_mangling_with_flags.h.in - ${LAPACK_BINARY_DIR}/include/lapacke_mangling.h) + configure_file(include/lapacke_mangling_with_flags.h.in + ${LAPACK_BINARY_DIR}/include/lapacke_mangling.h) + configure_file(include/cblas_mangling_with_flags.h.in + ${LAPACK_BINARY_DIR}/include/cblas_mangling.h) endif() include_directories(include ${LAPACK_BINARY_DIR}/include) @@ -28,7 +30,10 @@ endforeach() endmacro() append_subdir_files(CBLAS_INCLUDE "include") -install(FILES ${CBLAS_INCLUDE} ${LAPACK_BINARY_DIR}/include/cblas_mangling.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) +install(FILES ${CBLAS_INCLUDE} ${LAPACK_BINARY_DIR}/include/cblas_mangling.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + COMPONENT Development + ) # -------------------------------------------------- if(BUILD_TESTING) @@ -45,7 +50,9 @@ endif() set(_cblas_config_install_guard_target "") if(ALL_TARGETS) install(EXPORT cblas-targets - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/cblas-${LAPACK_VERSION}) + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/cblas-${LAPACK_VERSION} + COMPONENT Development + ) # Choose one of the cblas targets to use as a guard for # cblas-config.cmake to load targets from the install tree. list(GET ALL_TARGETS 0 _cblas_config_install_guard_target) @@ -82,4 +89,6 @@ install(FILES ) #install(EXPORT cblas-targets -# DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/cblas-${LAPACK_VERSION}) +# DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/cblas-${LAPACK_VERSION} +# COMPONENT Development +# ) diff --git a/lapack-netlib/CBLAS/Makefile b/lapack-netlib/CBLAS/Makefile index 513e8fc82..6e199cdce 100644 --- a/lapack-netlib/CBLAS/Makefile +++ b/lapack-netlib/CBLAS/Makefile @@ -1,19 +1,25 @@ -include ../make.inc +TOPSRCDIR = .. +include $(TOPSRCDIR)/make.inc +.PHONY: all all: cblas +.PHONY: cblas cblas: include/cblas_mangling.h $(MAKE) -C src include/cblas_mangling.h: include/cblas_mangling_with_flags.h.in - cp $< $@ + cp include/cblas_mangling_with_flags.h.in $@ +.PHONY: cblas_testing cblas_testing: cblas $(MAKE) -C testing run +.PHONY: cblas_example cblas_example: cblas $(MAKE) -C examples +.PHONY: clean cleanobj cleanlib cleanexe cleantest clean: $(MAKE) -C src clean $(MAKE) -C testing clean diff --git a/lapack-netlib/CBLAS/examples/Makefile b/lapack-netlib/CBLAS/examples/Makefile index 664b8bc57..84acd6561 100644 --- a/lapack-netlib/CBLAS/examples/Makefile +++ b/lapack-netlib/CBLAS/examples/Makefile @@ -1,17 +1,21 @@ -include ../../make.inc +TOPSRCDIR = ../.. 
+include $(TOPSRCDIR)/make.inc +.SUFFIXES: .c .o +.c.o: + $(CC) $(CFLAGS) -I../include -c -o $@ $< + +.PHONY: all all: cblas_ex1 cblas_ex2 cblas_ex1: cblas_example1.o $(CBLASLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ cblas_ex2: cblas_example2.o $(CBLASLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ +.PHONY: clean cleanobj cleanexe clean: cleanobj cleanexe cleanobj: rm -f *.o cleanexe: rm -f cblas_ex1 cblas_ex2 - -.c.o: - $(CC) $(CFLAGS) -I../include -c -o $@ $< diff --git a/lapack-netlib/CBLAS/examples/cblas_example1.c b/lapack-netlib/CBLAS/examples/cblas_example1.c index c3acd554d..3d5ed330c 100644 --- a/lapack-netlib/CBLAS/examples/cblas_example1.c +++ b/lapack-netlib/CBLAS/examples/cblas_example1.c @@ -47,7 +47,7 @@ int main ( ) a[m*3+1] = 6; a[m*3+2] = 7; a[m*3+3] = 8; - /* The elemetns of x and y */ + /* The elements of x and y */ x[0] = 1; x[1] = 2; x[2] = 1; diff --git a/lapack-netlib/CBLAS/src/Makefile b/lapack-netlib/CBLAS/src/Makefile index 6c0518ac7..7100568e4 100644 --- a/lapack-netlib/CBLAS/src/Makefile +++ b/lapack-netlib/CBLAS/src/Makefile @@ -1,7 +1,13 @@ # This Makefile compiles the CBLAS routines -include ../../make.inc +TOPSRCDIR = ../.. +include $(TOPSRCDIR)/make.inc +.SUFFIXES: .c .o +.c.o: + $(CC) $(CFLAGS) -I../include -c -o $@ $< + +.PHONY: all all: $(CBLASLIB) # Error handling routines for level 2 & 3 @@ -43,24 +49,25 @@ zlev1 = cblas_zswap.o cblas_zscal.o cblas_zdscal.o cblas_zcopy.o \ # Common files for level 1 single precision sclev1 = cblas_scasum.o scasumsub.o cblas_scnrm2.o scnrm2sub.o +.PHONY: slib1 dlib1 clib1 zlib1 # Single precision real slib1: $(slev1) $(sclev1) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Double precision real dlib1: $(dlev1) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Single precision complex clib1: $(clev1) $(sclev1) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Double precision complex zlib1: $(zlev1) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # @@ -95,24 +102,25 @@ zlev2 = cblas_zgemv.o cblas_zgbmv.o cblas_zhemv.o cblas_zhbmv.o cblas_zhpmv.o \ cblas_ztpsv.o cblas_zgeru.o cblas_zgerc.o cblas_zher.o cblas_zher2.o \ cblas_zhpr.o cblas_zhpr2.o +.PHONY: slib2 dlib2 clib2 zlib2 # Single precision real slib2: $(slev2) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Double precision real dlib2: $(dlev2) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Single precision complex clib2: $(clev2) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Double precision complex zlib2: $(zlev2) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # @@ -141,24 +149,25 @@ zlev3 = cblas_zgemm.o cblas_zsymm.o cblas_zhemm.o cblas_zherk.o \ cblas_zher2k.o cblas_ztrmm.o cblas_ztrsm.o cblas_zsyrk.o \ cblas_zsyr2k.o +.PHONY: slib3 dlib3 clib3 zlib3 # Single precision real slib3: $(slev3) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Double precision real dlib3: $(dlev3) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Single precision complex 
clib3: $(clev3) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # Double precision complex zlib3: $(zlev3) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) @@ -166,36 +175,33 @@ alev1 = $(slev1) $(dlev1) $(clev1) $(zlev1) $(sclev1) alev2 = $(slev2) $(dlev2) $(clev2) $(zlev2) alev3 = $(slev3) $(dlev3) $(clev3) $(zlev3) +.PHONY: all1 all2 all3 # All level 1 all1: $(alev1) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # All level 2 all2: $(alev2) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # All level 3 all3: $(alev3) $(errhand) - $(ARCH) $(ARCHFLAGS) $(CBLASLIB) $^ + $(AR) $(ARFLAGS) $(CBLASLIB) $^ $(RANLIB) $(CBLASLIB) # All levels and precisions $(CBLASLIB): $(alev1) $(alev2) $(alev3) $(errhand) - $(ARCH) $(ARCHFLAGS) $@ $^ + $(AR) $(ARFLAGS) $@ $^ $(RANLIB) $@ FRC: @FRC=$(FRC) +.PHONY: clean cleanobj cleanlib clean: cleanobj cleanlib cleanobj: rm -f *.o cleanlib: rm -f $(CBLASLIB) - -.c.o: - $(CC) $(CFLAGS) -I../include -c -o $@ $< -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< diff --git a/lapack-netlib/CBLAS/src/cblas_sgemm.c b/lapack-netlib/CBLAS/src/cblas_sgemm.c index c4a49a2db..51cd7d18a 100644 --- a/lapack-netlib/CBLAS/src/cblas_sgemm.c +++ b/lapack-netlib/CBLAS/src/cblas_sgemm.c @@ -91,7 +91,7 @@ void cblas_sgemm(const CBLAS_LAYOUT layout, const CBLAS_TRANSPOSE TransA, else { cblas_xerbla(2, "cblas_sgemm", - "Illegal TransA setting, %d\n", TransA); + "Illegal TransB setting, %d\n", TransB); CBLAS_CallFromC = 0; RowMajorStrg = 0; return; diff --git a/lapack-netlib/CBLAS/testing/Makefile b/lapack-netlib/CBLAS/testing/Makefile index 0182c3e88..e3b615b41 100644 --- a/lapack-netlib/CBLAS/testing/Makefile +++ b/lapack-netlib/CBLAS/testing/Makefile @@ -2,7 +2,12 @@ # The Makefile compiles c wrappers and testers for CBLAS. # -include ../../make.inc +TOPSRCDIR = ../.. 
+include $(TOPSRCDIR)/make.inc + +.SUFFIXES: .c .o +.c.o: + $(CC) $(CFLAGS) -I../include -c -o $@ $< # Archive files necessary to compile LIB = $(CBLASLIB) $(BLASLIB) @@ -27,6 +32,7 @@ ztestl1o = c_zblas1.o ztestl2o = c_zblas2.o c_z2chke.o auxiliary.o c_xerbla.o ztestl3o = c_zblas3.o c_z3chke.o auxiliary.o c_xerbla.o +.PHONY: all all1 all2 all3 all: all1 all2 all3 all1: xscblat1 xdcblat1 xccblat1 xzcblat1 all2: xscblat2 xdcblat2 xccblat2 xzcblat2 @@ -38,37 +44,38 @@ all3: xscblat3 xdcblat3 xccblat3 xzcblat3 # Single real xscblat1: c_sblat1.o $(stestl1o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xscblat2: c_sblat2.o $(stestl2o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xscblat3: c_sblat3.o $(stestl3o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ # Double real xdcblat1: c_dblat1.o $(dtestl1o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xdcblat2: c_dblat2.o $(dtestl2o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xdcblat3: c_dblat3.o $(dtestl3o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ # Single complex xccblat1: c_cblat1.o $(ctestl1o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xccblat2: c_cblat2.o $(ctestl2o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xccblat3: c_cblat3.o $(ctestl3o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ # Double complex xzcblat1: c_zblat1.o $(ztestl1o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xzcblat2: c_zblat2.o $(ztestl2o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ xzcblat3: c_zblat3.o $(ztestl3o) $(LIB) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ # RUN TESTS +.PHONY: run run: all @echo "--> TESTING CBLAS 1 - SINGLE PRECISION REAL <--" @./xscblat1 > stest1.out @@ -95,6 +102,7 @@ run: all @echo "--> TESTING CBLAS 3 - DOUBLE PRECISION COMPLEX <--" @./xzcblat3 < zin3 > ztest3.out +.PHONY: clean cleanobj cleanexe cleantest clean: cleanobj cleanexe cleantest cleanobj: rm -f *.o @@ -102,9 +110,3 @@ cleanexe: rm -f x* cleantest: rm -f *.out core - -.SUFFIXES: .o .f .c -.c.o: - $(CC) $(CFLAGS) -I../include -c -o $@ $< -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< diff --git a/lapack-netlib/CBLAS/testing/c_cblat1.f b/lapack-netlib/CBLAS/testing/c_cblat1.f index c741ce506..1a123d74d 100644 --- a/lapack-netlib/CBLAS/testing/c_cblat1.f +++ b/lapack-netlib/CBLAS/testing/c_cblat1.f @@ -577,7 +577,7 @@ SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** * -* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. 
* diff --git a/lapack-netlib/CBLAS/testing/c_dblat1.f b/lapack-netlib/CBLAS/testing/c_dblat1.f index c570a9140..4a71b4dcf 100644 --- a/lapack-netlib/CBLAS/testing/c_dblat1.f +++ b/lapack-netlib/CBLAS/testing/c_dblat1.f @@ -653,7 +653,7 @@ SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** * -* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. * diff --git a/lapack-netlib/CBLAS/testing/c_sblat1.f b/lapack-netlib/CBLAS/testing/c_sblat1.f index 773787d6f..89902f12d 100644 --- a/lapack-netlib/CBLAS/testing/c_sblat1.f +++ b/lapack-netlib/CBLAS/testing/c_sblat1.f @@ -653,7 +653,7 @@ SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** * -* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. * diff --git a/lapack-netlib/CBLAS/testing/c_zblat1.f b/lapack-netlib/CBLAS/testing/c_zblat1.f index 03753e782..cd0c8541d 100644 --- a/lapack-netlib/CBLAS/testing/c_zblat1.f +++ b/lapack-netlib/CBLAS/testing/c_zblat1.f @@ -577,7 +577,7 @@ SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** * -* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. * diff --git a/lapack-netlib/CMAKE/CheckLAPACKCompilerFlags.cmake b/lapack-netlib/CMAKE/CheckLAPACKCompilerFlags.cmake index acc51629e..add0d1797 100644 --- a/lapack-netlib/CMAKE/CheckLAPACKCompilerFlags.cmake +++ b/lapack-netlib/CMAKE/CheckLAPACKCompilerFlags.cmake @@ -1,4 +1,4 @@ -# This module checks against various known compilers and thier respective +# This module checks against various known compilers and their respective # flags to determine any specific flags needing to be set. # # 1. If FPE traps are enabled either abort or disable them diff --git a/lapack-netlib/CMAKE/FindGcov.cmake b/lapack-netlib/CMAKE/FindGcov.cmake index 4807f903e..3d4c0a2a0 100644 --- a/lapack-netlib/CMAKE/FindGcov.cmake +++ b/lapack-netlib/CMAKE/FindGcov.cmake @@ -20,7 +20,7 @@ set(CMAKE_REQUIRED_QUIET ${codecov_FIND_QUIETLY}) get_property(ENABLED_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) foreach (LANG ${ENABLED_LANGUAGES}) - # Gcov evaluation is dependend on the used compiler. Check gcov support for + # Gcov evaluation is dependent on the used compiler. Check gcov support for # each compiler that is used. If gcov binary was already found for this # compiler, do not try to find it again. if(NOT GCOV_${CMAKE_${LANG}_COMPILER_ID}_BIN) diff --git a/lapack-netlib/CMAKE/Findcodecov.cmake b/lapack-netlib/CMAKE/Findcodecov.cmake index 1f33b2c09..384064007 100644 --- a/lapack-netlib/CMAKE/Findcodecov.cmake +++ b/lapack-netlib/CMAKE/Findcodecov.cmake @@ -42,7 +42,7 @@ set(CMAKE_REQUIRED_QUIET ${codecov_FIND_QUIETLY}) get_property(ENABLED_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) foreach (LANG ${ENABLED_LANGUAGES}) - # Coverage flags are not dependend on language, but the used compiler. So + # Coverage flags are not dependent on language, but the used compiler. 
So # instead of searching flags foreach language, search flags foreach compiler # used. set(COMPILER ${CMAKE_${LANG}_COMPILER_ID}) diff --git a/lapack-netlib/CMAKE/FortranMangling.cmake b/lapack-netlib/CMAKE/FortranMangling.cmake index d772dc9bb..734ab6f4c 100644 --- a/lapack-netlib/CMAKE/FortranMangling.cmake +++ b/lapack-netlib/CMAKE/FortranMangling.cmake @@ -24,7 +24,7 @@ message(STATUS "=========") set(F77_OUTPUT_EXE "/Fe" CACHE INTERNAL "Fortran compiler option for setting executable file name.") else() - # in other case, let user specify their fortran configrations. + # in other case, let user specify their fortran configurations. set(F77_OPTION_COMPILE "-c" CACHE STRING "Fortran compiler option for compiling without linking.") set(F77_OUTPUT_OBJ "-o" CACHE STRING diff --git a/lapack-netlib/CMAKE/lapack-config-build.cmake.in b/lapack-netlib/CMAKE/lapack-config-build.cmake.in index 1d084fe13..f7e041663 100644 --- a/lapack-netlib/CMAKE/lapack-config-build.cmake.in +++ b/lapack-netlib/CMAKE/lapack-config-build.cmake.in @@ -5,6 +5,10 @@ if(_LAPACK_TARGET AND NOT TARGET "${_LAPACK_TARGET}") endif() unset(_LAPACK_TARGET) +# Hint for project building against lapack +set(LAPACK_Fortran_COMPILER_ID "@CMAKE_Fortran_COMPILER_ID@") + # Report the blas and lapack raw or imported libraries. set(LAPACK_blas_LIBRARIES "@BLAS_LIBRARIES@") set(LAPACK_lapack_LIBRARIES "@LAPACK_LIBRARIES@") +set(LAPACK_LIBRARIES ${LAPACK_blas_LIBRARIES} ${LAPACK_lapack_LIBRARIES}) diff --git a/lapack-netlib/CMAKE/lapack-config-install.cmake.in b/lapack-netlib/CMAKE/lapack-config-install.cmake.in index 4e04f8711..3de7362ea 100644 --- a/lapack-netlib/CMAKE/lapack-config-install.cmake.in +++ b/lapack-netlib/CMAKE/lapack-config-install.cmake.in @@ -8,8 +8,12 @@ if(_LAPACK_TARGET AND NOT TARGET "${_LAPACK_TARGET}") endif() unset(_LAPACK_TARGET) +# Hint for project building against lapack +set(LAPACK_Fortran_COMPILER_ID "@CMAKE_Fortran_COMPILER_ID@") + # Report the blas and lapack raw or imported libraries. 
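The lapack-config hunks here (this build-tree variant and its install-tree twin just below) export a LAPACK_Fortran_COMPILER_ID hint and aggregate both archives into a single LAPACK_LIBRARIES variable for consumers. A plain-make project linking against the resulting libraries would do the equivalent by hand; in this hypothetical fragment LAPACK_DIR is an assumed location, and only the archive names come from the patch:

# Hypothetical downstream Makefile fragment (LAPACK_DIR is not defined
# anywhere in this patch). Note the static link order: LAPACK before the
# BLAS it depends on.
LAPACK_DIR  = $(HOME)/lapack
LAPACK_LIBS = $(LAPACK_DIR)/liblapack.a $(LAPACK_DIR)/librefblas.a

myprog: myprog.o
	$(FC) $(LDFLAGS) -o $@ $^ $(LAPACK_LIBS)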
set(LAPACK_blas_LIBRARIES "@BLAS_LIBRARIES@") set(LAPACK_lapack_LIBRARIES "@LAPACK_LIBRARIES@") +set(LAPACK_LIBRARIES ${LAPACK_blas_LIBRARIES} ${LAPACK_lapack_LIBRARIES}) unset(_LAPACK_SELF_DIR) diff --git a/lapack-netlib/CMakeLists.txt b/lapack-netlib/CMakeLists.txt index caa0e7107..df43d91b1 100644 --- a/lapack-netlib/CMakeLists.txt +++ b/lapack-netlib/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 2.8.12) project(LAPACK Fortran C) set(LAPACK_MAJOR_VERSION 3) -set(LAPACK_MINOR_VERSION 8) +set(LAPACK_MINOR_VERSION 9) set(LAPACK_PATCH_VERSION 0) set( LAPACK_VERSION @@ -13,6 +13,9 @@ set( # Add the CMake directory for custon CMake modules set(CMAKE_MODULE_PATH "${LAPACK_SOURCE_DIR}/CMAKE" ${CMAKE_MODULE_PATH}) +# Export all symbols on Windows when building shared libraries +SET(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) + # Set a default build type if none was specified if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) message(STATUS "Setting build type to 'Release' as none was specified.") @@ -21,8 +24,19 @@ if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo" "Coverage") endif() -string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER) -if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "COVERAGE") +# Coverage +set(_is_coverage_build 0) +set(_msg "Checking if build type is 'Coverage'") +message(STATUS "${_msg}") +if(NOT CMAKE_CONFIGURATION_TYPES) + string(TOLOWER ${CMAKE_BUILD_TYPE} _build_type_lc) + if(${_build_type_lc} STREQUAL "coverage") + set(_is_coverage_build 1) + endif() +endif() +message(STATUS "${_msg}: ${_is_coverage_build}") + +if(_is_coverage_build) message(STATUS "Adding coverage") find_package(codecov) endif() @@ -58,18 +72,18 @@ include(PreventInSourceBuilds) include(PreventInBuildInstalls) if(UNIX) - if("${CMAKE_Fortran_COMPILER}" MATCHES "ifort") - set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fp-model strict") + if(CMAKE_Fortran_COMPILER_ID STREQUAL Intel) + list(APPEND CMAKE_Fortran_FLAGS "-fp-model strict") endif() - if("${CMAKE_Fortran_COMPILER}" MATCHES "xlf") - set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -qnosave -qstrict=none") + if(CMAKE_Fortran_COMPILER_ID STREQUAL XL) + list(APPEND CMAKE_Fortran_FLAGS "-qnosave -qstrict=none") endif() # Delete libmtsk in linking sequence for Sun/Oracle Fortran Compiler. 
# This library is not present in the Sun package SolarisStudio12.3-linux-x86-bin string(REPLACE \;mtsk\; \; CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES "${CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES}") endif() -if(CMAKE_Fortran_COMPILER_ID STREQUAL "Compaq") +if(CMAKE_Fortran_COMPILER_ID STREQUAL Compaq) if(WIN32) if(CMAKE_GENERATOR STREQUAL "NMake Makefiles") get_filename_component(CMAKE_Fortran_COMPILER_CMDNAM ${CMAKE_Fortran_COMPILER} NAME_WE) @@ -96,24 +110,16 @@ if(CMAKE_Fortran_COMPILER_ID STREQUAL "Compaq") endif() endif() -# Get Python -message(STATUS "Looking for Python greater than 2.6 - ${PYTHONINTERP_FOUND}") -find_package(PythonInterp 2.7) # lapack_testing.py uses features from python 2.7 and greater -if(PYTHONINTERP_FOUND) - message(STATUS "Using Python version ${PYTHON_VERSION_STRING}") -else() - message(STATUS "No suitable Python version found, so skipping summary tests.") -endif() -# -------------------------------------------------- +# -------------------------------------------------- set(LAPACK_INSTALL_EXPORT_NAME lapack-targets) macro(lapack_install_library lib) install(TARGETS ${lib} EXPORT ${LAPACK_INSTALL_EXPORT_NAME} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT Development + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT RuntimeLibraries + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT RuntimeLibraries ) endmacro() @@ -121,12 +127,22 @@ set(PKG_CONFIG_DIR ${CMAKE_INSTALL_LIBDIR}/pkgconfig) # -------------------------------------------------- # Testing -option(BUILD_TESTING "Build tests" OFF) -enable_testing() +option(BUILD_TESTING "Build tests" ${_is_coverage_build}) include(CTest) -enable_testing() message(STATUS "Build tests: ${BUILD_TESTING}") +# lapack_testing.py uses features from python 2.7 and greater +if(BUILD_TESTING) + set(_msg "Looking for Python >= 2.7 needed for summary tests") + message(STATUS "${_msg}") + find_package(PythonInterp 2.7 QUIET) + if(PYTHONINTERP_FOUND) + message(STATUS "${_msg} - found (${PYTHON_VERSION_STRING})") + else() + message(STATUS "${_msg} - not found (skipping summary tests)") + endif() +endif() + # -------------------------------------------------- # Organize output files. On Windows this also keeps .dll files next # to the .exe files that need them, making tests easy to run. @@ -299,16 +315,40 @@ if(LAPACKE) add_subdirectory(LAPACKE) endif() +#------------------------------------- +# BLAS++ / LAPACK++ +option(BLAS++ "Build BLAS++" OFF) +option(LAPACK++ "Build LAPACK++" OFF) + + +function(_display_cpp_implementation_msg name) + string(TOLOWER ${name} name_lc) + message(STATUS "${name}++ enable") + message(STATUS "----------------") + message(STATUS "Thank you for your interest in ${name}++, a newly developed C++ API for ${name} library") + message(STATUS "The objective of ${name}++ is to provide a convenient, performance oriented API for development in the C++ language, that, for the most part, preserves established conventions, while, at the same time, takes advantages of modern C++ features, such as: namespaces, templates, exceptions, etc.") + message(STATUS "We are still working on integrating ${name}++ in our library. 
For the moment, you can download directly ${name_lc}++ from https://bitbucket.org/icl/${name_lc}pp") + message(STATUS "For support ${name}++ related question, please email: slate-user@icl.utk.edu") + message(STATUS "----------------") +endfunction() +if(BLAS++) + _display_cpp_implementation_msg("BLAS") +endif() +if(LAPACK++) + _display_cpp_implementation_msg("LAPACK") +endif() + # -------------------------------------------------- # CPACK Packaging set(CPACK_PACKAGE_NAME "LAPACK") set(CPACK_PACKAGE_VENDOR "University of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd") set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "LAPACK- Linear Algebra Package") -set(CPACK_PACKAGE_VERSION_MAJOR 3) -set(CPACK_PACKAGE_VERSION_MINOR 5) -set(CPACK_PACKAGE_VERSION_PATCH 0) +set(CPACK_PACKAGE_VERSION_MAJOR ${LAPACK_MAJOR_VERSION}) +set(CPACK_PACKAGE_VERSION_MINOR ${LAPACK_MINOR_VERSION}) +set(CPACK_PACKAGE_VERSION_PATCH ${LAPACK_PATCH_VERSION}) set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") +set(CPACK_MONOLITHIC_INSTALL ON) set(CPACK_PACKAGE_INSTALL_DIRECTORY "LAPACK") if(WIN32 AND NOT UNIX) # There is a bug in NSI that does not handle full unix paths properly. Make @@ -347,7 +387,9 @@ endif() set(_lapack_config_install_guard_target "") if(ALL_TARGETS) install(EXPORT lapack-targets - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/lapack-${LAPACK_VERSION}) + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/lapack-${LAPACK_VERSION} + COMPONENT Development + ) # Choose one of the lapack targets to use as a guard for # lapack-config.cmake to load targets from the install tree. @@ -382,6 +424,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/lapack.pc.in ${CMAKE_CURRENT_BINARY_D install(FILES ${CMAKE_CURRENT_BINARY_DIR}/lapack.pc DESTINATION ${PKG_CONFIG_DIR} + COMPONENT Development ) configure_file(${LAPACK_SOURCE_DIR}/CMAKE/lapack-config-install.cmake.in @@ -398,4 +441,6 @@ install(FILES ${LAPACK_BINARY_DIR}/CMakeFiles/lapack-config.cmake ${LAPACK_BINARY_DIR}/lapack-config-version.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/lapack-${LAPACK_VERSION} + COMPONENT Development ) + diff --git a/lapack-netlib/DOCS/Doxyfile b/lapack-netlib/DOCS/Doxyfile index 8f3558597..43cea43b5 100644 --- a/lapack-netlib/DOCS/Doxyfile +++ b/lapack-netlib/DOCS/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = LAPACK # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 3.8.0 +PROJECT_NUMBER = 3.9.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/lapack-netlib/DOCS/Doxyfile_man b/lapack-netlib/DOCS/Doxyfile_man index 6fb339a73..1767cf5f4 100644 --- a/lapack-netlib/DOCS/Doxyfile_man +++ b/lapack-netlib/DOCS/Doxyfile_man @@ -38,7 +38,7 @@ PROJECT_NAME = LAPACK # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 3.8.0 +PROJECT_NUMBER = 3.9.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/lapack-netlib/DOCS/lawn81.tex b/lapack-netlib/DOCS/lawn81.tex index 291735299..794c2a7aa 100644 --- a/lapack-netlib/DOCS/lawn81.tex +++ b/lapack-netlib/DOCS/lawn81.tex @@ -439,39 +439,39 @@ SHELL = /bin/sh \end{quote} and it will need to be modified to \texttt{SHELL = /sbin/sh} if you are installing LAPACK on an SGI architecture. 
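Before the lawn81 installation-guide changes that follow, one note on the CMakeLists.txt hunks above: the 'Coverage' build type is now detected case-insensitively and skipped for multi-config generators, BUILD_TESTING defaults to ON only in coverage builds, CPack inherits the real 3.9.0 version instead of a hard-coded 3.5.0, and the BLAS++/LAPACK++ options are informational for now. The wrapper targets below are purely illustrative (neither the target names nor the build directory appear in the patch) and simply drive the new knobs:

# Configure a coverage build; the Coverage type now implies BUILD_TESTING=ON.
coverage:
	mkdir -p build && cd build && cmake -DCMAKE_BUILD_TYPE=Coverage ..

# Ask about the C++ APIs; today this only prints the pointer to
# https://bitbucket.org/icl/blaspp and https://bitbucket.org/icl/lapackpp.
cpp-api:
	mkdir -p build && cd build && cmake -DBLAS++=ON -DLAPACK++=ON ..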
-Second, you will -need to modify the \texttt{PLAT} definition, which is appended to all -library names, to specify the architecture to which you are installing -LAPACK. This features avoids confusion in library names when you are -installing LAPACK on more than one architecture. Next, you will need -to modify \texttt{FORTRAN}, \texttt{OPTS}, \texttt{DRVOPTS}, \texttt{NOOPT}, \texttt{LOADER}, -and \texttt{LOADOPTS} to specify +Next, you will need to modify \texttt{FC}, \texttt{FFLAGS}, +\texttt{FFLAGS\_DRV}, \texttt{FFLAGS\_NOOPT}, and \texttt{LDFLAGS} to specify the compiler, compiler options, compiler options for the testing and -timing\footnotemark[\value{footnote}] main programs, loader, loader options. -Next you will have to choose which function you will use to time in the \texttt{SECOND} and \texttt{DSECND} routines. +timing\footnotemark[\value{footnote}] main programs, and linker options. +Next you will have to choose which function you will use to time in the +\texttt{SECOND} and \texttt{DSECND} routines. \begin{verbatim} -#The Default : SECOND and DSECND will use a call to the EXTERNAL FUNCTION ETIME -TIMER = EXT_ETIME -# For RS6K : SECOND and DSECND will use a call to the EXTERNAL FUNCTION ETIME_ -# TIMER = EXT_ETIME_ -# For gfortran compiler: SECOND and DSECND will use the INTERNAL FUNCTION ETIME -# TIMER = INT_ETIME -# If your Fortran compiler does not provide etime (like Nag Fortran Compiler, etc...) -# SECOND and DSECND will use a call to the INTERNAL FUNCTION CPU_TIME -# TIMER = INT_CPU_TIME -# If neither of this works...you can use the NONE value... -# In that case, SECOND and DSECND will always return 0 -# TIMER = NONE +# Default: SECOND and DSECND will use a call to the +# EXTERNAL FUNCTION ETIME +#TIMER = EXT_ETIME +# For RS6K: SECOND and DSECND will use a call to the +# EXTERNAL FUNCTION ETIME_ +#TIMER = EXT_ETIME_ +# For gfortran compiler: SECOND and DSECND will use a call to the +# INTERNAL FUNCTION ETIME +TIMER = INT_ETIME +# If your Fortran compiler does not provide etime (like Nag Fortran +# Compiler, etc...) SECOND and DSECND will use a call to the +# INTERNAL FUNCTION CPU_TIME +#TIMER = INT_CPU_TIME +# If none of these work, you can use the NONE value. +# In that case, SECOND and DSECND will always return 0. +#TIMER = NONE \end{verbatim} Refer to the section~\ref{second} to get more information. -Next, you will need to modify \texttt{ARCH}, \texttt{ARCHFLAGS}, and \texttt{RANLIB} to specify archiver, +Next, you will need to modify \texttt{AR}, \texttt{ARFLAGS}, and \texttt{RANLIB} to specify archiver, archiver options, and ranlib for your machine. If your architecture does not require \texttt{ranlib} to be run after each archive command (as is the case with CRAY computers running UNICOS, Hewlett Packard computers running HP-UX, or SUN SPARCstations running Solaris), set -\texttt{ranlib=echo}. And finally, you must +\texttt{RANLIB = echo}. And finally, you must modify the \texttt{BLASLIB} definition to specify the BLAS library to which you will be linking. If an optimized version of the BLAS is available on your machine, you are highly recommended to link to that library. @@ -721,24 +721,24 @@ The version that will be used depends on the value of the TIMER variable in the \begin{itemize} \item If ETIME is available as an external function, set the value of the TIMER variable in your -make.inc to \texttt{EXT\_ETIME}:\texttt{second\_EXT\_ETIME.f} and \texttt{dsecnd\_EXT\_ETIME.f} will be used. 
+make.inc to \texttt{EXT\_ETIME}: \texttt{second\_EXT\_ETIME.f} and \texttt{dsecnd\_EXT\_ETIME.f} will be used. Usually on HPPA architectures, -the compiler and loader flag \texttt{+U77} should be included to access +the compiler and linker flag \texttt{+U77} should be included to access the function \texttt{ETIME}. \item If ETIME\_ is available as an external function, set the value of the TIMER variable in your make.inc -to \texttt{EXT\_ETIME\_}:\texttt{second\_EXT\_ETIME\_.f} and \texttt{dsecnd\_EXT\_ETIME\_.f} will be used. +to \texttt{EXT\_ETIME\_}: \texttt{second\_EXT\_ETIME\_.f} and \texttt{dsecnd\_EXT\_ETIME\_.f} will be used. It is the case on some IBM architectures such as IBM RS/6000s. \item If ETIME is available as an internal function, set the value of the TIMER variable in your make.inc -to \texttt{INT\_ETIME}:\texttt{second\_INT\_ETIME.f} and \texttt{dsecnd\_INT\_ETIME.f} will be used. +to \texttt{INT\_ETIME}: \texttt{second\_INT\_ETIME.f} and \texttt{dsecnd\_INT\_ETIME.f} will be used. This is the case with gfortan. \item If CPU\_TIME is available as an internal function, set the value of the TIMER variable in your make.inc -to \texttt{INT\_CPU\_TIME}:\texttt{second\_INT\_CPU\_TIME.f} and \texttt{dsecnd\_INT\_CPU\_TIME.f} will be used. +to \texttt{INT\_CPU\_TIME}: \texttt{second\_INT\_CPU\_TIME.f} and \texttt{dsecnd\_INT\_CPU\_TIME.f} will be used. \item If none of these function is available, set the value of the TIMER variable in your make.inc -to \texttt{NONE:}\texttt{second\_NONE.f} and \texttt{dsecnd\_NONE.f} will be used. +to \texttt{NONE}: \texttt{second\_NONE.f} and \texttt{dsecnd\_NONE.f} will be used. These routines will always return zero. \end{itemize} @@ -829,8 +829,8 @@ data type to the library if necessary. \end{itemize} \noindent -The BLAS library is created in \texttt{LAPACK/blas\_PLAT.a}, where -\texttt{PLAT} is the user-defined architecture suffix specified in the file +The BLAS library is created in \texttt{LAPACK/librefblas.a}, +or in the user-defined location specified by \texttt{BLASLIB} in the file \texttt{LAPACK/make.inc}. \subsection{Run the BLAS Test Programs}\label{testblas} @@ -882,8 +882,8 @@ data type to the library if necessary. \end{itemize} \noindent -The LAPACK library is created in \texttt{LAPACK/lapack\_PLAT.a}, where -\texttt{PLAT} is the user-defined architecture suffix specified in the file +The LAPACK library is created in \texttt{LAPACK/liblapack.a}, +or in the user-defined location specified by \texttt{LAPACKLIB} in the file \texttt{LAPACK/make.inc}. \subsection{Create the Test Matrix Generator Library} @@ -902,9 +902,9 @@ data type to the library if necessary. \end{itemize} \noindent -The test matrix generator library is created in \texttt{LAPACK/tmglib\_PLAT.a}, -where \texttt{PLAT} is the user-defined architecture suffix specified in the -file \texttt{LAPACK/make.inc}. +The test matrix generator library is created in \texttt{LAPACK/libtmglib.a}, +or in the user-defined location specified by \texttt{TMGLIB} in the file +\texttt{LAPACK/make.inc}. \subsection{Run the LAPACK Test Programs} @@ -1114,9 +1114,7 @@ To make a library of the instrumented LAPACK routines, first go to \texttt{LAPACK/TIMING/LIN/LINSRC} and type \texttt{make} followed by the data types desired, as in the examples of Section~\ref{toplevelmakefile}. The library of instrumented code is created in -\texttt{LAPACK/TIMING/LIN/linsrc\_PLAT.a}, -where \texttt{PLAT} is the user-defined architecture suffix specified in the -file \texttt{LAPACK/make.inc}. 
+\texttt{LAPACK/TIMING/LIN/linsrc.a}. \end{sloppypar} \item[b)] @@ -1251,9 +1249,7 @@ To make a library of the instrumented LAPACK routines, first go to \texttt{LAPACK/TIMING/EIG/EIGSRC} and type \texttt{make} followed by the data types desired, as in the examples of Section~\ref{toplevelmakefile}. The library of instrumented code is created in -\texttt{LAPACK/TIMING/EIG/eigsrc\_PLAT.a}, -where \texttt{PLAT} is the user-defined architecture suffix specified in the -file \texttt{LAPACK/make.inc}. +\texttt{LAPACK/TIMING/EIG/eigsrc.a}. \end{sloppypar} \item[b)] @@ -1389,7 +1385,7 @@ installing LAPACK on an SGI architecture. \section{ETIME} On HPPA architectures, -the compiler and loader flag \texttt{+U77} should be included to access +the compiler and linker flag \texttt{+U77} should be included to access the function \texttt{ETIME}. \section{ILAENV and IEEE-754 compliance} @@ -1494,13 +1490,13 @@ has two options: increase your stack size, or force all local variables to be allocated statically. On HPPA architectures, the -compiler and loader flag \texttt{-K} should be used when compiling these testing +compiler and linker flag \texttt{-K} should be used when compiling these testing and timing main programs to avoid such a stack overflow. I.e., set -\texttt{DRVOPTS = -K} in the \texttt{LAPACK/make.inc} file. +\texttt{FFLAGS\_DRV = -K} in the \texttt{LAPACK/make.inc} file. For similar reasons, -on SGI architectures, the compiler and loader flag \texttt{-static} should be -used. I.e., set \texttt{DRVOPTS = -static} in the \texttt{LAPACK/make.inc} file. +on SGI architectures, the compiler and linker flag \texttt{-static} should be +used. I.e., set \texttt{FFLAGS\_DRV = -static} in the \texttt{LAPACK/make.inc} file. \section{IEEE arithmetic} diff --git a/lapack-netlib/INSTALL/Makefile b/lapack-netlib/INSTALL/Makefile index 150a061d6..1007c1bca 100644 --- a/lapack-netlib/INSTALL/Makefile +++ b/lapack-netlib/INSTALL/Makefile @@ -1,30 +1,33 @@ -include ../make.inc +TOPSRCDIR = .. 
+include $(TOPSRCDIR)/make.inc +.PHONY: all testlsame testslamch testdlamch testsecond testdsecnd testieee testversion all: testlsame testslamch testdlamch testsecond testdsecnd testieee testversion testlsame: lsame.o lsametst.o - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ testslamch: slamch.o lsame.o slamchtst.o - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ testdlamch: dlamch.o lsame.o dlamchtst.o - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ testsecond: second_$(TIMER).o secondtst.o @echo "[INFO] : TIMER value: $(TIMER) (given by make.inc)" - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ testdsecnd: dsecnd_$(TIMER).o dsecndtst.o @echo "[INFO] : TIMER value: $(TIMER) (given by make.inc)" - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ testieee: tstiee.o - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ testversion: ilaver.o LAPACK_version.o - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ +.PHONY: run run: all ./testlsame ./testslamch @@ -34,6 +37,7 @@ run: all ./testieee ./testversion +.PHONY: clean cleanobj cleanexe cleantest clean: cleanobj cleanexe cleantest cleanobj: rm -f *.o @@ -42,9 +46,5 @@ cleanexe: cleantest: rm -f core -.SUFFIXES: .o .f -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< - -slamch.o: slamch.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< -dlamch.o: dlamch.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< +slamch.o: slamch.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< +dlamch.o: dlamch.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< diff --git a/lapack-netlib/INSTALL/dlamch.f b/lapack-netlib/INSTALL/dlamch.f index 76f875cef..9073cd45e 100644 --- a/lapack-netlib/INSTALL/dlamch.f +++ b/lapack-netlib/INSTALL/dlamch.f @@ -10,6 +10,10 @@ * * DOUBLE PRECISION FUNCTION DLAMCH( CMACH ) * +* .. Scalar Arguments .. +* CHARACTER CMACH +* .. +* * *> \par Purpose: * ============= @@ -24,6 +28,7 @@ * *> \param[in] CMACH *> \verbatim +*> CMACH is CHARACTER*1 *> Specifies the value to be returned by DLAMCH: *> = 'E' or 'e', DLAMCH := eps *> = 'S' or 's , DLAMCH := sfmin diff --git a/lapack-netlib/INSTALL/dlamchf77.f b/lapack-netlib/INSTALL/dlamchf77.f index 3efd21535..37b30551f 100644 --- a/lapack-netlib/INSTALL/dlamchf77.f +++ b/lapack-netlib/INSTALL/dlamchf77.f @@ -10,6 +10,10 @@ * * DOUBLE PRECISION FUNCTION DLAMCH( CMACH ) * +* .. Scalar Arguments .. +* CHARACTER CMACH +* .. +* * *> \par Purpose: * ============= diff --git a/lapack-netlib/INSTALL/ilaver.f b/lapack-netlib/INSTALL/ilaver.f index e1d59f465..79fe597ae 100644 --- a/lapack-netlib/INSTALL/ilaver.f +++ b/lapack-netlib/INSTALL/ilaver.f @@ -25,12 +25,15 @@ * ========== * *> \param[out] VERS_MAJOR +*> VERS_MAJOR is INTEGER *> return the lapack major version *> *> \param[out] VERS_MINOR +*> VERS_MINOR is INTEGER *> return the lapack minor version from the major version *> *> \param[out] VERS_PATCH +*> VERS_PATCH is INTEGER *> return the lapack patch version from the minor version * * Authors: @@ -41,24 +44,23 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date June 2017 +*> \date November 2019 * *> \ingroup auxOTHERauxiliary * * ===================================================================== SUBROUTINE ILAVER( VERS_MAJOR, VERS_MINOR, VERS_PATCH ) * -* -- LAPACK computational routine (version 3.7.1) -- +* -- LAPACK computational routine -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. 
of Colorado Denver and NAG Ltd..-- -* June 2017 * * ===================================================================== * INTEGER VERS_MAJOR, VERS_MINOR, VERS_PATCH * ===================================================================== VERS_MAJOR = 3 - VERS_MINOR = 8 + VERS_MINOR = 9 VERS_PATCH = 0 * ===================================================================== * diff --git a/lapack-netlib/INSTALL/make.inc.ALPHA b/lapack-netlib/INSTALL/make.inc.ALPHA index 0ceeaa155..d6397e81d 100644 --- a/lapack-netlib/INSTALL/make.inc.ALPHA +++ b/lapack-netlib/INSTALL/make.inc.ALPHA @@ -8,30 +8,28 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = cc +CC = cc CFLAGS = -O4 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = f77 -OPTS = -O4 -fpe1 -DRVOPTS = $(OPTS) -NOOPT = +FC = f77 +FFLAGS = -O4 -fpe1 +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = f77 -LOADOPTS = +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = ranlib +AR = ar +ARFLAGS = cr +RANLIB = ranlib # Timer for the SECOND and DSECND routines # @@ -74,9 +72,9 @@ TIMER = EXT_ETIME # machine-specific, optimized BLAS library should be used whenever # possible.) # -#BLASLIB = ../../librefblas.a +#BLASLIB = $(TOPSRCDIR)/librefblas.a BLASLIB = -ldxml -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.HPPA b/lapack-netlib/INSTALL/make.inc.HPPA index 8eabbbdf4..6ee2b2dfb 100644 --- a/lapack-netlib/INSTALL/make.inc.HPPA +++ b/lapack-netlib/INSTALL/make.inc.HPPA @@ -8,30 +8,28 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = cc +CC = cc CFLAGS = -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = f77 -OPTS = +O4 +U77 -DRVOPTS = $(OPTS) -K -NOOPT = +U77 +FC = f77 +FFLAGS = +O4 +U77 +FFLAGS_DRV = $(FFLAGS) -K +FFLAGS_NOOPT = +U77 -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = f77 -LOADOPTS = -Aa +U77 +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = echo +AR = ar +ARFLAGS = cr +RANLIB = echo # Timer for the SECOND and DSECND routines # @@ -74,9 +72,9 @@ TIMER = EXT_ETIME # machine-specific, optimized BLAS library should be used whenever # possible.) 
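The make.inc.ALPHA hunk above is the first of roughly a dozen platform templates that all receive the same mechanical rename, and the remaining make.inc.* hunks (HPPA continues directly below) repeat it. Stated once, with values taken from the patch's own gfortran template:

# FORTRAN/OPTS/DRVOPTS/NOOPT become FC/FFLAGS/FFLAGS_DRV/FFLAGS_NOOPT;
# ARCH/ARCHFLAGS become AR/ARFLAGS; LOADER/LOADOPTS disappear because
# executables are now linked with $(FC) $(FFLAGS) $(LDFLAGS).
FC           = gfortran
FFLAGS       = -O2 -frecursive
FFLAGS_DRV   = $(FFLAGS)
FFLAGS_NOOPT = -O0 -frecursive
LDFLAGS      =
AR           = ar
ARFLAGS      = cr
RANLIB       = ranlib
TIMER        = INT_ETIME

# Library archives are addressed from the source-tree root instead of a
# fragile ../../ prefix:
BLASLIB    = $(TOPSRCDIR)/librefblas.a
CBLASLIB   = $(TOPSRCDIR)/libcblas.a
LAPACKLIB  = $(TOPSRCDIR)/liblapack.a
TMGLIB     = $(TOPSRCDIR)/libtmglib.a
LAPACKELIB = $(TOPSRCDIR)/liblapacke.a

Files that must not be optimized keep per-target rules under the new names, as in the INSTALL/Makefile hunk above: slamch.o: slamch.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $<.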
# -#BLASLIB = ../../librefblas.a +#BLASLIB = $(TOPSRCDIR)/librefblas.a BLASLIB = -lblas -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.IRIX64 b/lapack-netlib/INSTALL/make.inc.IRIX64 index d9e71e1bf..59fe522eb 100644 --- a/lapack-netlib/INSTALL/make.inc.IRIX64 +++ b/lapack-netlib/INSTALL/make.inc.IRIX64 @@ -8,33 +8,30 @@ SHELL = /sbin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = cc +CC = cc CFLAGS = -O3 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = f77 -OPTS = -O3 -64 -mips4 -r10000 -OPT:IEEE_NaN_inf=ON -#OPTS = -g -DEBUG:subscript_check=ON -trapuv -OPT:IEEE_NaN_inf=ON -DRVOPTS = $(OPTS) -static -NOOPT = -64 -mips4 -r10000 -OPT:IEEE_NaN_inf=ON -#NOOPT = -g -DEBUG:subscript_check=ON -trapuv -OPT:IEEE_NaN_inf=ON +FC = f77 +FFLAGS = -O3 -64 -mips4 -r10000 -OPT:IEEE_NaN_inf=ON +#FFLAGS = -g -DEBUG:subscript_check=ON -trapuv -OPT:IEEE_NaN_inf=ON +FFLAGS_DRV = $(FFLAGS) -static +FFLAGS_NOOPT = -64 -mips4 -r10000 -OPT:IEEE_NaN_inf=ON +#FFLAGS_NOOPT = -g -DEBUG:subscript_check=ON -trapuv -OPT:IEEE_NaN_inf=ON -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = f77 -LOADOPTS = -O3 -64 -mips4 -r10000 -OPT:IEEE_NaN_inf=ON -#LOADOPTS = -g -DEBUG:subscript_check=ON -trapuv -OPT:IEEE_NaN_inf=ON +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = echo +AR = ar +ARFLAGS = cr +RANLIB = echo # Timer for the SECOND and DSECND routines # @@ -78,8 +75,8 @@ TIMER = EXT_ETIME # possible.) # #BLASLIB = -lblas -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.O2K b/lapack-netlib/INSTALL/make.inc.O2K index 3ffcadacc..3c3dbc800 100644 --- a/lapack-netlib/INSTALL/make.inc.O2K +++ b/lapack-netlib/INSTALL/make.inc.O2K @@ -8,33 +8,30 @@ SHELL = /sbin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = cc +CC = cc CFLAGS = -O3 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = f77 -OPTS = -O3 -64 -mips4 -r10000 -#OPTS = -O3 -64 -mips4 -r10000 -mp -DRVOPTS = $(OPTS) -static -NOOPT = -64 -mips4 -r10000 -#NOOPT = -64 -mips4 -r10000 -mp +FC = f77 +FFLAGS = -O3 -64 -mips4 -r10000 +#FFLAGS = -O3 -64 -mips4 -r10000 -mp +FFLAGS_DRV = $(FFLAGS) -static +FFLAGS_NOOPT = -64 -mips4 -r10000 +#FFLAGS_NOOPT = -64 -mips4 -r10000 -mp -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. 
+# Define LDFLAGS to the desired linker options for your machine. # -LOADER = f77 -LOADOPTS = -O3 -64 -mips4 -r10000 -#LOADOPTS = -O3 -64 -mips4 -r10000 -mp +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = echo +AR = ar +ARFLAGS = cr +RANLIB = echo # Timer for the SECOND and DSECND routines # @@ -79,8 +76,8 @@ TIMER = EXT_ETIME # BLASLIB = -lblas #BLASLIB = -lblas_mp -#BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +#BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.SGI5 b/lapack-netlib/INSTALL/make.inc.SGI5 index c7019ac16..1013cffdb 100644 --- a/lapack-netlib/INSTALL/make.inc.SGI5 +++ b/lapack-netlib/INSTALL/make.inc.SGI5 @@ -8,30 +8,28 @@ SHELL = /sbin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = cc +CC = cc CFLAGS = -O4 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = f77 -OPTS = -O4 -DRVOPTS = $(OPTS) -static -NOOPT = +FC = f77 +FFLAGS = -O4 +FFLAGS_DRV = $(FFLAGS) -static +FFLAGS_NOOPT = -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = f77 -LOADOPTS = +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = echo +AR = ar +ARFLAGS = cr +RANLIB = echo # Timer for the SECOND and DSECND routines # @@ -75,8 +73,8 @@ TIMER = EXT_ETIME # possible.) # #BLASLIB = -lblas -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.SUN4 b/lapack-netlib/INSTALL/make.inc.SUN4 index 4e44f1beb..2da0ecb65 100644 --- a/lapack-netlib/INSTALL/make.inc.SUN4 +++ b/lapack-netlib/INSTALL/make.inc.SUN4 @@ -8,30 +8,28 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = cc +CC = cc CFLAGS = -O3 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = f77 -OPTS = -dalign -O4 -fast -DRVOPTS = $(OPTS) -NOOPT = +FC = f77 +FFLAGS = -dalign -O4 -fast +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = f77 -LOADOPTS = -dalign -O4 -fast +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. 
# -ARCH = ar -ARCHFLAGS = cr -RANLIB = ranlib +AR = ar +ARFLAGS = cr +RANLIB = ranlib # Timer for the SECOND and DSECND routines # @@ -75,8 +73,8 @@ TIMER = EXT_ETIME # possible.) # #BLASLIB = -lblas -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.SUN4SOL2 b/lapack-netlib/INSTALL/make.inc.SUN4SOL2 index e6d79add3..d2db07c61 100644 --- a/lapack-netlib/INSTALL/make.inc.SUN4SOL2 +++ b/lapack-netlib/INSTALL/make.inc.SUN4SOL2 @@ -8,34 +8,31 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = cc +CC = cc CFLAGS = -O3 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = f77 -#OPTS = -O4 -u -f -mt -#OPTS = -u -f -dalign -native -xO5 -xarch=v8plusa -OPTS = -u -f -dalign -native -xO2 -xarch=v8plusa -DRVOPTS = $(OPTS) -NOOPT = -u -f -#NOOPT = -u -f -mt +FC = f77 +#FFLAGS = -O4 -u -f -mt +#FFLAGS = -u -f -dalign -native -xO5 -xarch=v8plusa +FFLAGS = -u -f -dalign -native -xO2 -xarch=v8plusa +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -u -f +#FFLAGS_NOOPT = -u -f -mt -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = f77 -#LOADOPTS = -mt -LOADOPTS = -f -dalign -native -xO2 -xarch=v8plusa +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = echo +AR = ar +ARFLAGS = cr +RANLIB = echo # Timer for the SECOND and DSECND routines # @@ -78,10 +75,10 @@ TIMER = EXT_ETIME # machine-specific, optimized BLAS library should be used whenever # possible.) # -#BLASLIB = ../../librefblas.a +#BLASLIB = $(TOPSRCDIR)/librefblas.a #BLASLIB = -xlic_lib=sunperf_mt BLASLIB = -xlic_lib=sunperf -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.XLF b/lapack-netlib/INSTALL/make.inc.XLF index 9466ee332..cb9d791a7 100644 --- a/lapack-netlib/INSTALL/make.inc.XLF +++ b/lapack-netlib/INSTALL/make.inc.XLF @@ -8,31 +8,29 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = xlc +CC = xlc CFLAGS = -O3 -qnosave -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = xlf -OPTS = -O3 -qfixed -qnosave +FC = xlf +FFLAGS = -O3 -qfixed -qnosave # For -O2, add -qstrict=none -DRVOPTS = $(OPTS) -NOOPT = -O0 -qfixed -qnosave +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -O0 -qfixed -qnosave -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. 
# -LOADER = xlf -LOADOPTS = -qnosave +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = ranlib +AR = ar +ARFLAGS = cr +RANLIB = ranlib # Timer for the SECOND and DSECND routines # @@ -75,9 +73,9 @@ TIMER = EXT_ETIME_ # machine-specific, optimized BLAS library should be used whenever # possible.) # -#BLASLIB = ../../librefblas.a +#BLASLIB = $(TOPSRCDIR)/librefblas.a BLASLIB = -lessl -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.gfortran b/lapack-netlib/INSTALL/make.inc.gfortran index 39d98d4d4..104632747 100644 --- a/lapack-netlib/INSTALL/make.inc.gfortran +++ b/lapack-netlib/INSTALL/make.inc.gfortran @@ -8,10 +8,10 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = gcc +CC = gcc CFLAGS = -O3 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # @@ -19,23 +19,21 @@ CFLAGS = -O3 # and handle these quantities appropriately. As a consequence, one # should not compile LAPACK with flags such as -ffpe-trap=overflow. # -FORTRAN = gfortran -OPTS = -O2 -frecursive -DRVOPTS = $(OPTS) -NOOPT = -O0 -frecursive +FC = gfortran +FFLAGS = -O2 -frecursive +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -O0 -frecursive -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = gfortran -LOADOPTS = +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = ranlib +AR = ar +ARFLAGS = cr +RANLIB = ranlib # Timer for the SECOND and DSECND routines # @@ -78,8 +76,8 @@ TIMER = INT_ETIME # machine-specific, optimized BLAS library should be used whenever # possible.) # -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.gfortran_debug b/lapack-netlib/INSTALL/make.inc.gfortran_debug index 10e6381df..246060827 100644 --- a/lapack-netlib/INSTALL/make.inc.gfortran_debug +++ b/lapack-netlib/INSTALL/make.inc.gfortran_debug @@ -8,10 +8,10 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = gcc +CC = gcc CFLAGS = -g -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # @@ -19,23 +19,21 @@ CFLAGS = -g # and handle these quantities appropriately. As a consequence, one # should not compile LAPACK with flags such as -ffpe-trap=overflow. 
# -FORTRAN = gfortran -fimplicit-none -g -frecursive -OPTS = -DRVOPTS = $(OPTS) -NOOPT = -g -O0 -frecursive +FC = gfortran +FFLAGS = -fimplicit-none -g -frecursive +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = $(FFLAGS) -O0 -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = gfortran -g -LOADOPTS = +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = ranlib +AR = ar +ARFLAGS = cr +RANLIB = ranlib # Timer for the SECOND and DSECND routines # @@ -78,8 +76,8 @@ TIMER = INT_CPU_TIME # machine-specific, optimized BLAS library should be used whenever # possible.) # -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.ifort b/lapack-netlib/INSTALL/make.inc.ifort index b067bd484..801b46aa5 100644 --- a/lapack-netlib/INSTALL/make.inc.ifort +++ b/lapack-netlib/INSTALL/make.inc.ifort @@ -8,30 +8,28 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = icc +CC = icc CFLAGS = -O3 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = ifort -OPTS = -O3 -fp-model strict -assume protect_parens -DRVOPTS = $(OPTS) -NOOPT = -O0 -fp-model strict -assume protect_parens +FC = ifort +FFLAGS = -O3 -fp-model strict -assume protect_parens +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -O0 -fp-model strict -assume protect_parens -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = ifort -LOADOPTS = +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = ranlib +AR = ar +ARFLAGS = cr +RANLIB = ranlib # Timer for the SECOND and DSECND routines # @@ -74,8 +72,8 @@ TIMER = EXT_ETIME # machine-specific, optimized BLAS library should be used whenever # possible.) # -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.pgf95 b/lapack-netlib/INSTALL/make.inc.pgf95 index a9a5cec98..87b691cdd 100644 --- a/lapack-netlib/INSTALL/make.inc.pgf95 +++ b/lapack-netlib/INSTALL/make.inc.pgf95 @@ -8,30 +8,28 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = pgcc +CC = pgcc CFLAGS = -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. 
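One subtlety in the gfortran_debug hunk a few lines up: the old template smuggled flags into the command variables themselves (FORTRAN = gfortran -fimplicit-none -g -frecursive, LOADER = gfortran -g), which breaks any rule expecting $(FC) to be a bare command. The patch separates command from flags and derives the no-optimization set instead of duplicating it:

FC           = gfortran
FFLAGS       = -fimplicit-none -g -frecursive
FFLAGS_DRV   = $(FFLAGS)
FFLAGS_NOOPT = $(FFLAGS) -O0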
# -FORTRAN = pgf95 -OPTS = -O3 -DRVOPTS = $(OPTS) -NOOPT = -O0 +FC = pgf95 +FFLAGS = -O3 +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -O0 -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = $(FORTRAN) -LOADOPTS = +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = echo +AR = ar +ARFLAGS = cr +RANLIB = echo # Timer for the SECOND and DSECND routines # @@ -74,8 +72,8 @@ TIMER = INT_CPU_TIME # machine-specific, optimized BLAS library should be used whenever # possible.) # -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/make.inc.pghpf b/lapack-netlib/INSTALL/make.inc.pghpf index 1d9bf549c..97d22d27d 100644 --- a/lapack-netlib/INSTALL/make.inc.pghpf +++ b/lapack-netlib/INSTALL/make.inc.pghpf @@ -8,30 +8,28 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = pghpc +CC = pghpc CFLAGS = -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # -FORTRAN = pghpf -OPTS = -O4 -Mnohpfc -Mdclchk -DRVOPTS = $(OPTS) -NOOPT = -Mnohpfc -Mdclchk +FC = pghpf +FFLAGS = -O4 -Mnohpfc -Mdclchk +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -Mnohpfc -Mdclchk -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine. +# Define LDFLAGS to the desired linker options for your machine. # -LOADER = pghpf -LOADOPTS = +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = echo +AR = ar +ARFLAGS = cr +RANLIB = echo # Timer for the SECOND and DSECND routines # @@ -75,8 +73,8 @@ TIMER = EXT_ETIME # possible.) 
# #BLASLIB = -lessl -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/INSTALL/slamch.f b/lapack-netlib/INSTALL/slamch.f index 3282fa6a3..342f446ff 100644 --- a/lapack-netlib/INSTALL/slamch.f +++ b/lapack-netlib/INSTALL/slamch.f @@ -28,6 +28,7 @@ * *> \param[in] CMACH *> \verbatim +*> CMACH is CHARACTER*1 *> Specifies the value to be returned by SLAMCH: *> = 'E' or 'e', SLAMCH := eps *> = 'S' or 's , SLAMCH := sfmin diff --git a/lapack-netlib/LAPACKE/CMakeLists.txt b/lapack-netlib/LAPACKE/CMakeLists.txt index 42faef5dd..0589a74ba 100644 --- a/lapack-netlib/LAPACKE/CMakeLists.txt +++ b/lapack-netlib/LAPACKE/CMakeLists.txt @@ -16,18 +16,16 @@ if(NOT FortranCInterface_GLOBAL_FOUND OR NOT FortranCInterface_MODULE_FOUND) ${LAPACK_BINARY_DIR}/include/lapacke_mangling.h) endif() -if(WIN32 AND NOT UNIX) - add_definitions(-DHAVE_LAPACK_CONFIG_H -DLAPACK_COMPLEX_STRUCTURE) - message(STATUS "Windows BUILD") -endif() - -get_directory_property(DirDefs COMPILE_DEFINITIONS) - include_directories(include ${LAPACK_BINARY_DIR}/include) add_subdirectory(include) add_subdirectory(src) add_subdirectory(utils) +option(LAPACKE_BUILD_SINGLE "Build LAPACKE single precision real" ON) +option(LAPACKE_BUILD_DOUBLE "Build LAPACKE double precision real" ON) +option(LAPACKE_BUILD_COMPLEX "Build LAPACKE single precision complex" ON) +option(LAPACKE_BUILD_COMPLEX16 "Build LAPACKE double precision complex" ON) + macro(append_subdir_files variable dirname) get_directory_property(holder DIRECTORY ${dirname} DEFINITION ${variable}) foreach(depfile ${holder}) @@ -35,8 +33,29 @@ macro(append_subdir_files variable dirname) endforeach() endmacro() +message(STATUS "Build LAPACKE single precision real: ${LAPACKE_BUILD_SINGLE}") +message(STATUS "Build LAPACKE double precision real: ${LAPACKE_BUILD_DOUBLE}") +message(STATUS "Build LAPACKE single precision complex: ${LAPACKE_BUILD_COMPLEX}") +message(STATUS "Build LAPACKE double precision complex: ${LAPACKE_BUILD_COMPLEX16}") + append_subdir_files(LAPACKE_INCLUDE "include") append_subdir_files(SOURCES "src") +if (LAPACKE_BUILD_SINGLE) + append_subdir_files(SOURCES_SINGLE "src") + list(APPEND SOURCES ${SOURCES_SINGLE}) +endif() +if (LAPACKE_BUILD_DOUBLE) + append_subdir_files(SOURCES_DOUBLE "src") + list(APPEND SOURCES ${SOURCES_DOUBLE}) +endif() +if (LAPACKE_BUILD_COMPLEX) + append_subdir_files(SOURCES_COMPLEX "src") + list(APPEND SOURCES ${SOURCES_COMPLEX}) +endif() +if (LAPACKE_BUILD_COMPLEX16) + append_subdir_files(SOURCES_COMPLEX16 "src") + list(APPEND SOURCES ${SOURCES_COMPLEX16}) +endif() append_subdir_files(DEPRECATED "src") append_subdir_files(EXTENDED "src") append_subdir_files(MATGEN "src") @@ -61,9 +80,13 @@ set_target_properties( SOVERSION ${LAPACK_MAJOR_VERSION} ) target_include_directories(lapacke PUBLIC - $ + $ $ ) +if(WIN32 AND NOT UNIX) + target_compile_definitions(lapacke PUBLIC HAVE_LAPACK_CONFIG_H LAPACK_COMPLEX_STRUCTURE) + message(STATUS "Windows BUILD") +endif() if(LAPACKE_WITH_TMG) target_link_libraries(lapacke PRIVATE tmglib) @@ -71,7 +94,11 @@ endif() target_link_libraries(lapacke PRIVATE ${LAPACK_LIBRARIES}) lapack_install_library(lapacke) -install(FILES ${LAPACKE_INCLUDE} ${LAPACK_BINARY_DIR}/include/lapacke_mangling.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) 
+install( + FILES ${LAPACKE_INCLUDE} ${LAPACK_BINARY_DIR}/include/lapacke_mangling.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + COMPONENT Development + ) if(BUILD_TESTING) add_subdirectory(example) @@ -82,6 +109,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/lapacke.pc.in ${CMAKE_CURRENT_BINARY_ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/lapacke.pc DESTINATION ${PKG_CONFIG_DIR} + COMPONENT Development ) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/lapacke-config-version.cmake.in @@ -95,7 +123,10 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/lapacke-config.cmake ${LAPACK_BINARY_DIR}/lapacke-config-version.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/lapacke-${LAPACK_VERSION} + COMPONENT Development ) install(EXPORT lapacke-targets - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/lapacke-${LAPACK_VERSION}) + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/lapacke-${LAPACK_VERSION} + COMPONENT Development + ) diff --git a/lapack-netlib/LAPACKE/Makefile b/lapack-netlib/LAPACKE/Makefile index 016f8a2f2..a358d7c9f 100644 --- a/lapack-netlib/LAPACKE/Makefile +++ b/lapack-netlib/LAPACKE/Makefile @@ -40,22 +40,26 @@ # To clean everything including lapacke library type # 'make cleanall' # -include ../make.inc +TOPSRCDIR = .. +include $(TOPSRCDIR)/make.inc +.PHONY: all all: lapacke +.PHONY: lapacke lapacke: include/lapacke_mangling.h $(MAKE) -C src $(MAKE) -C utils include/lapacke_mangling.h: include/lapacke_mangling_with_flags.h.in - cp $< $@ + cp include/lapacke_mangling_with_flags.h.in $@ +.PHONY: lapacke_example lapacke_example: lapacke $(MAKE) -C example -#clean: cleanlib -clean: cleanobj +.PHONY: clean cleanobj cleanlib cleanexe +clean: $(MAKE) -C src clean $(MAKE) -C utils clean $(MAKE) -C example clean @@ -64,6 +68,6 @@ cleanobj: $(MAKE) -C utils cleanobj $(MAKE) -C example cleanobj cleanlib: - rm -f ../$(LAPACKELIB) + $(MAKE) -C src cleanlib cleanexe: $(MAKE) -C example cleanexe diff --git a/lapack-netlib/LAPACKE/cmake/lapacke-config-build.cmake.in b/lapack-netlib/LAPACKE/cmake/lapacke-config-build.cmake.in index 6900f4533..0a1350172 100644 --- a/lapack-netlib/LAPACKE/cmake/lapacke-config-build.cmake.in +++ b/lapack-netlib/LAPACKE/cmake/lapacke-config-build.cmake.in @@ -7,8 +7,11 @@ if(NOT TARGET lapacke) include("@LAPACK_BINARY_DIR@/lapack-targets.cmake") endif() +# Hint for project building against lapack +set(LAPACKE_Fortran_COMPILER_ID ${LAPACK_Fortran_COMPILER_ID}) + # Report lapacke header search locations from build tree. set(LAPACKE_INCLUDE_DIRS "@LAPACK_BINARY_DIR@/include") # Report lapacke libraries. -set(LAPACKE_LIBRARIES lapacke) +set(LAPACKE_LIBRARIES lapacke ${LAPACK_LIBRARIES}) diff --git a/lapack-netlib/LAPACKE/cmake/lapacke-config-install.cmake.in b/lapack-netlib/LAPACKE/cmake/lapacke-config-install.cmake.in index caa459a24..57a5c2b2f 100644 --- a/lapack-netlib/LAPACKE/cmake/lapacke-config-install.cmake.in +++ b/lapack-netlib/LAPACKE/cmake/lapacke-config-install.cmake.in @@ -13,11 +13,14 @@ if(NOT TARGET lapacke) include(${_LAPACKE_SELF_DIR}/lapacke-targets.cmake) endif() +# Hint for project building against lapack +set(LAPACKE_Fortran_COMPILER_ID ${LAPACK_Fortran_COMPILER_ID}) + # Report lapacke header search locations. set(LAPACKE_INCLUDE_DIRS ${_LAPACKE_PREFIX}/include) # Report lapacke libraries. 
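The LAPACKE hunks above add per-precision switches (LAPACKE_BUILD_SINGLE, LAPACKE_BUILD_DOUBLE, LAPACKE_BUILD_COMPLEX, LAPACKE_BUILD_COMPLEX16, all defaulting to ON) and move the Windows compile definitions onto the lapacke target itself. An illustrative configure invocation, wrapped in a make target for consistency with the other sketches here (the target name and build directory are arbitrary), that drops the double-complex wrappers:

lapacke-no-z:
	mkdir -p build && cd build && cmake -DLAPACKE=ON -DLAPACKE_BUILD_COMPLEX16=OFF ..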
-set(LAPACKE_LIBRARIES lapacke) +set(LAPACKE_LIBRARIES lapacke ${LAPACK_LIBRARIES}) unset(_LAPACKE_PREFIX) unset(_LAPACKE_SELF_DIR) diff --git a/lapack-netlib/LAPACKE/example/Makefile b/lapack-netlib/LAPACKE/example/Makefile index f959a2be0..77526dc42 100644 --- a/lapack-netlib/LAPACKE/example/Makefile +++ b/lapack-netlib/LAPACKE/example/Makefile @@ -1,34 +1,38 @@ -include ../../make.inc +TOPSRCDIR = ../.. +include $(TOPSRCDIR)/make.inc +.SUFFIXES: .c .o +.c.o: + $(CC) $(CFLAGS) -I. -I../include -c -o $@ $< + +.PHONY: all all: xexample_DGESV_rowmajor \ xexample_DGESV_colmajor \ xexample_DGELS_rowmajor \ xexample_DGELS_colmajor -LIBRARIES = ../../$(LAPACKELIB) ../../$(LAPACKLIB) $(BLASLIB) +LIBRARIES = $(LAPACKELIB) $(LAPACKLIB) $(BLASLIB) # Double Precision Examples xexample_DGESV_rowmajor: example_DGESV_rowmajor.o lapacke_example_aux.o $(LIBRARIES) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ ./$@ xexample_DGESV_colmajor: example_DGESV_colmajor.o lapacke_example_aux.o $(LIBRARIES) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ ./$@ xexample_DGELS_rowmajor: example_DGELS_rowmajor.o lapacke_example_aux.o $(LIBRARIES) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ ./$@ xexample_DGELS_colmajor: example_DGELS_colmajor.o lapacke_example_aux.o $(LIBRARIES) - $(LOADER) $(LOADOPTS) -o $@ $^ + $(FC) $(FFLAGS) $(LDFLAGS) -o $@ $^ ./$@ +.PHONY: clean cleanobj cleanexe clean: cleanobj cleanexe cleanobj: rm -f *.o cleanexe: rm -f x* - -.c.o: - $(CC) $(CFLAGS) -I. -I../include -c -o $@ $< diff --git a/lapack-netlib/LAPACKE/include/CMakeLists.txt b/lapack-netlib/LAPACKE/include/CMakeLists.txt index 4c30c0501..b690dc554 100644 --- a/lapack-netlib/LAPACKE/include/CMakeLists.txt +++ b/lapack-netlib/LAPACKE/include/CMakeLists.txt @@ -1,3 +1,3 @@ -set(LAPACKE_INCLUDE lapacke.h lapacke_config.h lapacke_utils.h) +set(LAPACKE_INCLUDE lapacke.h lapack.h lapacke_config.h lapacke_utils.h) file(COPY ${LAPACKE_INCLUDE} DESTINATION ${LAPACK_BINARY_DIR}/include) diff --git a/lapack-netlib/LAPACKE/include/lapack.h b/lapack-netlib/LAPACKE/include/lapack.h new file mode 100644 index 000000000..aedaa308d --- /dev/null +++ b/lapack-netlib/LAPACKE/include/lapack.h @@ -0,0 +1,13747 @@ +#ifndef LAPACK_H +#define LAPACK_H + +/* +* Turn on HAVE_LAPACK_CONFIG_H to redefine C-LAPACK datatypes +*/ +#ifdef HAVE_LAPACK_CONFIG_H +#include "lapacke_config.h" +#endif + +#include "lapacke_mangling.h" + +#include <stdlib.h> + +/* Complex types are structures equivalent to the +* Fortran complex types COMPLEX(4) and COMPLEX(8). 
+* +* One can also redefine the types with his own types +* for example by including in the code definitions like +* +* #define lapack_complex_float std::complex<float> +* #define lapack_complex_double std::complex<double> +* +* or define these types in the command line: +* +* -Dlapack_complex_float="std::complex<float>" +* -Dlapack_complex_double="std::complex<double>" +*/ + +#ifndef LAPACK_COMPLEX_CUSTOM + +/* Complex type (single precision) */ +#ifndef lapack_complex_float +#ifndef __cplusplus +#include <complex.h> +#else +#include <complex> +#endif +#define lapack_complex_float float _Complex +#endif + +#ifndef lapack_complex_float_real +#define lapack_complex_float_real(z) (creal(z)) +#endif + +#ifndef lapack_complex_float_imag +#define lapack_complex_float_imag(z) (cimag(z)) +#endif + +/* Complex type (double precision) */ +#ifndef lapack_complex_double +#ifndef __cplusplus +#include <complex.h> +#else +#include <complex> +#endif +#define lapack_complex_double double _Complex +#endif + +#ifndef lapack_complex_double_real +#define lapack_complex_double_real(z) (creal(z)) +#endif + +#ifndef lapack_complex_double_imag +#define lapack_complex_double_imag(z) (cimag(z)) +#endif + +#endif /* LAPACK_COMPLEX_CUSTOM */ + + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------------------------*/ +#ifndef lapack_int +#define lapack_int int +#endif + +#ifndef lapack_logical +#define lapack_logical lapack_int +#endif + +/* f2c, hence clapack and MacOS Accelerate, returns double instead of float + * for sdot, slange, clange, etc. */ +#if defined(LAPACK_F2C) + typedef double lapack_float_return; +#else + typedef float lapack_float_return; +#endif + + +/* Callback logical functions of one, two, or three arguments are used +* to select eigenvalues to sort to the top left of the Schur form. +* The value is selected if function returns TRUE (non-zero). */ + +typedef lapack_logical (*LAPACK_S_SELECT2) ( const float*, const float* ); +typedef lapack_logical (*LAPACK_S_SELECT3) + ( const float*, const float*, const float* ); +typedef lapack_logical (*LAPACK_D_SELECT2) ( const double*, const double* ); +typedef lapack_logical (*LAPACK_D_SELECT3) + ( const double*, const double*, const double* ); + +typedef lapack_logical (*LAPACK_C_SELECT1) ( const lapack_complex_float* ); +typedef lapack_logical (*LAPACK_C_SELECT2) + ( const lapack_complex_float*, const lapack_complex_float* ); +typedef lapack_logical (*LAPACK_Z_SELECT1) ( const lapack_complex_double* ); +typedef lapack_logical (*LAPACK_Z_SELECT2) + ( const lapack_complex_double*, const lapack_complex_double* ); + +#define LAPACK_lsame LAPACK_GLOBAL(lsame,LSAME) +lapack_logical LAPACK_lsame( char* ca, char* cb, + lapack_int lca, lapack_int lcb ); + + +/*----------------------------------------------------------------------------*/ +/* This is in alphabetical order (ignoring leading precision). 
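+*
+* Editorial note (hedged sketch; not part of the upstream lapack.h text):
+* the prototypes below are raw bindings to the Fortran routines, so every
+* scalar argument is passed by pointer, matrices are stored column-major,
+* and errors are reported through the final info argument (info == 0 means
+* success). A purely illustrative call of LAPACK_dgesv, declared further
+* below in this header, solving a 2x2 system A*x = b in place in b:
+*
+*     lapack_int n = 2, nrhs = 1, lda = 2, ldb = 2, ipiv[2], info;
+*     double A[4] = { 4.0, 1.0, 1.0, 3.0 };
+*     double b[2] = { 1.0, 2.0 };
+*     LAPACK_dgesv( &n, &nrhs, A, &lda, ipiv, b, &ldb, &info );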
*/ + +#define LAPACK_cbbcsd LAPACK_GLOBAL(cbbcsd,CBBCSD) +void LAPACK_cbbcsd( + char const* jobu1, char const* jobu2, char const* jobv1t, char const* jobv2t, char const* trans, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + float* theta, + float* phi, + lapack_complex_float* U1, lapack_int const* ldu1, + lapack_complex_float* U2, lapack_int const* ldu2, + lapack_complex_float* V1T, lapack_int const* ldv1t, + lapack_complex_float* V2T, lapack_int const* ldv2t, + float* B11D, + float* B11E, + float* B12D, + float* B12E, + float* B21D, + float* B21E, + float* B22D, + float* B22E, + float* rwork, lapack_int const* lrwork, + lapack_int* info ); + +#define LAPACK_dbbcsd LAPACK_GLOBAL(dbbcsd,DBBCSD) +void LAPACK_dbbcsd( + char const* jobu1, char const* jobu2, char const* jobv1t, char const* jobv2t, char const* trans, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + double* theta, + double* phi, + double* U1, lapack_int const* ldu1, + double* U2, lapack_int const* ldu2, + double* V1T, lapack_int const* ldv1t, + double* V2T, lapack_int const* ldv2t, + double* B11D, + double* B11E, + double* B12D, + double* B12E, + double* b21d, + double* b21e, + double* b22d, + double* b22e, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sbbcsd LAPACK_GLOBAL(sbbcsd,SBBCSD) +void LAPACK_sbbcsd( + char const* jobu1, char const* jobu2, char const* jobv1t, char const* jobv2t, char const* trans, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + float* theta, + float* phi, + float* U1, lapack_int const* ldu1, + float* U2, lapack_int const* ldu2, + float* V1T, lapack_int const* ldv1t, + float* V2T, lapack_int const* ldv2t, + float* B11D, + float* B11E, + float* B12D, + float* B12E, + float* B21D, + float* B21E, + float* B22D, + float* B22E, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zbbcsd LAPACK_GLOBAL(zbbcsd,ZBBCSD) +void LAPACK_zbbcsd( + char const* jobu1, char const* jobu2, char const* jobv1t, char const* jobv2t, char const* trans, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + double* theta, + double* phi, + lapack_complex_double* U1, lapack_int const* ldu1, + lapack_complex_double* U2, lapack_int const* ldu2, + lapack_complex_double* V1T, lapack_int const* ldv1t, + lapack_complex_double* V2T, lapack_int const* ldv2t, + double* B11D, + double* B11E, + double* B12D, + double* B12E, + double* B21D, + double* B21E, + double* B22D, + double* B22E, + double* rwork, lapack_int const* lrwork, + lapack_int* info ); + +#define LAPACK_dbdsdc LAPACK_GLOBAL(dbdsdc,DBDSDC) +void LAPACK_dbdsdc( + char const* uplo, char const* compq, + lapack_int const* n, + double* D, + double* E, + double* U, lapack_int const* ldu, + double* VT, lapack_int const* ldvt, + double* Q, lapack_int* IQ, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sbdsdc LAPACK_GLOBAL(sbdsdc,SBDSDC) +void LAPACK_sbdsdc( + char const* uplo, char const* compq, + lapack_int const* n, + float* D, + float* E, + float* U, lapack_int const* ldu, + float* VT, lapack_int const* ldvt, + float* Q, lapack_int* IQ, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_cbdsqr LAPACK_GLOBAL(cbdsqr,CBDSQR) +void LAPACK_cbdsqr( + char const* uplo, + lapack_int const* n, lapack_int const* ncvt, lapack_int const* nru, lapack_int const* ncc, + float* D, + float* E, + lapack_complex_float* VT, lapack_int const* ldvt, + lapack_complex_float* U, lapack_int const* ldu, + lapack_complex_float* C, 
lapack_int const* ldc, + float* rwork, + lapack_int* info ); + +#define LAPACK_dbdsqr LAPACK_GLOBAL(dbdsqr,DBDSQR) +void LAPACK_dbdsqr( + char const* uplo, + lapack_int const* n, lapack_int const* ncvt, lapack_int const* nru, lapack_int const* ncc, + double* D, + double* E, + double* VT, lapack_int const* ldvt, + double* U, lapack_int const* ldu, + double* C, lapack_int const* ldc, + double* work, + lapack_int* info ); + +#define LAPACK_sbdsqr LAPACK_GLOBAL(sbdsqr,SBDSQR) +void LAPACK_sbdsqr( + char const* uplo, + lapack_int const* n, lapack_int const* ncvt, lapack_int const* nru, lapack_int const* ncc, + float* D, + float* E, + float* VT, lapack_int const* ldvt, + float* U, lapack_int const* ldu, + float* C, lapack_int const* ldc, + float* work, + lapack_int* info ); + +#define LAPACK_zbdsqr LAPACK_GLOBAL(zbdsqr,ZBDSQR) +void LAPACK_zbdsqr( + char const* uplo, + lapack_int const* n, lapack_int const* ncvt, lapack_int const* nru, lapack_int const* ncc, + double* D, + double* E, + lapack_complex_double* VT, lapack_int const* ldvt, + lapack_complex_double* U, lapack_int const* ldu, + lapack_complex_double* C, lapack_int const* ldc, + double* rwork, + lapack_int* info ); + +#define LAPACK_dbdsvdx LAPACK_GLOBAL(dbdsvdx,DBDSVDX) +void LAPACK_dbdsvdx( + char const* uplo, char const* jobz, char const* range, + lapack_int const* n, + double const* D, + double const* E, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* ns, + double* S, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sbdsvdx LAPACK_GLOBAL(sbdsvdx,SBDSVDX) +void LAPACK_sbdsvdx( + char const* uplo, char const* jobz, char const* range, + lapack_int const* n, + float const* D, + float const* E, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* ns, + float* S, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ddisna LAPACK_GLOBAL(ddisna,DDISNA) +void LAPACK_ddisna( + char const* job, + lapack_int const* m, lapack_int const* n, + double const* D, + double* SEP, + lapack_int* info ); + +#define LAPACK_sdisna LAPACK_GLOBAL(sdisna,SDISNA) +void LAPACK_sdisna( + char const* job, + lapack_int const* m, lapack_int const* n, + float const* D, + float* SEP, + lapack_int* info ); + +#define LAPACK_cgbbrd LAPACK_GLOBAL(cgbbrd,CGBBRD) +void LAPACK_cgbbrd( + char const* vect, + lapack_int const* m, lapack_int const* n, lapack_int const* ncc, lapack_int const* kl, lapack_int const* ku, + lapack_complex_float* AB, lapack_int const* ldab, + float* D, + float* E, + lapack_complex_float* Q, lapack_int const* ldq, + lapack_complex_float* PT, lapack_int const* ldpt, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgbbrd LAPACK_GLOBAL(dgbbrd,DGBBRD) +void LAPACK_dgbbrd( + char const* vect, + lapack_int const* m, lapack_int const* n, lapack_int const* ncc, lapack_int const* kl, lapack_int const* ku, + double* AB, lapack_int const* ldab, + double* D, + double* E, + double* Q, lapack_int const* ldq, + double* PT, lapack_int const* ldpt, + double* C, lapack_int const* ldc, + double* work, + lapack_int* info ); + +#define LAPACK_sgbbrd LAPACK_GLOBAL(sgbbrd,SGBBRD) +void LAPACK_sgbbrd( + char const* vect, + lapack_int const* m, lapack_int const* n, lapack_int const* ncc, lapack_int const* kl, lapack_int const* ku, + float* AB, lapack_int const* ldab, + float* D, + 
float* E, + float* Q, lapack_int const* ldq, + float* PT, lapack_int const* ldpt, + float* C, lapack_int const* ldc, + float* work, + lapack_int* info ); + +#define LAPACK_zgbbrd LAPACK_GLOBAL(zgbbrd,ZGBBRD) +void LAPACK_zgbbrd( + char const* vect, + lapack_int const* m, lapack_int const* n, lapack_int const* ncc, lapack_int const* kl, lapack_int const* ku, + lapack_complex_double* AB, lapack_int const* ldab, + double* D, + double* E, + lapack_complex_double* Q, lapack_int const* ldq, + lapack_complex_double* PT, lapack_int const* ldpt, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgbcon LAPACK_GLOBAL(cgbcon,CGBCON) +void LAPACK_cgbcon( + char const* norm, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + lapack_complex_float const* AB, lapack_int const* ldab, lapack_int const* ipiv, + float const* anorm, + float* rcond, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgbcon LAPACK_GLOBAL(dgbcon,DGBCON) +void LAPACK_dgbcon( + char const* norm, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + double const* AB, lapack_int const* ldab, lapack_int const* ipiv, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgbcon LAPACK_GLOBAL(sgbcon,SGBCON) +void LAPACK_sgbcon( + char const* norm, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + float const* AB, lapack_int const* ldab, lapack_int const* ipiv, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgbcon LAPACK_GLOBAL(zgbcon,ZGBCON) +void LAPACK_zgbcon( + char const* norm, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + lapack_complex_double const* AB, lapack_int const* ldab, lapack_int const* ipiv, + double const* anorm, + double* rcond, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgbequ LAPACK_GLOBAL(cgbequ,CGBEQU) +void LAPACK_cgbequ( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + lapack_complex_float const* AB, lapack_int const* ldab, + float* R, + float* C, + float* rowcnd, + float* colcnd, + float* amax, + lapack_int* info ); + +#define LAPACK_dgbequ LAPACK_GLOBAL(dgbequ,DGBEQU) +void LAPACK_dgbequ( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + double const* AB, lapack_int const* ldab, + double* R, + double* C, + double* rowcnd, + double* colcnd, + double* amax, + lapack_int* info ); + +#define LAPACK_sgbequ LAPACK_GLOBAL(sgbequ,SGBEQU) +void LAPACK_sgbequ( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + float const* AB, lapack_int const* ldab, + float* R, + float* C, + float* rowcnd, + float* colcnd, + float* amax, + lapack_int* info ); + +#define LAPACK_zgbequ LAPACK_GLOBAL(zgbequ,ZGBEQU) +void LAPACK_zgbequ( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + lapack_complex_double const* AB, lapack_int const* ldab, + double* R, + double* C, + double* rowcnd, + double* colcnd, + double* amax, + lapack_int* info ); + +#define LAPACK_cgbequb LAPACK_GLOBAL(cgbequb,CGBEQUB) +void LAPACK_cgbequb( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + lapack_complex_float const* AB, lapack_int const* ldab, + float* R, + float* C, + float* rowcnd, + float* colcnd, + 
float* amax, + lapack_int* info ); + +#define LAPACK_dgbequb LAPACK_GLOBAL(dgbequb,DGBEQUB) +void LAPACK_dgbequb( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + double const* AB, lapack_int const* ldab, + double* R, + double* C, + double* rowcnd, + double* colcnd, + double* amax, + lapack_int* info ); + +#define LAPACK_sgbequb LAPACK_GLOBAL(sgbequb,SGBEQUB) +void LAPACK_sgbequb( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + float const* AB, lapack_int const* ldab, + float* R, + float* C, + float* rowcnd, + float* colcnd, + float* amax, + lapack_int* info ); + +#define LAPACK_zgbequb LAPACK_GLOBAL(zgbequb,ZGBEQUB) +void LAPACK_zgbequb( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + lapack_complex_double const* AB, lapack_int const* ldab, + double* R, + double* C, + double* rowcnd, + double* colcnd, + double* amax, + lapack_int* info ); + +#define LAPACK_cgbrfs LAPACK_GLOBAL(cgbrfs,CGBRFS) +void LAPACK_cgbrfs( + char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_float const* AB, lapack_int const* ldab, + lapack_complex_float const* AFB, lapack_int const* ldafb, lapack_int const* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgbrfs LAPACK_GLOBAL(dgbrfs,DGBRFS) +void LAPACK_dgbrfs( + char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + double const* AB, lapack_int const* ldab, + double const* AFB, lapack_int const* ldafb, lapack_int const* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgbrfs LAPACK_GLOBAL(sgbrfs,SGBRFS) +void LAPACK_sgbrfs( + char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + float const* AB, lapack_int const* ldab, + float const* AFB, lapack_int const* ldafb, lapack_int const* ipiv, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgbrfs LAPACK_GLOBAL(zgbrfs,ZGBRFS) +void LAPACK_zgbrfs( + char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_double const* AB, lapack_int const* ldab, + lapack_complex_double const* AFB, lapack_int const* ldafb, lapack_int const* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgbrfsx LAPACK_GLOBAL(cgbrfsx,CGBRFSX) +void LAPACK_cgbrfsx( + char const* trans, char const* equed, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_float const* AB, lapack_int const* ldab, + lapack_complex_float const* AFB, lapack_int const* ldafb, lapack_int const* ipiv, + float* R, + float* C, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + 
float* err_bnds_comp, lapack_int const* nparams, + float* params, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgbrfsx LAPACK_GLOBAL(dgbrfsx,DGBRFSX) +void LAPACK_dgbrfsx( + char const* trans, char const* equed, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + double const* AB, lapack_int const* ldab, + double const* AFB, lapack_int const* ldafb, lapack_int const* ipiv, + double* R, + double* C, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgbrfsx LAPACK_GLOBAL(sgbrfsx,SGBRFSX) +void LAPACK_sgbrfsx( + char const* trans, char const* equed, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + float const* AB, lapack_int const* ldab, + float const* AFB, lapack_int const* ldafb, lapack_int const* ipiv, + float* R, + float* C, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgbrfsx LAPACK_GLOBAL(zgbrfsx,ZGBRFSX) +void LAPACK_zgbrfsx( + char const* trans, char const* equed, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_double const* AB, lapack_int const* ldab, + lapack_complex_double const* AFB, lapack_int const* ldafb, lapack_int const* ipiv, + double* R, + double* C, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgbsv LAPACK_GLOBAL(cgbsv,CGBSV) +void LAPACK_cgbsv( + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_float* AB, lapack_int const* ldab, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dgbsv LAPACK_GLOBAL(dgbsv,DGBSV) +void LAPACK_dgbsv( + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + double* AB, lapack_int const* ldab, lapack_int* ipiv, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_sgbsv LAPACK_GLOBAL(sgbsv,SGBSV) +void LAPACK_sgbsv( + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + float* AB, lapack_int const* ldab, lapack_int* ipiv, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zgbsv LAPACK_GLOBAL(zgbsv,ZGBSV) +void LAPACK_zgbsv( + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_double* AB, lapack_int const* ldab, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cgbsvx LAPACK_GLOBAL(cgbsvx,CGBSVX) +void LAPACK_cgbsvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_float* AB, lapack_int const* ldab, + 
lapack_complex_float* AFB, lapack_int const* ldafb, lapack_int* ipiv, char* equed, + float* R, + float* C, + lapack_complex_float* B, + lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgbsvx LAPACK_GLOBAL(dgbsvx,DGBSVX) +void LAPACK_dgbsvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + double* AB, lapack_int const* ldab, + double* AFB, lapack_int const* ldafb, lapack_int* ipiv, char* equed, + double* R, + double* C, + double* B, + lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgbsvx LAPACK_GLOBAL(sgbsvx,SGBSVX) +void LAPACK_sgbsvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + float* AB, lapack_int const* ldab, + float* AFB, lapack_int const* ldafb, lapack_int* ipiv, char* equed, + float* R, + float* C, + float* B, + lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgbsvx LAPACK_GLOBAL(zgbsvx,ZGBSVX) +void LAPACK_zgbsvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_double* AB, lapack_int const* ldab, + lapack_complex_double* AFB, lapack_int const* ldafb, lapack_int* ipiv, char* equed, + double* R, + double* C, + lapack_complex_double* B, + lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgbsvxx LAPACK_GLOBAL(cgbsvxx,CGBSVXX) +void LAPACK_cgbsvxx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_float* AB, lapack_int const* ldab, + lapack_complex_float* AFB, lapack_int const* ldafb, lapack_int* ipiv, char* equed, + float* R, + float* C, + lapack_complex_float* B, + lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* rpvgrw, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgbsvxx LAPACK_GLOBAL(dgbsvxx,DGBSVXX) +void LAPACK_dgbsvxx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + double* AB, lapack_int const* ldab, + double* AFB, lapack_int const* ldafb, lapack_int* ipiv, char* equed, + double* R, + double* C, + double* B, + lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgbsvxx LAPACK_GLOBAL(sgbsvxx,SGBSVXX) +void LAPACK_sgbsvxx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + float* AB, lapack_int const* 
ldab, + float* AFB, lapack_int const* ldafb, lapack_int* ipiv, char* equed, + float* R, + float* C, + float* B, + lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* rpvgrw, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgbsvxx LAPACK_GLOBAL(zgbsvxx,ZGBSVXX) +void LAPACK_zgbsvxx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_double* AB, lapack_int const* ldab, + lapack_complex_double* AFB, lapack_int const* ldafb, lapack_int* ipiv, char* equed, + double* R, + double* C, + lapack_complex_double* B, + lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgbtrf LAPACK_GLOBAL(cgbtrf,CGBTRF) +void LAPACK_cgbtrf( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + lapack_complex_float* AB, lapack_int const* ldab, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_dgbtrf LAPACK_GLOBAL(dgbtrf,DGBTRF) +void LAPACK_dgbtrf( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + double* AB, lapack_int const* ldab, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_sgbtrf LAPACK_GLOBAL(sgbtrf,SGBTRF) +void LAPACK_sgbtrf( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + float* AB, lapack_int const* ldab, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_zgbtrf LAPACK_GLOBAL(zgbtrf,ZGBTRF) +void LAPACK_zgbtrf( + lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku, + lapack_complex_double* AB, lapack_int const* ldab, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_cgbtrs LAPACK_GLOBAL(cgbtrs,CGBTRS) +void LAPACK_cgbtrs( + char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_float const* AB, lapack_int const* ldab, lapack_int const* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dgbtrs LAPACK_GLOBAL(dgbtrs,DGBTRS) +void LAPACK_dgbtrs( + char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + double const* AB, lapack_int const* ldab, lapack_int const* ipiv, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_sgbtrs LAPACK_GLOBAL(sgbtrs,SGBTRS) +void LAPACK_sgbtrs( + char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + float const* AB, lapack_int const* ldab, lapack_int const* ipiv, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zgbtrs LAPACK_GLOBAL(zgbtrs,ZGBTRS) +void LAPACK_zgbtrs( + char const* trans, + lapack_int const* n, lapack_int const* kl, lapack_int const* ku, lapack_int const* nrhs, + lapack_complex_double const* AB, lapack_int const* ldab, lapack_int const* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cgebak LAPACK_GLOBAL(cgebak,CGEBAK) +void LAPACK_cgebak( + char const* job, char const* side, + lapack_int 
const* n, lapack_int const* ilo, lapack_int const* ihi, + float const* scale, lapack_int const* m, + lapack_complex_float* V, lapack_int const* ldv, + lapack_int* info ); + +#define LAPACK_dgebak LAPACK_GLOBAL(dgebak,DGEBAK) +void LAPACK_dgebak( + char const* job, char const* side, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + double const* scale, lapack_int const* m, + double* V, lapack_int const* ldv, + lapack_int* info ); + +#define LAPACK_sgebak LAPACK_GLOBAL(sgebak,SGEBAK) +void LAPACK_sgebak( + char const* job, char const* side, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + float const* scale, lapack_int const* m, + float* V, lapack_int const* ldv, + lapack_int* info ); + +#define LAPACK_zgebak LAPACK_GLOBAL(zgebak,ZGEBAK) +void LAPACK_zgebak( + char const* job, char const* side, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + double const* scale, lapack_int const* m, + lapack_complex_double* V, lapack_int const* ldv, + lapack_int* info ); + +#define LAPACK_cgebal LAPACK_GLOBAL(cgebal,CGEBAL) +void LAPACK_cgebal( + char const* job, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ilo, lapack_int* ihi, + float* scale, + lapack_int* info ); + +#define LAPACK_dgebal LAPACK_GLOBAL(dgebal,DGEBAL) +void LAPACK_dgebal( + char const* job, + lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* ilo, lapack_int* ihi, + double* scale, + lapack_int* info ); + +#define LAPACK_sgebal LAPACK_GLOBAL(sgebal,SGEBAL) +void LAPACK_sgebal( + char const* job, + lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* ilo, lapack_int* ihi, + float* scale, + lapack_int* info ); + +#define LAPACK_zgebal LAPACK_GLOBAL(zgebal,ZGEBAL) +void LAPACK_zgebal( + char const* job, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ilo, lapack_int* ihi, + double* scale, + lapack_int* info ); + +#define LAPACK_cgebrd LAPACK_GLOBAL(cgebrd,CGEBRD) +void LAPACK_cgebrd( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* D, + float* E, + lapack_complex_float* tauq, + lapack_complex_float* taup, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgebrd LAPACK_GLOBAL(dgebrd,DGEBRD) +void LAPACK_dgebrd( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* D, + double* E, + double* tauq, + double* taup, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgebrd LAPACK_GLOBAL(sgebrd,SGEBRD) +void LAPACK_sgebrd( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* D, + float* E, + float* tauq, + float* taup, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgebrd LAPACK_GLOBAL(zgebrd,ZGEBRD) +void LAPACK_zgebrd( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* D, + double* E, + lapack_complex_double* tauq, + lapack_complex_double* taup, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgecon LAPACK_GLOBAL(cgecon,CGECON) +void LAPACK_cgecon( + char const* norm, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + float const* anorm, + float* rcond, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgecon LAPACK_GLOBAL(dgecon,DGECON) +void LAPACK_dgecon( + char const* norm, + 
lapack_int const* n, + double const* A, lapack_int const* lda, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgecon LAPACK_GLOBAL(sgecon,SGECON) +void LAPACK_sgecon( + char const* norm, + lapack_int const* n, + float const* A, lapack_int const* lda, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgecon LAPACK_GLOBAL(zgecon,ZGECON) +void LAPACK_zgecon( + char const* norm, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + double const* anorm, + double* rcond, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgeequ LAPACK_GLOBAL(cgeequ,CGEEQU) +void LAPACK_cgeequ( + lapack_int const* m, lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + float* R, + float* C, + float* rowcnd, + float* colcnd, + float* amax, + lapack_int* info ); + +#define LAPACK_dgeequ LAPACK_GLOBAL(dgeequ,DGEEQU) +void LAPACK_dgeequ( + lapack_int const* m, lapack_int const* n, + double const* A, lapack_int const* lda, + double* R, + double* C, + double* rowcnd, + double* colcnd, + double* amax, + lapack_int* info ); + +#define LAPACK_sgeequ LAPACK_GLOBAL(sgeequ,SGEEQU) +void LAPACK_sgeequ( + lapack_int const* m, lapack_int const* n, + float const* A, lapack_int const* lda, + float* R, + float* C, + float* rowcnd, + float* colcnd, + float* amax, + lapack_int* info ); + +#define LAPACK_zgeequ LAPACK_GLOBAL(zgeequ,ZGEEQU) +void LAPACK_zgeequ( + lapack_int const* m, lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + double* R, + double* C, + double* rowcnd, + double* colcnd, + double* amax, + lapack_int* info ); + +#define LAPACK_cgeequb LAPACK_GLOBAL(cgeequb,CGEEQUB) +void LAPACK_cgeequb( + lapack_int const* m, lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + float* R, + float* C, + float* rowcnd, + float* colcnd, + float* amax, + lapack_int* info ); + +#define LAPACK_dgeequb LAPACK_GLOBAL(dgeequb,DGEEQUB) +void LAPACK_dgeequb( + lapack_int const* m, lapack_int const* n, + double const* A, lapack_int const* lda, + double* R, + double* C, + double* rowcnd, + double* colcnd, + double* amax, + lapack_int* info ); + +#define LAPACK_sgeequb LAPACK_GLOBAL(sgeequb,SGEEQUB) +void LAPACK_sgeequb( + lapack_int const* m, lapack_int const* n, + float const* A, lapack_int const* lda, + float* R, + float* C, + float* rowcnd, + float* colcnd, + float* amax, + lapack_int* info ); + +#define LAPACK_zgeequb LAPACK_GLOBAL(zgeequb,ZGEEQUB) +void LAPACK_zgeequb( + lapack_int const* m, lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + double* R, + double* C, + double* rowcnd, + double* colcnd, + double* amax, + lapack_int* info ); + +#define LAPACK_cgees LAPACK_GLOBAL(cgees,CGEES) +void LAPACK_cgees( + char const* jobvs, char const* sort, LAPACK_C_SELECT1 select, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* sdim, + lapack_complex_float* W, + lapack_complex_float* VS, lapack_int const* ldvs, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_dgees LAPACK_GLOBAL(dgees,DGEES) +void LAPACK_dgees( + char const* jobvs, char const* sort, LAPACK_D_SELECT2 select, + lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* sdim, + double* WR, + double* WI, + double* VS, lapack_int const* ldvs, + double* 
work, lapack_int const* lwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_sgees LAPACK_GLOBAL(sgees,SGEES) +void LAPACK_sgees( + char const* jobvs, char const* sort, LAPACK_S_SELECT2 select, + lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* sdim, + float* WR, + float* WI, + float* VS, lapack_int const* ldvs, + float* work, lapack_int const* lwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_zgees LAPACK_GLOBAL(zgees,ZGEES) +void LAPACK_zgees( + char const* jobvs, char const* sort, LAPACK_Z_SELECT1 select, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* sdim, + lapack_complex_double* W, + lapack_complex_double* VS, lapack_int const* ldvs, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_cgeesx LAPACK_GLOBAL(cgeesx,CGEESX) +void LAPACK_cgeesx( + char const* jobvs, char const* sort, LAPACK_C_SELECT1 select, char const* sense, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* sdim, + lapack_complex_float* W, + lapack_complex_float* VS, lapack_int const* ldvs, + float* rconde, + float* rcondv, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_dgeesx LAPACK_GLOBAL(dgeesx,DGEESX) +void LAPACK_dgeesx( + char const* jobvs, char const* sort, LAPACK_D_SELECT2 select, char const* sense, + lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* sdim, + double* WR, + double* WI, + double* VS, lapack_int const* ldvs, + double* rconde, + double* rcondv, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_sgeesx LAPACK_GLOBAL(sgeesx,SGEESX) +void LAPACK_sgeesx( + char const* jobvs, char const* sort, LAPACK_S_SELECT2 select, char const* sense, + lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* sdim, + float* WR, + float* WI, + float* VS, lapack_int const* ldvs, + float* rconde, + float* rcondv, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_zgeesx LAPACK_GLOBAL(zgeesx,ZGEESX) +void LAPACK_zgeesx( + char const* jobvs, char const* sort, LAPACK_Z_SELECT1 select, char const* sense, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* sdim, + lapack_complex_double* W, + lapack_complex_double* VS, lapack_int const* ldvs, + double* rconde, + double* rcondv, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_cgeev LAPACK_GLOBAL(cgeev,CGEEV) +void LAPACK_cgeev( + char const* jobvl, char const* jobvr, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* W, + lapack_complex_float* VL, lapack_int const* ldvl, + lapack_complex_float* VR, lapack_int const* ldvr, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgeev LAPACK_GLOBAL(dgeev,DGEEV) +void LAPACK_dgeev( + char const* jobvl, char const* jobvr, + lapack_int const* n, + double* A, lapack_int const* lda, + double* WR, + double* WI, + double* VL, lapack_int const* ldvl, + double* VR, lapack_int const* ldvr, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgeev LAPACK_GLOBAL(sgeev,SGEEV) 
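+/* Editorial note (hedged sketch; not part of the upstream lapack.h text):
+ * like most LAPACK routines that take a work/lwork pair, the *geev drivers
+ * accept lwork = -1 as a workspace query; the optimal size is returned in
+ * work[0]. An illustrative two-step call of LAPACK_dgeev, declared above
+ * (n, A, lda, wr and wi are assumed to be set up by the caller; with
+ * jobvl = jobvr = "N" the eigenvectors are skipped, so VL/VR may be NULL
+ * with leading dimension 1; malloc comes from stdlib.h, included above):
+ *
+ *     lapack_int lwork = -1, ldv = 1, info;
+ *     double wkopt;
+ *     LAPACK_dgeev( "N", "N", &n, A, &lda, wr, wi,
+ *                   NULL, &ldv, NULL, &ldv, &wkopt, &lwork, &info );
+ *     lwork = (lapack_int) wkopt;
+ *     double* work = malloc( lwork * sizeof(double) );
+ *     LAPACK_dgeev( "N", "N", &n, A, &lda, wr, wi,
+ *                   NULL, &ldv, NULL, &ldv, work, &lwork, &info );
+ *     free( work );
+ */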
+void LAPACK_sgeev( + char const* jobvl, char const* jobvr, + lapack_int const* n, + float* A, lapack_int const* lda, + float* WR, + float* WI, + float* VL, lapack_int const* ldvl, + float* VR, lapack_int const* ldvr, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgeev LAPACK_GLOBAL(zgeev,ZGEEV) +void LAPACK_zgeev( + char const* jobvl, char const* jobvr, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* W, + lapack_complex_double* VL, lapack_int const* ldvl, + lapack_complex_double* VR, lapack_int const* ldvr, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgeevx LAPACK_GLOBAL(cgeevx,CGEEVX) +void LAPACK_cgeevx( + char const* balanc, char const* jobvl, char const* jobvr, char const* sense, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* W, + lapack_complex_float* VL, lapack_int const* ldvl, + lapack_complex_float* VR, lapack_int const* ldvr, lapack_int* ilo, lapack_int* ihi, + float* scale, + float* abnrm, + float* rconde, + float* rcondv, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgeevx LAPACK_GLOBAL(dgeevx,DGEEVX) +void LAPACK_dgeevx( + char const* balanc, char const* jobvl, char const* jobvr, char const* sense, + lapack_int const* n, + double* A, lapack_int const* lda, + double* WR, + double* WI, + double* VL, lapack_int const* ldvl, + double* VR, lapack_int const* ldvr, lapack_int* ilo, lapack_int* ihi, + double* scale, + double* abnrm, + double* rconde, + double* rcondv, + double* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgeevx LAPACK_GLOBAL(sgeevx,SGEEVX) +void LAPACK_sgeevx( + char const* balanc, char const* jobvl, char const* jobvr, char const* sense, + lapack_int const* n, + float* A, lapack_int const* lda, + float* WR, + float* WI, + float* VL, lapack_int const* ldvl, + float* VR, lapack_int const* ldvr, lapack_int* ilo, lapack_int* ihi, + float* scale, + float* abnrm, + float* rconde, + float* rcondv, + float* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgeevx LAPACK_GLOBAL(zgeevx,ZGEEVX) +void LAPACK_zgeevx( + char const* balanc, char const* jobvl, char const* jobvr, char const* sense, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* W, + lapack_complex_double* VL, lapack_int const* ldvl, + lapack_complex_double* VR, lapack_int const* ldvr, lapack_int* ilo, lapack_int* ihi, + double* scale, + double* abnrm, + double* rconde, + double* rcondv, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgehrd LAPACK_GLOBAL(cgehrd,CGEHRD) +void LAPACK_cgehrd( + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgehrd LAPACK_GLOBAL(dgehrd,DGEHRD) +void LAPACK_dgehrd( + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + double* A, lapack_int const* lda, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgehrd LAPACK_GLOBAL(sgehrd,SGEHRD) +void LAPACK_sgehrd( + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + float* A, lapack_int const* lda, + float* 
tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgehrd LAPACK_GLOBAL(zgehrd,ZGEHRD) +void LAPACK_zgehrd( + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgejsv LAPACK_GLOBAL(cgejsv,CGEJSV) +void LAPACK_cgejsv( + char const* joba, char const* jobu, char const* jobv, char const* jobr, char const* jobt, char const* jobp, + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* SVA, + lapack_complex_float* U, lapack_int const* ldu, + lapack_complex_float* V, lapack_int const* ldv, + lapack_complex_float* cwork, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_dgejsv LAPACK_GLOBAL(dgejsv,DGEJSV) +void LAPACK_dgejsv( + char const* joba, char const* jobu, char const* jobv, char const* jobr, char const* jobt, char const* jobp, + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* SVA, + double* U, lapack_int const* ldu, + double* V, lapack_int const* ldv, + double* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgejsv LAPACK_GLOBAL(sgejsv,SGEJSV) +void LAPACK_sgejsv( + char const* joba, char const* jobu, char const* jobv, char const* jobr, char const* jobt, char const* jobp, + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* SVA, + float* U, lapack_int const* ldu, + float* V, lapack_int const* ldv, + float* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgejsv LAPACK_GLOBAL(zgejsv,ZGEJSV) +void LAPACK_zgejsv( + char const* joba, char const* jobu, char const* jobv, char const* jobr, char const* jobt, char const* jobp, + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* SVA, + lapack_complex_double* U, lapack_int const* ldu, + lapack_complex_double* V, lapack_int const* ldv, + lapack_complex_double* cwork, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_cgelq LAPACK_GLOBAL(cgelq,CGELQ) +void LAPACK_cgelq( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* T, lapack_int const* tsize, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgelq LAPACK_GLOBAL(dgelq,DGELQ) +void LAPACK_dgelq( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* T, lapack_int const* tsize, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgelq LAPACK_GLOBAL(sgelq,SGELQ) +void LAPACK_sgelq( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* T, lapack_int const* tsize, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgelq LAPACK_GLOBAL(zgelq,ZGELQ) +void LAPACK_zgelq( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* T, lapack_int const* tsize, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgelq2 LAPACK_GLOBAL(cgelq2,CGELQ2) +void LAPACK_cgelq2( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, 
lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dgelq2 LAPACK_GLOBAL(dgelq2,DGELQ2) +void LAPACK_dgelq2( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, + lapack_int* info ); + +#define LAPACK_sgelq2 LAPACK_GLOBAL(sgelq2,SGELQ2) +void LAPACK_sgelq2( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, + lapack_int* info ); + +#define LAPACK_zgelq2 LAPACK_GLOBAL(zgelq2,ZGELQ2) +void LAPACK_zgelq2( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_cgelqf LAPACK_GLOBAL(cgelqf,CGELQF) +void LAPACK_cgelqf( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgelqf LAPACK_GLOBAL(dgelqf,DGELQF) +void LAPACK_dgelqf( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgelqf LAPACK_GLOBAL(sgelqf,SGELQF) +void LAPACK_sgelqf( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgelqf LAPACK_GLOBAL(zgelqf,ZGELQF) +void LAPACK_zgelqf( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgels LAPACK_GLOBAL(cgels,CGELS) +void LAPACK_cgels( + char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgels LAPACK_GLOBAL(dgels,DGELS) +void LAPACK_dgels( + char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgels LAPACK_GLOBAL(sgels,SGELS) +void LAPACK_sgels( + char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgels LAPACK_GLOBAL(zgels,ZGELS) +void LAPACK_zgels( + char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgelsd LAPACK_GLOBAL(cgelsd,CGELSD) +void LAPACK_cgelsd( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + float* S, + float const* rcond, lapack_int* rank, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_dgelsd LAPACK_GLOBAL(dgelsd,DGELSD) +void LAPACK_dgelsd( + lapack_int const* 
m, lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* S, + double const* rcond, lapack_int* rank, + double* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgelsd LAPACK_GLOBAL(sgelsd,SGELSD) +void LAPACK_sgelsd( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* S, + float const* rcond, lapack_int* rank, + float* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgelsd LAPACK_GLOBAL(zgelsd,ZGELSD) +void LAPACK_zgelsd( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + double* S, + double const* rcond, lapack_int* rank, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_cgelss LAPACK_GLOBAL(cgelss,CGELSS) +void LAPACK_cgelss( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + float* S, + float const* rcond, lapack_int* rank, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgelss LAPACK_GLOBAL(dgelss,DGELSS) +void LAPACK_dgelss( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* S, + double const* rcond, lapack_int* rank, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgelss LAPACK_GLOBAL(sgelss,SGELSS) +void LAPACK_sgelss( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* S, + float const* rcond, lapack_int* rank, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgelss LAPACK_GLOBAL(zgelss,ZGELSS) +void LAPACK_zgelss( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + double* S, + double const* rcond, lapack_int* rank, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgelsy LAPACK_GLOBAL(cgelsy,CGELSY) +void LAPACK_cgelsy( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, lapack_int* JPVT, + float const* rcond, lapack_int* rank, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgelsy LAPACK_GLOBAL(dgelsy,DGELSY) +void LAPACK_dgelsy( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, lapack_int* JPVT, + double const* rcond, lapack_int* rank, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgelsy LAPACK_GLOBAL(sgelsy,SGELSY) +void LAPACK_sgelsy( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, lapack_int* JPVT, + float const* rcond, lapack_int* rank, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgelsy 
LAPACK_GLOBAL(zgelsy,ZGELSY) +void LAPACK_zgelsy( + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, lapack_int* JPVT, + double const* rcond, lapack_int* rank, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgemlq LAPACK_GLOBAL(cgemlq,CGEMLQ) +void LAPACK_cgemlq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* T, lapack_int const* tsize, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgemlq LAPACK_GLOBAL(dgemlq,DGEMLQ) +void LAPACK_dgemlq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double const* A, lapack_int const* lda, + double const* T, lapack_int const* tsize, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgemlq LAPACK_GLOBAL(sgemlq,SGEMLQ) +void LAPACK_sgemlq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float const* A, lapack_int const* lda, + float const* T, lapack_int const* tsize, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgemlq LAPACK_GLOBAL(zgemlq,ZGEMLQ) +void LAPACK_zgemlq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* T, lapack_int const* tsize, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgemqr LAPACK_GLOBAL(cgemqr,CGEMQR) +void LAPACK_cgemqr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* T, lapack_int const* tsize, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgemqr LAPACK_GLOBAL(dgemqr,DGEMQR) +void LAPACK_dgemqr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double const* A, lapack_int const* lda, + double const* T, lapack_int const* tsize, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgemqr LAPACK_GLOBAL(sgemqr,SGEMQR) +void LAPACK_sgemqr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float const* A, lapack_int const* lda, + float const* T, lapack_int const* tsize, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgemqr LAPACK_GLOBAL(zgemqr,ZGEMQR) +void LAPACK_zgemqr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* T, lapack_int const* tsize, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgemqrt 
LAPACK_GLOBAL(cgemqrt,CGEMQRT) +void LAPACK_cgemqrt( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* nb, + lapack_complex_float const* V, lapack_int const* ldv, + lapack_complex_float const* T, lapack_int const* ldt, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dgemqrt LAPACK_GLOBAL(dgemqrt,DGEMQRT) +void LAPACK_dgemqrt( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* nb, + double const* V, lapack_int const* ldv, + double const* T, lapack_int const* ldt, + double* C, lapack_int const* ldc, + double* work, + lapack_int* info ); + +#define LAPACK_sgemqrt LAPACK_GLOBAL(sgemqrt,SGEMQRT) +void LAPACK_sgemqrt( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* nb, + float const* V, lapack_int const* ldv, + float const* T, lapack_int const* ldt, + float* C, lapack_int const* ldc, + float* work, + lapack_int* info ); + +#define LAPACK_zgemqrt LAPACK_GLOBAL(zgemqrt,ZGEMQRT) +void LAPACK_zgemqrt( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* nb, + lapack_complex_double const* V, lapack_int const* ldv, + lapack_complex_double const* T, lapack_int const* ldt, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_cgeql2 LAPACK_GLOBAL(cgeql2,CGEQL2) +void LAPACK_cgeql2( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dgeql2 LAPACK_GLOBAL(dgeql2,DGEQL2) +void LAPACK_dgeql2( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, + lapack_int* info ); + +#define LAPACK_sgeql2 LAPACK_GLOBAL(sgeql2,SGEQL2) +void LAPACK_sgeql2( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, + lapack_int* info ); + +#define LAPACK_zgeql2 LAPACK_GLOBAL(zgeql2,ZGEQL2) +void LAPACK_zgeql2( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_cgeqlf LAPACK_GLOBAL(cgeqlf,CGEQLF) +void LAPACK_cgeqlf( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgeqlf LAPACK_GLOBAL(dgeqlf,DGEQLF) +void LAPACK_dgeqlf( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgeqlf LAPACK_GLOBAL(sgeqlf,SGEQLF) +void LAPACK_sgeqlf( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgeqlf LAPACK_GLOBAL(zgeqlf,ZGEQLF) +void LAPACK_zgeqlf( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgeqpf LAPACK_GLOBAL(sgeqpf,SGEQPF) +void LAPACK_sgeqpf( 
lapack_int* m, lapack_int* n, float* a, lapack_int* lda, + lapack_int* jpvt, float* tau, float* work, + lapack_int *info ); + +#define LAPACK_dgeqpf LAPACK_GLOBAL(dgeqpf,DGEQPF) +void LAPACK_dgeqpf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, + lapack_int* jpvt, double* tau, double* work, + lapack_int *info ); + +#define LAPACK_cgeqpf LAPACK_GLOBAL(cgeqpf,CGEQPF) +void LAPACK_cgeqpf( lapack_int* m, lapack_int* n, lapack_complex_float* a, + lapack_int* lda, lapack_int* jpvt, + lapack_complex_float* tau, lapack_complex_float* work, + float* rwork, lapack_int *info ); + +#define LAPACK_zgeqpf LAPACK_GLOBAL(zgeqpf,ZGEQPF) +void LAPACK_zgeqpf( lapack_int* m, lapack_int* n, lapack_complex_double* a, + lapack_int* lda, lapack_int* jpvt, + lapack_complex_double* tau, lapack_complex_double* work, + double* rwork, lapack_int *info ); + +#define LAPACK_cgeqp3 LAPACK_GLOBAL(cgeqp3,CGEQP3) +void LAPACK_cgeqp3( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* JPVT, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgeqp3 LAPACK_GLOBAL(dgeqp3,DGEQP3) +void LAPACK_dgeqp3( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* JPVT, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgeqp3 LAPACK_GLOBAL(sgeqp3,SGEQP3) +void LAPACK_sgeqp3( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* JPVT, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgeqp3 LAPACK_GLOBAL(zgeqp3,ZGEQP3) +void LAPACK_zgeqp3( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* JPVT, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgeqr LAPACK_GLOBAL(cgeqr,CGEQR) +void LAPACK_cgeqr( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* T, lapack_int const* tsize, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgeqr LAPACK_GLOBAL(dgeqr,DGEQR) +void LAPACK_dgeqr( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* T, lapack_int const* tsize, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgeqr LAPACK_GLOBAL(sgeqr,SGEQR) +void LAPACK_sgeqr( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* T, lapack_int const* tsize, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgeqr LAPACK_GLOBAL(zgeqr,ZGEQR) +void LAPACK_zgeqr( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* T, lapack_int const* tsize, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgeqr2 LAPACK_GLOBAL(cgeqr2,CGEQR2) +void LAPACK_cgeqr2( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dgeqr2 LAPACK_GLOBAL(dgeqr2,DGEQR2) +void LAPACK_dgeqr2( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, + lapack_int* info ); + +#define LAPACK_sgeqr2 
LAPACK_GLOBAL(sgeqr2,SGEQR2) +void LAPACK_sgeqr2( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, + lapack_int* info ); + +#define LAPACK_zgeqr2 LAPACK_GLOBAL(zgeqr2,ZGEQR2) +void LAPACK_zgeqr2( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_cgeqrf LAPACK_GLOBAL(cgeqrf,CGEQRF) +void LAPACK_cgeqrf( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgeqrf LAPACK_GLOBAL(dgeqrf,DGEQRF) +void LAPACK_dgeqrf( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgeqrf LAPACK_GLOBAL(sgeqrf,SGEQRF) +void LAPACK_sgeqrf( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgeqrf LAPACK_GLOBAL(zgeqrf,ZGEQRF) +void LAPACK_zgeqrf( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgeqrfp LAPACK_GLOBAL(cgeqrfp,CGEQRFP) +void LAPACK_cgeqrfp( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgeqrfp LAPACK_GLOBAL(dgeqrfp,DGEQRFP) +void LAPACK_dgeqrfp( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgeqrfp LAPACK_GLOBAL(sgeqrfp,SGEQRFP) +void LAPACK_sgeqrfp( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgeqrfp LAPACK_GLOBAL(zgeqrfp,ZGEQRFP) +void LAPACK_zgeqrfp( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgeqrt LAPACK_GLOBAL(cgeqrt,CGEQRT) +void LAPACK_cgeqrt( + lapack_int const* m, lapack_int const* n, lapack_int const* nb, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* T, lapack_int const* ldt, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dgeqrt LAPACK_GLOBAL(dgeqrt,DGEQRT) +void LAPACK_dgeqrt( + lapack_int const* m, lapack_int const* n, lapack_int const* nb, + double* A, lapack_int const* lda, + double* T, lapack_int const* ldt, + double* work, + lapack_int* info ); + +#define LAPACK_sgeqrt LAPACK_GLOBAL(sgeqrt,SGEQRT) +void LAPACK_sgeqrt( + lapack_int const* m, lapack_int const* n, lapack_int const* nb, + float* A, lapack_int const* lda, + float* T, lapack_int const* ldt, + float* work, + lapack_int* info ); + +#define LAPACK_zgeqrt LAPACK_GLOBAL(zgeqrt,ZGEQRT) +void LAPACK_zgeqrt( + lapack_int const* m, lapack_int const* n, lapack_int const* nb, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* T, lapack_int const* ldt, + lapack_complex_double* work, + 
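+
+/* Editor's sketch, not part of the upstream header: QR factorization with
+ * dgeqrf declared above. R ends up in the upper triangle of A; Q is stored
+ * implicitly as Householder reflectors in the lower part of A and in tau.
+ * The lwork = -1 workspace-query idiom applies here as well:
+ *
+ *   lapack_int m = 3, n = 2, lwork = -1, info = 0;
+ *   double A[6] = { 1,2,3,  4,5,6 };   // 3x2, column-major
+ *   double tau[2], wkopt;
+ *   LAPACK_dgeqrf(&m, &n, A, &m, tau, &wkopt, &lwork, &info);  // query
+ *   lwork = (lapack_int)wkopt;
+ *   double *work = malloc((size_t)lwork * sizeof *work);       // <stdlib.h>
+ *   LAPACK_dgeqrf(&m, &n, A, &m, tau, work, &lwork, &info);    // factor
+ */
+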
lapack_int* info ); + +#define LAPACK_cgeqrt2 LAPACK_GLOBAL(cgeqrt2,CGEQRT2) +void LAPACK_cgeqrt2( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* T, lapack_int const* ldt, + lapack_int* info ); + +#define LAPACK_dgeqrt2 LAPACK_GLOBAL(dgeqrt2,DGEQRT2) +void LAPACK_dgeqrt2( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* T, lapack_int const* ldt, + lapack_int* info ); + +#define LAPACK_sgeqrt2 LAPACK_GLOBAL(sgeqrt2,SGEQRT2) +void LAPACK_sgeqrt2( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* T, lapack_int const* ldt, + lapack_int* info ); + +#define LAPACK_zgeqrt2 LAPACK_GLOBAL(zgeqrt2,ZGEQRT2) +void LAPACK_zgeqrt2( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* T, lapack_int const* ldt, + lapack_int* info ); + +#define LAPACK_cgeqrt3 LAPACK_GLOBAL(cgeqrt3,CGEQRT3) +void LAPACK_cgeqrt3( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* T, lapack_int const* ldt, + lapack_int* info ); + +#define LAPACK_dgeqrt3 LAPACK_GLOBAL(dgeqrt3,DGEQRT3) +void LAPACK_dgeqrt3( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* T, lapack_int const* ldt, + lapack_int* info ); + +#define LAPACK_sgeqrt3 LAPACK_GLOBAL(sgeqrt3,SGEQRT3) +void LAPACK_sgeqrt3( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* T, lapack_int const* ldt, + lapack_int* info ); + +#define LAPACK_zgeqrt3 LAPACK_GLOBAL(zgeqrt3,ZGEQRT3) +void LAPACK_zgeqrt3( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* T, lapack_int const* ldt, + lapack_int* info ); + +#define LAPACK_cgerfs LAPACK_GLOBAL(cgerfs,CGERFS) +void LAPACK_cgerfs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgerfs LAPACK_GLOBAL(dgerfs,DGERFS) +void LAPACK_dgerfs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* lda, + double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgerfs LAPACK_GLOBAL(sgerfs,SGERFS) +void LAPACK_sgerfs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgerfs LAPACK_GLOBAL(zgerfs,ZGERFS) +void LAPACK_zgerfs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* 
X, lapack_int const* ldx, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgerfsx LAPACK_GLOBAL(cgerfsx,CGERFSX) +void LAPACK_cgerfsx( + char const* trans, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + float const* R, + float const* C, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgerfsx LAPACK_GLOBAL(dgerfsx,DGERFSX) +void LAPACK_dgerfsx( + char const* trans, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* lda, + double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + double const* R, + double const* C, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgerfsx LAPACK_GLOBAL(sgerfsx,SGERFSX) +void LAPACK_sgerfsx( + char const* trans, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + float const* R, + float const* C, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgerfsx LAPACK_GLOBAL(zgerfsx,ZGERFSX) +void LAPACK_zgerfsx( + char const* trans, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + double const* R, + double const* C, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgerq2 LAPACK_GLOBAL(cgerq2,CGERQ2) +void LAPACK_cgerq2( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dgerq2 LAPACK_GLOBAL(dgerq2,DGERQ2) +void LAPACK_dgerq2( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, + lapack_int* info ); + +#define LAPACK_sgerq2 LAPACK_GLOBAL(sgerq2,SGERQ2) +void LAPACK_sgerq2( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, + lapack_int* info ); + +#define LAPACK_zgerq2 LAPACK_GLOBAL(zgerq2,ZGERQ2) +void LAPACK_zgerq2( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + 
lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_cgerqf LAPACK_GLOBAL(cgerqf,CGERQF) +void LAPACK_cgerqf( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgerqf LAPACK_GLOBAL(dgerqf,DGERQF) +void LAPACK_dgerqf( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgerqf LAPACK_GLOBAL(sgerqf,SGERQF) +void LAPACK_sgerqf( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgerqf LAPACK_GLOBAL(zgerqf,ZGERQF) +void LAPACK_zgerqf( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgesdd LAPACK_GLOBAL(cgesdd,CGESDD) +void LAPACK_cgesdd( + char const* jobz, + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* S, + lapack_complex_float* U, lapack_int const* ldu, + lapack_complex_float* VT, lapack_int const* ldvt, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_dgesdd LAPACK_GLOBAL(dgesdd,DGESDD) +void LAPACK_dgesdd( + char const* jobz, + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* S, + double* U, lapack_int const* ldu, + double* VT, lapack_int const* ldvt, + double* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgesdd LAPACK_GLOBAL(sgesdd,SGESDD) +void LAPACK_sgesdd( + char const* jobz, + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* S, + float* U, lapack_int const* ldu, + float* VT, lapack_int const* ldvt, + float* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgesdd LAPACK_GLOBAL(zgesdd,ZGESDD) +void LAPACK_zgesdd( + char const* jobz, + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* S, + lapack_complex_double* U, lapack_int const* ldu, + lapack_complex_double* VT, lapack_int const* ldvt, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_cgesv LAPACK_GLOBAL(cgesv,CGESV) +void LAPACK_cgesv( + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dgesv LAPACK_GLOBAL(dgesv,DGESV) +void LAPACK_dgesv( + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, lapack_int* ipiv, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_sgesv LAPACK_GLOBAL(sgesv,SGESV) +void LAPACK_sgesv( + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, lapack_int* ipiv, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zgesv LAPACK_GLOBAL(zgesv,ZGESV) +void LAPACK_zgesv( + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + 
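+
+/* Editor's sketch, not part of the upstream header: the ?gesv drivers above
+ * solve A*X = B by LU factorization with partial pivoting. Column-major
+ * storage; A is overwritten with the L and U factors, B with the solution:
+ *
+ *   lapack_int n = 3, nrhs = 1, ipiv[3], info = 0;
+ *   double A[9] = { 4,1,0,  1,3,1,  0,1,2 };   // 3x3, column-major
+ *   double b[3] = { 1, 2, 3 };
+ *   LAPACK_dgesv(&n, &nrhs, A, &n, ipiv, b, &n, &info);
+ *   // info == 0: b holds x; info > 0: U(info,info) is exactly zero
+ */
+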
lapack_int* info ); + +#define LAPACK_dsgesv LAPACK_GLOBAL(dsgesv,DSGESV) +void LAPACK_dsgesv( + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, lapack_int* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* work, + float* swork, lapack_int* iter, + lapack_int* info ); + +#define LAPACK_zcgesv LAPACK_GLOBAL(zcgesv,ZCGESV) +void LAPACK_zcgesv( + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + lapack_complex_double* work, + lapack_complex_float* swork, + double* rwork, lapack_int* iter, + lapack_int* info ); + +#define LAPACK_cgesvd LAPACK_GLOBAL(cgesvd,CGESVD) +void LAPACK_cgesvd( + char const* jobu, char const* jobvt, + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* S, + lapack_complex_float* U, lapack_int const* ldu, + lapack_complex_float* VT, lapack_int const* ldvt, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgesvd LAPACK_GLOBAL(dgesvd,DGESVD) +void LAPACK_dgesvd( + char const* jobu, char const* jobvt, + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* S, + double* U, lapack_int const* ldu, + double* VT, lapack_int const* ldvt, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgesvd LAPACK_GLOBAL(sgesvd,SGESVD) +void LAPACK_sgesvd( + char const* jobu, char const* jobvt, + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* S, + float* U, lapack_int const* ldu, + float* VT, lapack_int const* ldvt, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgesvd LAPACK_GLOBAL(zgesvd,ZGESVD) +void LAPACK_zgesvd( + char const* jobu, char const* jobvt, + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* S, + lapack_complex_double* U, lapack_int const* ldu, + lapack_complex_double* VT, lapack_int const* ldvt, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgesvdq LAPACK_GLOBAL(cgesvdq,CGESVDQ) +void LAPACK_cgesvdq( + char const* joba, char const* jobp, char const* jobr, char const* jobu, char const* jobv, + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* S, + lapack_complex_float* U, lapack_int const* ldu, + lapack_complex_float* V, lapack_int const* ldv, lapack_int* numrank, + lapack_int* iwork, lapack_int const* liwork, + lapack_complex_float* cwork, lapack_int* lcwork, + float* rwork, lapack_int const* lrwork, + lapack_int* info ); + +#define LAPACK_dgesvdq LAPACK_GLOBAL(dgesvdq,DGESVDQ) +void LAPACK_dgesvdq( + char const* joba, char const* jobp, char const* jobr, char const* jobu, char const* jobv, + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* S, + double* U, lapack_int const* ldu, + double* V, lapack_int const* ldv, lapack_int* numrank, + lapack_int* iwork, lapack_int const* liwork, + double* work, lapack_int* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* info ); + +#define LAPACK_sgesvdq LAPACK_GLOBAL(sgesvdq,SGESVDQ) +void LAPACK_sgesvdq( + char const* joba, char const* jobp, char const* jobr, char const* jobu, char const* jobv, + lapack_int const* m, lapack_int 
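+
+/* Editor's sketch, not part of the upstream header: singular values only via
+ * dgesvd declared above. With jobu = jobvt = "N", U and VT are not
+ * referenced, so dummies with leading dimension 1 suffice:
+ *
+ *   lapack_int m = 3, n = 2, one = 1, lwork = -1, info = 0;
+ *   double A[6] = { 1,2,3,  4,5,6 }, S[2], udum, vtdum, wkopt;
+ *   LAPACK_dgesvd("N", "N", &m, &n, A, &m, S, &udum, &one, &vtdum, &one,
+ *                 &wkopt, &lwork, &info);                  // workspace query
+ *   lwork = (lapack_int)wkopt;
+ *   double *work = malloc((size_t)lwork * sizeof *work);
+ *   LAPACK_dgesvd("N", "N", &m, &n, A, &m, S, &udum, &one, &vtdum, &one,
+ *                 work, &lwork, &info);
+ *   // S[0] >= S[1] >= 0 are the singular values; A is destroyed
+ */
+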
const* n, + float* A, lapack_int const* lda, + float* S, + float* U, lapack_int const* ldu, + float* V, lapack_int const* ldv, lapack_int* numrank, + lapack_int* iwork, lapack_int const* liwork, + float* work, lapack_int* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* info ); + +#define LAPACK_zgesvdq LAPACK_GLOBAL(zgesvdq,ZGESVDQ) +void LAPACK_zgesvdq( + char const* joba, char const* jobp, char const* jobr, char const* jobu, char const* jobv, + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* S, + lapack_complex_double* U, lapack_int const* ldu, + lapack_complex_double* V, lapack_int const* ldv, lapack_int* numrank, + lapack_int* iwork, lapack_int const* liwork, + lapack_complex_double* cwork, lapack_int* lcwork, + double* rwork, lapack_int const* lrwork, + lapack_int* info ); + +#define LAPACK_cgesvdx LAPACK_GLOBAL(cgesvdx,CGESVDX) +void LAPACK_cgesvdx( + char const* jobu, char const* jobvt, char const* range, + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* ns, + float* S, + lapack_complex_float* U, lapack_int const* ldu, + lapack_complex_float* VT, lapack_int const* ldvt, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_dgesvdx LAPACK_GLOBAL(dgesvdx,DGESVDX) +void LAPACK_dgesvdx( + char const* jobu, char const* jobvt, char const* range, + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* ns, + double* S, + double* U, lapack_int const* ldu, + double* VT, lapack_int const* ldvt, + double* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgesvdx LAPACK_GLOBAL(sgesvdx,SGESVDX) +void LAPACK_sgesvdx( + char const* jobu, char const* jobvt, char const* range, + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* ns, + float* S, + float* U, lapack_int const* ldu, + float* VT, lapack_int const* ldvt, + float* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgesvdx LAPACK_GLOBAL(zgesvdx,ZGESVDX) +void LAPACK_zgesvdx( + char const* jobu, char const* jobvt, char const* range, + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* ns, + double* S, + lapack_complex_double* U, lapack_int const* ldu, + lapack_complex_double* VT, lapack_int const* ldvt, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_cgesvj LAPACK_GLOBAL(cgesvj,CGESVJ) +void LAPACK_cgesvj( + char const* joba, char const* jobu, char const* jobv, + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* SVA, lapack_int const* mv, + lapack_complex_float* V, lapack_int const* ldv, + lapack_complex_float* cwork, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* info ); + +#define LAPACK_dgesvj LAPACK_GLOBAL(dgesvj,DGESVJ) +void LAPACK_dgesvj( + char const* joba, char const* jobu, char const* jobv, + lapack_int const* m, lapack_int const* 
n, + double* A, lapack_int const* lda, + double* SVA, lapack_int const* mv, + double* V, lapack_int const* ldv, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgesvj LAPACK_GLOBAL(sgesvj,SGESVJ) +void LAPACK_sgesvj( + char const* joba, char const* jobu, char const* jobv, + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* SVA, lapack_int const* mv, + float* V, lapack_int const* ldv, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgesvj LAPACK_GLOBAL(zgesvj,ZGESVJ) +void LAPACK_zgesvj( + char const* joba, char const* jobu, char const* jobv, + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* SVA, lapack_int const* mv, + lapack_complex_double* V, lapack_int const* ldv, + lapack_complex_double* cwork, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* info ); + +#define LAPACK_cgesvx LAPACK_GLOBAL(cgesvx,CGESVX) +void LAPACK_cgesvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + float* R, + float* C, + lapack_complex_float* B, + lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgesvx LAPACK_GLOBAL(dgesvx,DGESVX) +void LAPACK_dgesvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + double* R, + double* C, + double* B, + lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgesvx LAPACK_GLOBAL(sgesvx,SGESVX) +void LAPACK_sgesvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + float* R, + float* C, + float* B, + lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgesvx LAPACK_GLOBAL(zgesvx,ZGESVX) +void LAPACK_zgesvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + double* R, + double* C, + lapack_complex_double* B, + lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgesvxx LAPACK_GLOBAL(cgesvxx,CGESVXX) +void LAPACK_cgesvxx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + float* R, + float* C, + lapack_complex_float* B, + lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* rpvgrw, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + 
float* params, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgesvxx LAPACK_GLOBAL(dgesvxx,DGESVXX) +void LAPACK_dgesvxx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + double* R, + double* C, + double* B, + lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgesvxx LAPACK_GLOBAL(sgesvxx,SGESVXX) +void LAPACK_sgesvxx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + float* R, + float* C, + float* B, + lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* rpvgrw, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgesvxx LAPACK_GLOBAL(zgesvxx,ZGESVXX) +void LAPACK_zgesvxx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + double* R, + double* C, + lapack_complex_double* B, + lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgetf2 LAPACK_GLOBAL(cgetf2,CGETF2) +void LAPACK_cgetf2( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_dgetf2 LAPACK_GLOBAL(dgetf2,DGETF2) +void LAPACK_dgetf2( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_sgetf2 LAPACK_GLOBAL(sgetf2,SGETF2) +void LAPACK_sgetf2( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_zgetf2 LAPACK_GLOBAL(zgetf2,ZGETF2) +void LAPACK_zgetf2( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_cgetrf LAPACK_GLOBAL(cgetrf,CGETRF) +void LAPACK_cgetrf( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_dgetrf LAPACK_GLOBAL(dgetrf,DGETRF) +void LAPACK_dgetrf( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_sgetrf LAPACK_GLOBAL(sgetrf,SGETRF) +void LAPACK_sgetrf( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_zgetrf LAPACK_GLOBAL(zgetrf,ZGETRF) +void LAPACK_zgetrf( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + 
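+
+/* Editor's sketch, not part of the upstream header: factor once with ?getrf,
+ * then solve repeatedly with ?getrs (declared just below). This is the
+ * two-step form of what ?gesv does in one call:
+ *
+ *   lapack_int n = 3, nrhs = 2, ipiv[3], info = 0;
+ *   double A[9] = { 2,1,1,  1,3,2,  1,0,0 };   // 3x3, column-major
+ *   double B[6] = { 4,5,6,  1,0,0 };           // two right-hand sides
+ *   LAPACK_dgetrf(&n, &n, A, &n, ipiv, &info); // P*A = L*U
+ *   if (info == 0)
+ *       LAPACK_dgetrs("N", &n, &nrhs, A, &n, ipiv, B, &n, &info);
+ *   // each column of B now holds the solution for that right-hand side
+ */
+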
lapack_int* info ); + +#define LAPACK_cgetrf2 LAPACK_GLOBAL(cgetrf2,CGETRF2) +void LAPACK_cgetrf2( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_dgetrf2 LAPACK_GLOBAL(dgetrf2,DGETRF2) +void LAPACK_dgetrf2( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_sgetrf2 LAPACK_GLOBAL(sgetrf2,SGETRF2) +void LAPACK_sgetrf2( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_zgetrf2 LAPACK_GLOBAL(zgetrf2,ZGETRF2) +void LAPACK_zgetrf2( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_cgetri LAPACK_GLOBAL(cgetri,CGETRI) +void LAPACK_cgetri( + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgetri LAPACK_GLOBAL(dgetri,DGETRI) +void LAPACK_dgetri( + lapack_int const* n, + double* A, lapack_int const* lda, lapack_int const* ipiv, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgetri LAPACK_GLOBAL(sgetri,SGETRI) +void LAPACK_sgetri( + lapack_int const* n, + float* A, lapack_int const* lda, lapack_int const* ipiv, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgetri LAPACK_GLOBAL(zgetri,ZGETRI) +void LAPACK_zgetri( + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgetrs LAPACK_GLOBAL(cgetrs,CGETRS) +void LAPACK_cgetrs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dgetrs LAPACK_GLOBAL(dgetrs,DGETRS) +void LAPACK_dgetrs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* lda, lapack_int const* ipiv, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_sgetrs LAPACK_GLOBAL(sgetrs,SGETRS) +void LAPACK_sgetrs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, lapack_int const* ipiv, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zgetrs LAPACK_GLOBAL(zgetrs,ZGETRS) +void LAPACK_zgetrs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cgetsls LAPACK_GLOBAL(cgetsls,CGETSLS) +void LAPACK_cgetsls( + char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgetsls LAPACK_GLOBAL(dgetsls,DGETSLS) +void LAPACK_dgetsls( + char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* work, lapack_int const* lwork, + lapack_int* info 
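+
+/* Editor's sketch, not part of the upstream header: ?getri turns an LU
+ * factorization from ?getrf into an explicit inverse. For solving systems,
+ * ?getrs is cheaper and more accurate than forming inv(A); use ?getri only
+ * when the inverse itself is required:
+ *
+ *   lapack_int n = 3, ipiv[3], lwork = 64, info = 0;
+ *   double A[9] = { 4,1,0,  1,3,1,  0,1,2 };
+ *   double work[64];                            // fixed buffer, ample for n = 3
+ *   LAPACK_dgetrf(&n, &n, A, &n, ipiv, &info);
+ *   if (info == 0)
+ *       LAPACK_dgetri(&n, A, &n, ipiv, work, &lwork, &info);  // A <- inv(A)
+ */
+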
); + +#define LAPACK_sgetsls LAPACK_GLOBAL(sgetsls,SGETSLS) +void LAPACK_sgetsls( + char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgetsls LAPACK_GLOBAL(zgetsls,ZGETSLS) +void LAPACK_zgetsls( + char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cggbak LAPACK_GLOBAL(cggbak,CGGBAK) +void LAPACK_cggbak( + char const* job, char const* side, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + float const* lscale, + float const* rscale, lapack_int const* m, + lapack_complex_float* V, lapack_int const* ldv, + lapack_int* info ); + +#define LAPACK_dggbak LAPACK_GLOBAL(dggbak,DGGBAK) +void LAPACK_dggbak( + char const* job, char const* side, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + double const* lscale, + double const* rscale, lapack_int const* m, + double* V, lapack_int const* ldv, + lapack_int* info ); + +#define LAPACK_sggbak LAPACK_GLOBAL(sggbak,SGGBAK) +void LAPACK_sggbak( + char const* job, char const* side, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + float const* lscale, + float const* rscale, lapack_int const* m, + float* V, lapack_int const* ldv, + lapack_int* info ); + +#define LAPACK_zggbak LAPACK_GLOBAL(zggbak,ZGGBAK) +void LAPACK_zggbak( + char const* job, char const* side, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + double const* lscale, + double const* rscale, lapack_int const* m, + lapack_complex_double* V, lapack_int const* ldv, + lapack_int* info ); + +#define LAPACK_cggbal LAPACK_GLOBAL(cggbal,CGGBAL) +void LAPACK_cggbal( + char const* job, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, lapack_int* ilo, lapack_int* ihi, + float* lscale, + float* rscale, + float* work, + lapack_int* info ); + +#define LAPACK_dggbal LAPACK_GLOBAL(dggbal,DGGBAL) +void LAPACK_dggbal( + char const* job, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, lapack_int* ilo, lapack_int* ihi, + double* lscale, + double* rscale, + double* work, + lapack_int* info ); + +#define LAPACK_sggbal LAPACK_GLOBAL(sggbal,SGGBAL) +void LAPACK_sggbal( + char const* job, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, lapack_int* ilo, lapack_int* ihi, + float* lscale, + float* rscale, + float* work, + lapack_int* info ); + +#define LAPACK_zggbal LAPACK_GLOBAL(zggbal,ZGGBAL) +void LAPACK_zggbal( + char const* job, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, lapack_int* ilo, lapack_int* ihi, + double* lscale, + double* rscale, + double* work, + lapack_int* info ); + +#define LAPACK_cgges LAPACK_GLOBAL(cgges,CGGES) +void LAPACK_cgges( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_C_SELECT2 selctg, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, lapack_int* sdim, + lapack_complex_float* alpha, + lapack_complex_float* beta, + lapack_complex_float* VSL, lapack_int const* 
ldvsl, + lapack_complex_float* VSR, lapack_int const* ldvsr, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_dgges LAPACK_GLOBAL(dgges,DGGES) +void LAPACK_dgges( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_D_SELECT3 selctg, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, lapack_int* sdim, + double* alphar, + double* alphai, + double* beta, + double* VSL, lapack_int const* ldvsl, + double* VSR, lapack_int const* ldvsr, + double* work, lapack_int const* lwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_sgges LAPACK_GLOBAL(sgges,SGGES) +void LAPACK_sgges( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_S_SELECT3 selctg, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, lapack_int* sdim, + float* alphar, + float* alphai, + float* beta, + float* VSL, lapack_int const* ldvsl, + float* VSR, lapack_int const* ldvsr, + float* work, lapack_int const* lwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_zgges LAPACK_GLOBAL(zgges,ZGGES) +void LAPACK_zgges( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_Z_SELECT2 selctg, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, lapack_int* sdim, + lapack_complex_double* alpha, + lapack_complex_double* beta, + lapack_complex_double* VSL, lapack_int const* ldvsl, + lapack_complex_double* VSR, lapack_int const* ldvsr, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_cgges3 LAPACK_GLOBAL(cgges3,CGGES3) +void LAPACK_cgges3( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_C_SELECT2 selctg, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, lapack_int* sdim, + lapack_complex_float* alpha, + lapack_complex_float* beta, + lapack_complex_float* VSL, lapack_int const* ldvsl, + lapack_complex_float* VSR, lapack_int const* ldvsr, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_dgges3 LAPACK_GLOBAL(dgges3,DGGES3) +void LAPACK_dgges3( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_D_SELECT3 selctg, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, lapack_int* sdim, + double* alphar, + double* alphai, + double* beta, + double* VSL, lapack_int const* ldvsl, + double* VSR, lapack_int const* ldvsr, + double* work, lapack_int const* lwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_sgges3 LAPACK_GLOBAL(sgges3,SGGES3) +void LAPACK_sgges3( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_S_SELECT3 selctg, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, lapack_int* sdim, + float* alphar, + float* alphai, + float* beta, + float* VSL, lapack_int const* ldvsl, + float* VSR, lapack_int const* ldvsr, + float* work, lapack_int const* lwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_zgges3 LAPACK_GLOBAL(zgges3,ZGGES3) +void LAPACK_zgges3( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_Z_SELECT2 selctg, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, 
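+
+/* Editor's sketch, not part of the upstream header: generalized Schur
+ * factorization of a pencil (A,B) with dgges declared above. With
+ * sort = "N" the selctg callback and bwork are not referenced, so NULL can
+ * be passed for both:
+ *
+ *   lapack_int n = 2, sdim = 0, lwork = 64, info = 0;
+ *   double A[4] = { 4,1,  2,3 }, B[4] = { 1,0,  0,1 };
+ *   double alphar[2], alphai[2], beta[2], VSL[4], VSR[4], work[64];
+ *   LAPACK_dgges("V", "V", "N", NULL, &n, A, &n, B, &n, &sdim,
+ *                alphar, alphai, beta, VSL, &n, VSR, &n, work, &lwork,
+ *                NULL, &info);
+ *   // eigenvalue j of the pencil is (alphar[j] + i*alphai[j]) / beta[j]
+ */
+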
lapack_int const* ldb, lapack_int* sdim, + lapack_complex_double* alpha, + lapack_complex_double* beta, + lapack_complex_double* VSL, lapack_int const* ldvsl, + lapack_complex_double* VSR, lapack_int const* ldvsr, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_cggesx LAPACK_GLOBAL(cggesx,CGGESX) +void LAPACK_cggesx( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_C_SELECT2 selctg, char const* sense, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, lapack_int* sdim, + lapack_complex_float* alpha, + lapack_complex_float* beta, + lapack_complex_float* VSL, lapack_int const* ldvsl, + lapack_complex_float* VSR, lapack_int const* ldvsr, + float* rconde, + float* rcondv, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, lapack_int const* liwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_dggesx LAPACK_GLOBAL(dggesx,DGGESX) +void LAPACK_dggesx( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_D_SELECT3 selctg, char const* sense, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, lapack_int* sdim, + double* alphar, + double* alphai, + double* beta, + double* VSL, lapack_int const* ldvsl, + double* VSR, lapack_int const* ldvsr, + double* rconde, + double* rcondv, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_sggesx LAPACK_GLOBAL(sggesx,SGGESX) +void LAPACK_sggesx( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_S_SELECT3 selctg, char const* sense, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, lapack_int* sdim, + float* alphar, + float* alphai, + float* beta, + float* VSL, lapack_int const* ldvsl, + float* VSR, lapack_int const* ldvsr, + float* rconde, + float* rcondv, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_zggesx LAPACK_GLOBAL(zggesx,ZGGESX) +void LAPACK_zggesx( + char const* jobvsl, char const* jobvsr, char const* sort, LAPACK_Z_SELECT2 selctg, char const* sense, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, lapack_int* sdim, + lapack_complex_double* alpha, + lapack_complex_double* beta, + lapack_complex_double* VSL, lapack_int const* ldvsl, + lapack_complex_double* VSR, lapack_int const* ldvsr, + double* rconde, + double* rcondv, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, lapack_int const* liwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_cggev LAPACK_GLOBAL(cggev,CGGEV) +void LAPACK_cggev( + char const* jobvl, char const* jobvr, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* alpha, + lapack_complex_float* beta, + lapack_complex_float* VL, lapack_int const* ldvl, + lapack_complex_float* VR, lapack_int const* ldvr, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dggev LAPACK_GLOBAL(dggev,DGGEV) +void LAPACK_dggev( + char const* jobvl, char const* jobvr, + lapack_int const* n, + double* A, lapack_int const* lda, + 
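+
+/* Editor's sketch, not part of the upstream header: generalized eigenvalues
+ * of A*x = lambda*B*x with dggev. jobvl = "N" leaves VL unreferenced, so a
+ * dummy with leading dimension 1 is enough:
+ *
+ *   lapack_int n = 2, one = 1, lwork = 64, info = 0;
+ *   double A[4] = { 3,1,  1,3 }, B[4] = { 1,0,  0,2 };
+ *   double alphar[2], alphai[2], beta[2], vldum, VR[4], work[64];
+ *   LAPACK_dggev("N", "V", &n, A, &n, B, &n, alphar, alphai, beta,
+ *                &vldum, &one, VR, &n, work, &lwork, &info);
+ *   // eigenvalue j is (alphar[j] + i*alphai[j]) / beta[j]; beta[j] == 0
+ *   // signals an infinite eigenvalue
+ */
+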
double* B, lapack_int const* ldb, + double* alphar, + double* alphai, + double* beta, + double* VL, lapack_int const* ldvl, + double* VR, lapack_int const* ldvr, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sggev LAPACK_GLOBAL(sggev,SGGEV) +void LAPACK_sggev( + char const* jobvl, char const* jobvr, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* alphar, + float* alphai, + float* beta, + float* VL, lapack_int const* ldvl, + float* VR, lapack_int const* ldvr, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zggev LAPACK_GLOBAL(zggev,ZGGEV) +void LAPACK_zggev( + char const* jobvl, char const* jobvr, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* alpha, + lapack_complex_double* beta, + lapack_complex_double* VL, lapack_int const* ldvl, + lapack_complex_double* VR, lapack_int const* ldvr, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cggev3 LAPACK_GLOBAL(cggev3,CGGEV3) +void LAPACK_cggev3( + char const* jobvl, char const* jobvr, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* alpha, + lapack_complex_float* beta, + lapack_complex_float* VL, lapack_int const* ldvl, + lapack_complex_float* VR, lapack_int const* ldvr, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dggev3 LAPACK_GLOBAL(dggev3,DGGEV3) +void LAPACK_dggev3( + char const* jobvl, char const* jobvr, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* alphar, + double* alphai, + double* beta, + double* VL, lapack_int const* ldvl, + double* VR, lapack_int const* ldvr, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sggev3 LAPACK_GLOBAL(sggev3,SGGEV3) +void LAPACK_sggev3( + char const* jobvl, char const* jobvr, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* alphar, + float* alphai, + float* beta, + float* VL, lapack_int const* ldvl, + float* VR, lapack_int const* ldvr, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zggev3 LAPACK_GLOBAL(zggev3,ZGGEV3) +void LAPACK_zggev3( + char const* jobvl, char const* jobvr, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* alpha, + lapack_complex_double* beta, + lapack_complex_double* VL, lapack_int const* ldvl, + lapack_complex_double* VR, lapack_int const* ldvr, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cggevx LAPACK_GLOBAL(cggevx,CGGEVX) +void LAPACK_cggevx( + char const* balanc, char const* jobvl, char const* jobvr, char const* sense, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* alpha, + lapack_complex_float* beta, + lapack_complex_float* VL, lapack_int const* ldvl, + lapack_complex_float* VR, lapack_int const* ldvr, lapack_int* ilo, lapack_int* ihi, + float* lscale, + float* rscale, + float* abnrm, + float* bbnrm, + float* rconde, + float* rcondv, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, 
+ lapack_int* iwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_dggevx LAPACK_GLOBAL(dggevx,DGGEVX) +void LAPACK_dggevx( + char const* balanc, char const* jobvl, char const* jobvr, char const* sense, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* alphar, + double* alphai, + double* beta, + double* VL, lapack_int const* ldvl, + double* VR, lapack_int const* ldvr, lapack_int* ilo, lapack_int* ihi, + double* lscale, + double* rscale, + double* abnrm, + double* bbnrm, + double* rconde, + double* rcondv, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_sggevx LAPACK_GLOBAL(sggevx,SGGEVX) +void LAPACK_sggevx( + char const* balanc, char const* jobvl, char const* jobvr, char const* sense, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* alphar, + float* alphai, + float* beta, + float* VL, lapack_int const* ldvl, + float* VR, lapack_int const* ldvr, lapack_int* ilo, lapack_int* ihi, + float* lscale, + float* rscale, + float* abnrm, + float* bbnrm, + float* rconde, + float* rcondv, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_zggevx LAPACK_GLOBAL(zggevx,ZGGEVX) +void LAPACK_zggevx( + char const* balanc, char const* jobvl, char const* jobvr, char const* sense, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* alpha, + lapack_complex_double* beta, + lapack_complex_double* VL, lapack_int const* ldvl, + lapack_complex_double* VR, lapack_int const* ldvr, lapack_int* ilo, lapack_int* ihi, + double* lscale, + double* rscale, + double* abnrm, + double* bbnrm, + double* rconde, + double* rcondv, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, lapack_logical* BWORK, + lapack_int* info ); + +#define LAPACK_cggglm LAPACK_GLOBAL(cggglm,CGGGLM) +void LAPACK_cggglm( + lapack_int const* n, lapack_int const* m, lapack_int const* p, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* D, + lapack_complex_float* X, + lapack_complex_float* Y, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dggglm LAPACK_GLOBAL(dggglm,DGGGLM) +void LAPACK_dggglm( + lapack_int const* n, lapack_int const* m, lapack_int const* p, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* D, + double* X, + double* Y, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sggglm LAPACK_GLOBAL(sggglm,SGGGLM) +void LAPACK_sggglm( + lapack_int const* n, lapack_int const* m, lapack_int const* p, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* D, + float* X, + float* Y, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zggglm LAPACK_GLOBAL(zggglm,ZGGGLM) +void LAPACK_zggglm( + lapack_int const* n, lapack_int const* m, lapack_int const* p, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* D, + lapack_complex_double* X, + lapack_complex_double* Y, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgghd3 LAPACK_GLOBAL(cgghd3,CGGHD3) +void LAPACK_cgghd3( + char const* compq, char 
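+
+/* Editor's sketch, not part of the upstream header: the ?ggglm drivers above
+ * solve the general Gauss-Markov linear model, minimizing ||y|| subject to
+ * d = A*x + B*y (this routine requires m <= n <= m + p in its n-by-m /
+ * n-by-p convention):
+ *
+ *   lapack_int n = 3, m = 2, p = 3, lwork = 64, info = 0;
+ *   double A[6] = { 1,1,1,  1,2,3 };             // n-by-m, column-major
+ *   double B[9] = { 1,0,0,  0,1,0,  0,0,1 };     // n-by-p identity
+ *   double d[3] = { 1, 2, 3 }, x[2], y[3], work[64];
+ *   LAPACK_dggglm(&n, &m, &p, A, &n, B, &n, d, x, y, work, &lwork, &info);
+ *   // info == 0: x and y hold the model solution
+ */
+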
const* compz, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* Q, lapack_int const* ldq, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgghd3 LAPACK_GLOBAL(dgghd3,DGGHD3) +void LAPACK_dgghd3( + char const* compq, char const* compz, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* Q, lapack_int const* ldq, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgghd3 LAPACK_GLOBAL(sgghd3,SGGHD3) +void LAPACK_sgghd3( + char const* compq, char const* compz, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* Q, lapack_int const* ldq, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgghd3 LAPACK_GLOBAL(zgghd3,ZGGHD3) +void LAPACK_zgghd3( + char const* compq, char const* compz, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* Q, lapack_int const* ldq, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgghrd LAPACK_GLOBAL(cgghrd,CGGHRD) +void LAPACK_cgghrd( + char const* compq, char const* compz, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* Q, lapack_int const* ldq, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_int* info ); + +#define LAPACK_dgghrd LAPACK_GLOBAL(dgghrd,DGGHRD) +void LAPACK_dgghrd( + char const* compq, char const* compz, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* Q, lapack_int const* ldq, + double* Z, lapack_int const* ldz, + lapack_int* info ); + +#define LAPACK_sgghrd LAPACK_GLOBAL(sgghrd,SGGHRD) +void LAPACK_sgghrd( + char const* compq, char const* compz, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* Q, lapack_int const* ldq, + float* Z, lapack_int const* ldz, + lapack_int* info ); + +#define LAPACK_zgghrd LAPACK_GLOBAL(zgghrd,ZGGHRD) +void LAPACK_zgghrd( + char const* compq, char const* compz, + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* Q, lapack_int const* ldq, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_int* info ); + +#define LAPACK_cgglse LAPACK_GLOBAL(cgglse,CGGLSE) +void LAPACK_cgglse( + lapack_int const* m, lapack_int const* n, lapack_int const* p, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* C, + lapack_complex_float* D, + lapack_complex_float* X, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dgglse 
LAPACK_GLOBAL(dgglse,DGGLSE) +void LAPACK_dgglse( + lapack_int const* m, lapack_int const* n, lapack_int const* p, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* C, + double* D, + double* X, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sgglse LAPACK_GLOBAL(sgglse,SGGLSE) +void LAPACK_sgglse( + lapack_int const* m, lapack_int const* n, lapack_int const* p, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* C, + float* D, + float* X, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zgglse LAPACK_GLOBAL(zgglse,ZGGLSE) +void LAPACK_zgglse( + lapack_int const* m, lapack_int const* n, lapack_int const* p, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* C, + lapack_complex_double* D, + lapack_complex_double* X, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cggqrf LAPACK_GLOBAL(cggqrf,CGGQRF) +void LAPACK_cggqrf( + lapack_int const* n, lapack_int const* m, lapack_int const* p, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* taua, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* taub, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dggqrf LAPACK_GLOBAL(dggqrf,DGGQRF) +void LAPACK_dggqrf( + lapack_int const* n, lapack_int const* m, lapack_int const* p, + double* A, lapack_int const* lda, + double* taua, + double* B, lapack_int const* ldb, + double* taub, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sggqrf LAPACK_GLOBAL(sggqrf,SGGQRF) +void LAPACK_sggqrf( + lapack_int const* n, lapack_int const* m, lapack_int const* p, + float* A, lapack_int const* lda, + float* taua, + float* B, lapack_int const* ldb, + float* taub, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zggqrf LAPACK_GLOBAL(zggqrf,ZGGQRF) +void LAPACK_zggqrf( + lapack_int const* n, lapack_int const* m, lapack_int const* p, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* taua, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* taub, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cggrqf LAPACK_GLOBAL(cggrqf,CGGRQF) +void LAPACK_cggrqf( + lapack_int const* m, lapack_int const* p, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* taua, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* taub, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dggrqf LAPACK_GLOBAL(dggrqf,DGGRQF) +void LAPACK_dggrqf( + lapack_int const* m, lapack_int const* p, lapack_int const* n, + double* A, lapack_int const* lda, + double* taua, + double* B, lapack_int const* ldb, + double* taub, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sggrqf LAPACK_GLOBAL(sggrqf,SGGRQF) +void LAPACK_sggrqf( + lapack_int const* m, lapack_int const* p, lapack_int const* n, + float* A, lapack_int const* lda, + float* taua, + float* B, lapack_int const* ldb, + float* taub, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zggrqf LAPACK_GLOBAL(zggrqf,ZGGRQF) +void LAPACK_zggrqf( + lapack_int const* m, lapack_int const* p, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + 
lapack_complex_double* taua, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* taub, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sggsvd LAPACK_GLOBAL(sggsvd,SGGSVD) +lapack_int LAPACK_sggsvd( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* n, lapack_int const* p, + lapack_int* k, lapack_int* l, + float* a, lapack_int const* lda, + float* b, lapack_int const* ldb, + float* alpha, float* beta, + float* u, lapack_int const* ldu, + float* v, lapack_int const* ldv, + float* q, lapack_int const* ldq, + float* work, lapack_int* iwork, lapack_int* info ); + +#define LAPACK_dggsvd LAPACK_GLOBAL(dggsvd,DGGSVD) +lapack_int LAPACK_dggsvd( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* n, lapack_int const* p, + lapack_int* k, lapack_int* l, + double* a, lapack_int const* lda, + double* b, lapack_int const* ldb, + double* alpha, double* beta, + double* u, lapack_int const* ldu, + double* v, lapack_int const* ldv, + double* q, lapack_int const* ldq, + double* work, lapack_int* iwork, lapack_int* info ); + +#define LAPACK_cggsvd LAPACK_GLOBAL(cggsvd,CGGSVD) +lapack_int LAPACK_cggsvd( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* n, lapack_int const* p, + lapack_int* k, lapack_int* l, + lapack_complex_float* a, lapack_int const* lda, + lapack_complex_float* b, lapack_int const* ldb, + float* alpha, float* beta, + lapack_complex_float* u, lapack_int const* ldu, + lapack_complex_float* v, lapack_int const* ldv, + lapack_complex_float* q, lapack_int const* ldq, + lapack_complex_float* work, float* rwork, + lapack_int* iwork, lapack_int* info ); + +#define LAPACK_zggsvd LAPACK_GLOBAL(zggsvd,ZGGSVD) +lapack_int LAPACK_zggsvd( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* n, lapack_int const* p, + lapack_int* k, lapack_int* l, + lapack_complex_double* a, lapack_int const* lda, + lapack_complex_double* b, lapack_int const* ldb, + double* alpha, double* beta, + lapack_complex_double* u, lapack_int const* ldu, + lapack_complex_double* v, lapack_int const* ldv, + lapack_complex_double* q, lapack_int const* ldq, + lapack_complex_double* work, double* rwork, + lapack_int* iwork, lapack_int* info ); + +#define LAPACK_cggsvd3 LAPACK_GLOBAL(cggsvd3,CGGSVD3) +void LAPACK_cggsvd3( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* n, lapack_int const* p, lapack_int* k, lapack_int* l, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + float* alpha, + float* beta, + lapack_complex_float* U, lapack_int const* ldu, + lapack_complex_float* V, lapack_int const* ldv, + lapack_complex_float* Q, lapack_int const* ldq, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_dggsvd3 LAPACK_GLOBAL(dggsvd3,DGGSVD3) +void LAPACK_dggsvd3( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* n, lapack_int const* p, lapack_int* k, lapack_int* l, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* alpha, + double* beta, + double* U, lapack_int const* ldu, + double* V, lapack_int const* ldv, + double* Q, lapack_int const* ldq, + double* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define 
LAPACK_sggsvd3 LAPACK_GLOBAL(sggsvd3,SGGSVD3) +void LAPACK_sggsvd3( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* n, lapack_int const* p, lapack_int* k, lapack_int* l, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* alpha, + float* beta, + float* U, lapack_int const* ldu, + float* V, lapack_int const* ldv, + float* Q, lapack_int const* ldq, + float* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zggsvd3 LAPACK_GLOBAL(zggsvd3,ZGGSVD3) +void LAPACK_zggsvd3( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* n, lapack_int const* p, lapack_int* k, lapack_int* l, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + double* alpha, + double* beta, + lapack_complex_double* U, lapack_int const* ldu, + lapack_complex_double* V, lapack_int const* ldv, + lapack_complex_double* Q, lapack_int const* ldq, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sggsvp LAPACK_GLOBAL(sggsvp,SGGSVP) +lapack_int LAPACK_sggsvp( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* p, lapack_int const* n, + float* a, lapack_int const* lda, + float* b, lapack_int const* ldb, + float* tola, float* tolb, + lapack_int* k, lapack_int* l, + float* u, lapack_int const* ldu, + float* v, lapack_int const* ldv, + float* q, lapack_int const* ldq, + lapack_int* iwork, float* tau, + float* work, lapack_int* info ); + +#define LAPACK_dggsvp LAPACK_GLOBAL(dggsvp,DGGSVP) +lapack_int LAPACK_dggsvp( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* p, lapack_int const* n, + double* a, lapack_int const* lda, + double* b, lapack_int const* ldb, + double* tola, double* tolb, + lapack_int* k, lapack_int* l, + double* u, lapack_int const* ldu, + double* v, lapack_int const* ldv, + double* q, lapack_int const* ldq, + lapack_int* iwork, double* tau, + double* work, lapack_int* info ); + +#define LAPACK_cggsvp LAPACK_GLOBAL(cggsvp,CGGSVP) +lapack_int LAPACK_cggsvp( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* p, lapack_int const* n, + lapack_complex_float* a, lapack_int const* lda, + lapack_complex_float* b, lapack_int const* ldb, + float* tola, float* tolb, lapack_int* k, lapack_int* l, + lapack_complex_float* u, lapack_int const* ldu, + lapack_complex_float* v, lapack_int const* ldv, + lapack_complex_float* q, lapack_int const* ldq, + lapack_int* iwork, float* rwork, lapack_complex_float* tau, + lapack_complex_float* work, lapack_int* info ); + +#define LAPACK_zggsvp LAPACK_GLOBAL(zggsvp,ZGGSVP) +lapack_int LAPACK_zggsvp( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* p, lapack_int const* n, + lapack_complex_double* a, lapack_int const* lda, + lapack_complex_double* b, lapack_int const* ldb, + double* tola, double* tolb, lapack_int* k, lapack_int* l, + lapack_complex_double* u, lapack_int const* ldu, + lapack_complex_double* v, lapack_int const* ldv, + lapack_complex_double* q, lapack_int const* ldq, + lapack_int* iwork, double* rwork, lapack_complex_double* tau, + lapack_complex_double* work, lapack_int* info ); + +#define LAPACK_cggsvp3 LAPACK_GLOBAL(cggsvp3,CGGSVP3) +void LAPACK_cggsvp3( + char const* jobu, char const* jobv, char const* jobq, + 
lapack_int const* m, lapack_int const* p, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + float const* tola, + float const* tolb, lapack_int* k, lapack_int* l, + lapack_complex_float* U, lapack_int const* ldu, + lapack_complex_float* V, lapack_int const* ldv, + lapack_complex_float* Q, lapack_int const* ldq, + lapack_int* iwork, + float* rwork, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dggsvp3 LAPACK_GLOBAL(dggsvp3,DGGSVP3) +void LAPACK_dggsvp3( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* p, lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double const* tola, + double const* tolb, lapack_int* k, lapack_int* l, + double* U, lapack_int const* ldu, + double* V, lapack_int const* ldv, + double* Q, lapack_int const* ldq, + lapack_int* iwork, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sggsvp3 LAPACK_GLOBAL(sggsvp3,SGGSVP3) +void LAPACK_sggsvp3( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* p, lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float const* tola, + float const* tolb, lapack_int* k, lapack_int* l, + float* U, lapack_int const* ldu, + float* V, lapack_int const* ldv, + float* Q, lapack_int const* ldq, + lapack_int* iwork, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zggsvp3 LAPACK_GLOBAL(zggsvp3,ZGGSVP3) +void LAPACK_zggsvp3( + char const* jobu, char const* jobv, char const* jobq, + lapack_int const* m, lapack_int const* p, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + double const* tola, + double const* tolb, lapack_int* k, lapack_int* l, + lapack_complex_double* U, lapack_int const* ldu, + lapack_complex_double* V, lapack_int const* ldv, + lapack_complex_double* Q, lapack_int const* ldq, + lapack_int* iwork, + double* rwork, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cgtcon LAPACK_GLOBAL(cgtcon,CGTCON) +void LAPACK_cgtcon( + char const* norm, + lapack_int const* n, + lapack_complex_float const* DL, + lapack_complex_float const* D, + lapack_complex_float const* DU, + lapack_complex_float const* DU2, lapack_int const* ipiv, + float const* anorm, + float* rcond, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dgtcon LAPACK_GLOBAL(dgtcon,DGTCON) +void LAPACK_dgtcon( + char const* norm, + lapack_int const* n, + double const* DL, + double const* D, + double const* DU, + double const* DU2, lapack_int const* ipiv, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgtcon LAPACK_GLOBAL(sgtcon,SGTCON) +void LAPACK_sgtcon( + char const* norm, + lapack_int const* n, + float const* DL, + float const* D, + float const* DU, + float const* DU2, lapack_int const* ipiv, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgtcon LAPACK_GLOBAL(zgtcon,ZGTCON) +void LAPACK_zgtcon( + char const* norm, + lapack_int const* n, + lapack_complex_double const* DL, + lapack_complex_double const* D, + lapack_complex_double const* DU, + lapack_complex_double 
const* DU2, lapack_int const* ipiv, + double const* anorm, + double* rcond, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_cgtrfs LAPACK_GLOBAL(cgtrfs,CGTRFS) +void LAPACK_cgtrfs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* DL, + lapack_complex_float const* D, + lapack_complex_float const* DU, + lapack_complex_float const* DLF, + lapack_complex_float const* DF, + lapack_complex_float const* DUF, + lapack_complex_float const* DU2, lapack_int const* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgtrfs LAPACK_GLOBAL(dgtrfs,DGTRFS) +void LAPACK_dgtrfs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + double const* DL, + double const* D, + double const* DU, + double const* DLF, + double const* DF, + double const* DUF, + double const* DU2, lapack_int const* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgtrfs LAPACK_GLOBAL(sgtrfs,SGTRFS) +void LAPACK_sgtrfs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + float const* DL, + float const* D, + float const* DU, + float const* DLF, + float const* DF, + float const* DUF, + float const* DU2, lapack_int const* ipiv, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgtrfs LAPACK_GLOBAL(zgtrfs,ZGTRFS) +void LAPACK_zgtrfs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* DL, + lapack_complex_double const* D, + lapack_complex_double const* DU, + lapack_complex_double const* DLF, + lapack_complex_double const* DF, + lapack_complex_double const* DUF, + lapack_complex_double const* DU2, lapack_int const* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgtsv LAPACK_GLOBAL(cgtsv,CGTSV) +void LAPACK_cgtsv( + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* DL, + lapack_complex_float* D, + lapack_complex_float* DU, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dgtsv LAPACK_GLOBAL(dgtsv,DGTSV) +void LAPACK_dgtsv( + lapack_int const* n, lapack_int const* nrhs, + double* DL, + double* D, + double* DU, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_sgtsv LAPACK_GLOBAL(sgtsv,SGTSV) +void LAPACK_sgtsv( + lapack_int const* n, lapack_int const* nrhs, + float* DL, + float* D, + float* DU, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zgtsv LAPACK_GLOBAL(zgtsv,ZGTSV) +void LAPACK_zgtsv( + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* DL, + lapack_complex_double* D, + lapack_complex_double* DU, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cgtsvx LAPACK_GLOBAL(cgtsvx,CGTSVX) +void LAPACK_cgtsvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* DL, + lapack_complex_float const* D, + lapack_complex_float const* DU, + 
lapack_complex_float* DLF, + lapack_complex_float* DF, + lapack_complex_float* DUF, + lapack_complex_float* DU2, lapack_int* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dgtsvx LAPACK_GLOBAL(dgtsvx,DGTSVX) +void LAPACK_dgtsvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + double const* DL, + double const* D, + double const* DU, + double* DLF, + double* DF, + double* DUF, + double* DU2, lapack_int* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sgtsvx LAPACK_GLOBAL(sgtsvx,SGTSVX) +void LAPACK_sgtsvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + float const* DL, + float const* D, + float const* DU, + float* DLF, + float* DF, + float* DUF, + float* DU2, lapack_int* ipiv, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zgtsvx LAPACK_GLOBAL(zgtsvx,ZGTSVX) +void LAPACK_zgtsvx( + char const* fact, char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* DL, + lapack_complex_double const* D, + lapack_complex_double const* DU, + lapack_complex_double* DLF, + lapack_complex_double* DF, + lapack_complex_double* DUF, + lapack_complex_double* DU2, lapack_int* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cgttrf LAPACK_GLOBAL(cgttrf,CGTTRF) +void LAPACK_cgttrf( + lapack_int const* n, + lapack_complex_float* DL, + lapack_complex_float* D, + lapack_complex_float* DU, + lapack_complex_float* DU2, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_dgttrf LAPACK_GLOBAL(dgttrf,DGTTRF) +void LAPACK_dgttrf( + lapack_int const* n, + double* DL, + double* D, + double* DU, + double* DU2, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_sgttrf LAPACK_GLOBAL(sgttrf,SGTTRF) +void LAPACK_sgttrf( + lapack_int const* n, + float* DL, + float* D, + float* DU, + float* DU2, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_zgttrf LAPACK_GLOBAL(zgttrf,ZGTTRF) +void LAPACK_zgttrf( + lapack_int const* n, + lapack_complex_double* DL, + lapack_complex_double* D, + lapack_complex_double* DU, + lapack_complex_double* DU2, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_cgttrs LAPACK_GLOBAL(cgttrs,CGTTRS) +void LAPACK_cgttrs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* DL, + lapack_complex_float const* D, + lapack_complex_float const* DU, + lapack_complex_float const* DU2, lapack_int const* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dgttrs LAPACK_GLOBAL(dgttrs,DGTTRS) +void LAPACK_dgttrs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + double const* DL, + double const* D, + double const* DU, + double const* DU2, lapack_int const* ipiv, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_sgttrs LAPACK_GLOBAL(sgttrs,SGTTRS) 
+void LAPACK_sgttrs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + float const* DL, + float const* D, + float const* DU, + float const* DU2, lapack_int const* ipiv, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zgttrs LAPACK_GLOBAL(zgttrs,ZGTTRS) +void LAPACK_zgttrs( + char const* trans, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* DL, + lapack_complex_double const* D, + lapack_complex_double const* DU, + lapack_complex_double const* DU2, lapack_int const* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_chbev LAPACK_GLOBAL(chbev,CHBEV) +void LAPACK_chbev( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float* AB, lapack_int const* ldab, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_zhbev LAPACK_GLOBAL(zhbev,ZHBEV) +void LAPACK_zhbev( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double* AB, lapack_int const* ldab, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_chbev_2stage LAPACK_GLOBAL(chbev_2stage,CHBEV_2STAGE) +void LAPACK_chbev_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float* AB, lapack_int const* ldab, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_zhbev_2stage LAPACK_GLOBAL(zhbev_2stage,ZHBEV_2STAGE) +void LAPACK_zhbev_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double* AB, lapack_int const* ldab, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_chbevd LAPACK_GLOBAL(chbevd,CHBEVD) +void LAPACK_chbevd( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float* AB, lapack_int const* ldab, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zhbevd LAPACK_GLOBAL(zhbevd,ZHBEVD) +void LAPACK_zhbevd( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double* AB, lapack_int const* ldab, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_chbevd_2stage LAPACK_GLOBAL(chbevd_2stage,CHBEVD_2STAGE) +void LAPACK_chbevd_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float* AB, lapack_int const* ldab, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zhbevd_2stage LAPACK_GLOBAL(zhbevd_2stage,ZHBEVD_2STAGE) +void LAPACK_zhbevd_2stage( + char const* 
jobz, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double* AB, lapack_int const* ldab, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_chbevx LAPACK_GLOBAL(chbevx,CHBEVX) +void LAPACK_chbevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float* AB, lapack_int const* ldab, + lapack_complex_float* Q, lapack_int const* ldq, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, + float* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_zhbevx LAPACK_GLOBAL(zhbevx,ZHBEVX) +void LAPACK_zhbevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double* AB, lapack_int const* ldab, + lapack_complex_double* Q, lapack_int const* ldq, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, + double* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_chbevx_2stage LAPACK_GLOBAL(chbevx_2stage,CHBEVX_2STAGE) +void LAPACK_chbevx_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float* AB, lapack_int const* ldab, + lapack_complex_float* Q, lapack_int const* ldq, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_zhbevx_2stage LAPACK_GLOBAL(zhbevx_2stage,ZHBEVX_2STAGE) +void LAPACK_zhbevx_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double* AB, lapack_int const* ldab, + lapack_complex_double* Q, lapack_int const* ldq, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_chbgst LAPACK_GLOBAL(chbgst,CHBGST) +void LAPACK_chbgst( + char const* vect, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + lapack_complex_float* AB, lapack_int const* ldab, + lapack_complex_float const* BB, lapack_int const* ldbb, + lapack_complex_float* X, lapack_int const* ldx, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_zhbgst LAPACK_GLOBAL(zhbgst,ZHBGST) +void LAPACK_zhbgst( + char const* vect, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + lapack_complex_double* AB, lapack_int const* ldab, + lapack_complex_double const* BB, lapack_int const* ldbb, + lapack_complex_double* X, lapack_int const* ldx, + lapack_complex_double* work, + double* rwork, + 
lapack_int* info ); + +#define LAPACK_chbgv LAPACK_GLOBAL(chbgv,CHBGV) +void LAPACK_chbgv( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + lapack_complex_float* AB, lapack_int const* ldab, + lapack_complex_float* BB, lapack_int const* ldbb, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_zhbgv LAPACK_GLOBAL(zhbgv,ZHBGV) +void LAPACK_zhbgv( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + lapack_complex_double* AB, lapack_int const* ldab, + lapack_complex_double* BB, lapack_int const* ldbb, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_chbgvd LAPACK_GLOBAL(chbgvd,CHBGVD) +void LAPACK_chbgvd( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + lapack_complex_float* AB, lapack_int const* ldab, + lapack_complex_float* BB, lapack_int const* ldbb, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zhbgvd LAPACK_GLOBAL(zhbgvd,ZHBGVD) +void LAPACK_zhbgvd( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + lapack_complex_double* AB, lapack_int const* ldab, + lapack_complex_double* BB, lapack_int const* ldbb, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_chbgvx LAPACK_GLOBAL(chbgvx,CHBGVX) +void LAPACK_chbgvx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + lapack_complex_float* AB, lapack_int const* ldab, + lapack_complex_float* BB, lapack_int const* ldbb, + lapack_complex_float* Q, lapack_int const* ldq, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, + float* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_zhbgvx LAPACK_GLOBAL(zhbgvx,ZHBGVX) +void LAPACK_zhbgvx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + lapack_complex_double* AB, lapack_int const* ldab, + lapack_complex_double* BB, lapack_int const* ldbb, + lapack_complex_double* Q, lapack_int const* ldq, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, + double* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_chbtrd LAPACK_GLOBAL(chbtrd,CHBTRD) +void LAPACK_chbtrd( + char const* vect, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float* AB, lapack_int const* ldab, + float* D, + float* E, + lapack_complex_float* Q, lapack_int const* ldq, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_zhbtrd 
LAPACK_GLOBAL(zhbtrd,ZHBTRD) +void LAPACK_zhbtrd( + char const* vect, char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double* AB, lapack_int const* ldab, + double* D, + double* E, + lapack_complex_double* Q, lapack_int const* ldq, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_checon LAPACK_GLOBAL(checon,CHECON) +void LAPACK_checon( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv, + float const* anorm, + float* rcond, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_zhecon LAPACK_GLOBAL(zhecon,ZHECON) +void LAPACK_zhecon( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv, + double const* anorm, + double* rcond, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_checon_3 LAPACK_GLOBAL(checon_3,CHECON_3) +void LAPACK_checon_3( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* E, lapack_int const* ipiv, + float const* anorm, + float* rcond, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_zhecon_3 LAPACK_GLOBAL(zhecon_3,ZHECON_3) +void LAPACK_zhecon_3( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* E, lapack_int const* ipiv, + double const* anorm, + double* rcond, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_cheequb LAPACK_GLOBAL(cheequb,CHEEQUB) +void LAPACK_cheequb( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + float* S, + float* scond, + float* amax, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_zheequb LAPACK_GLOBAL(zheequb,ZHEEQUB) +void LAPACK_zheequb( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + double* S, + double* scond, + double* amax, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_cheev LAPACK_GLOBAL(cheev,CHEEV) +void LAPACK_cheev( + char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* W, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_zheev LAPACK_GLOBAL(zheev,ZHEEV) +void LAPACK_zheev( + char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* W, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cheev_2stage LAPACK_GLOBAL(cheev_2stage,CHEEV_2STAGE) +void LAPACK_cheev_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* W, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_zheev_2stage LAPACK_GLOBAL(zheev_2stage,ZHEEV_2STAGE) +void LAPACK_zheev_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* W, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_cheevd LAPACK_GLOBAL(cheevd,CHEEVD) +void LAPACK_cheevd( + char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* W, + lapack_complex_float* work, 
lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zheevd LAPACK_GLOBAL(zheevd,ZHEEVD) +void LAPACK_zheevd( + char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* W, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_cheevd_2stage LAPACK_GLOBAL(cheevd_2stage,CHEEVD_2STAGE) +void LAPACK_cheevd_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* W, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zheevd_2stage LAPACK_GLOBAL(zheevd_2stage,ZHEEVD_2STAGE) +void LAPACK_zheevd_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* W, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_cheevr LAPACK_GLOBAL(cheevr,CHEEVR) +void LAPACK_cheevr( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zheevr LAPACK_GLOBAL(zheevr,ZHEEVR) +void LAPACK_zheevr( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_cheevr_2stage LAPACK_GLOBAL(cheevr_2stage,CHEEVR_2STAGE) +void LAPACK_cheevr_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zheevr_2stage LAPACK_GLOBAL(zheevr_2stage,ZHEEVR_2STAGE) +void LAPACK_zheevr_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + lapack_complex_double* work, lapack_int const* lwork, + double* 
rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_cheevx LAPACK_GLOBAL(cheevx,CHEEVX) +void LAPACK_cheevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_zheevx LAPACK_GLOBAL(zheevx,ZHEEVX) +void LAPACK_zheevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_cheevx_2stage LAPACK_GLOBAL(cheevx_2stage,CHEEVX_2STAGE) +void LAPACK_cheevx_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_zheevx_2stage LAPACK_GLOBAL(zheevx_2stage,ZHEEVX_2STAGE) +void LAPACK_zheevx_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_chegst LAPACK_GLOBAL(chegst,CHEGST) +void LAPACK_chegst( + lapack_int const* itype, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zhegst LAPACK_GLOBAL(zhegst,ZHEGST) +void LAPACK_zhegst( + lapack_int const* itype, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_chegv LAPACK_GLOBAL(chegv,CHEGV) +void LAPACK_chegv( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + float* W, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_zhegv LAPACK_GLOBAL(zhegv,ZHEGV) +void LAPACK_zhegv( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + double* W, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_chegv_2stage 
LAPACK_GLOBAL(chegv_2stage,CHEGV_2STAGE) +void LAPACK_chegv_2stage( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + float* W, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_zhegv_2stage LAPACK_GLOBAL(zhegv_2stage,ZHEGV_2STAGE) +void LAPACK_zhegv_2stage( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + double* W, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_chegvd LAPACK_GLOBAL(chegvd,CHEGVD) +void LAPACK_chegvd( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + float* W, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zhegvd LAPACK_GLOBAL(zhegvd,ZHEGVD) +void LAPACK_zhegvd( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + double* W, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_chegvx LAPACK_GLOBAL(chegvx,CHEGVX) +void LAPACK_chegvx( + lapack_int const* itype, char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_zhegvx LAPACK_GLOBAL(zhegvx,ZHEGVX) +void LAPACK_zhegvx( + lapack_int const* itype, char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_cherfs LAPACK_GLOBAL(cherfs,CHERFS) +void LAPACK_cherfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_zherfs LAPACK_GLOBAL(zherfs,ZHERFS) +void LAPACK_zherfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* AF, 
lapack_int const* ldaf, lapack_int const* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cherfsx LAPACK_GLOBAL(cherfsx,CHERFSX) +void LAPACK_cherfsx( + char const* uplo, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + float* S, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_zherfsx LAPACK_GLOBAL(zherfsx,ZHERFSX) +void LAPACK_zherfsx( + char const* uplo, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + double* S, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_chesv LAPACK_GLOBAL(chesv,CHESV) +void LAPACK_chesv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhesv LAPACK_GLOBAL(zhesv,ZHESV) +void LAPACK_zhesv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chesv_aa LAPACK_GLOBAL(chesv_aa,CHESV_AA) +void LAPACK_chesv_aa( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhesv_aa LAPACK_GLOBAL(zhesv_aa,ZHESV_AA) +void LAPACK_zhesv_aa( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chesv_aa_2stage LAPACK_GLOBAL(chesv_aa_2stage,CHESV_AA_2STAGE) +void LAPACK_chesv_aa_2stage( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhesv_aa_2stage LAPACK_GLOBAL(zhesv_aa_2stage,ZHESV_AA_2STAGE) +void LAPACK_zhesv_aa_2stage( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + 
lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chesv_rk LAPACK_GLOBAL(chesv_rk,CHESV_RK) +void LAPACK_chesv_rk( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* E, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhesv_rk LAPACK_GLOBAL(zhesv_rk,ZHESV_RK) +void LAPACK_zhesv_rk( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* E, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chesv_rook LAPACK_GLOBAL(chesv_rook,CHESV_ROOK) +void LAPACK_chesv_rook( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhesv_rook LAPACK_GLOBAL(zhesv_rook,ZHESV_ROOK) +void LAPACK_zhesv_rook( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chesvx LAPACK_GLOBAL(chesvx,CHESVX) +void LAPACK_chesvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float* AF, lapack_int const* ldaf, lapack_int* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_zhesvx LAPACK_GLOBAL(zhesvx,ZHESVX) +void LAPACK_zhesvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double* AF, lapack_int const* ldaf, lapack_int* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_chesvxx LAPACK_GLOBAL(chesvxx,CHESVXX) +void LAPACK_chesvxx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + float* S, + lapack_complex_float* B, + lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* rpvgrw, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_zhesvxx LAPACK_GLOBAL(zhesvxx,ZHESVXX) +void LAPACK_zhesvxx( + char const* fact, char const* 
uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + double* S, + lapack_complex_double* B, + lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cheswapr LAPACK_GLOBAL(cheswapr,CHESWAPR) +void LAPACK_cheswapr( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int const* i1, lapack_int const* i2 ); + +#define LAPACK_zheswapr LAPACK_GLOBAL(zheswapr,ZHESWAPR) +void LAPACK_zheswapr( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int const* i1, lapack_int const* i2 ); + +#define LAPACK_chetrd LAPACK_GLOBAL(chetrd,CHETRD) +void LAPACK_chetrd( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* D, + float* E, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhetrd LAPACK_GLOBAL(zhetrd,ZHETRD) +void LAPACK_zhetrd( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* D, + double* E, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chetrd_2stage LAPACK_GLOBAL(chetrd_2stage,CHETRD_2STAGE) +void LAPACK_chetrd_2stage( + char const* vect, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + float* D, + float* E, + lapack_complex_float* tau, + lapack_complex_float* HOUS2, lapack_int const* lhous2, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhetrd_2stage LAPACK_GLOBAL(zhetrd_2stage,ZHETRD_2STAGE) +void LAPACK_zhetrd_2stage( + char const* vect, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + double* D, + double* E, + lapack_complex_double* tau, + lapack_complex_double* HOUS2, lapack_int const* lhous2, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chetrf LAPACK_GLOBAL(chetrf,CHETRF) +void LAPACK_chetrf( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhetrf LAPACK_GLOBAL(zhetrf,ZHETRF) +void LAPACK_zhetrf( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chetrf_aa LAPACK_GLOBAL(chetrf_aa,CHETRF_AA) +void LAPACK_chetrf_aa( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhetrf_aa LAPACK_GLOBAL(zhetrf_aa,ZHETRF_AA) +void LAPACK_zhetrf_aa( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chetrf_aa_2stage 
LAPACK_GLOBAL(chetrf_aa_2stage,CHETRF_AA_2STAGE) +void LAPACK_chetrf_aa_2stage( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhetrf_aa_2stage LAPACK_GLOBAL(zhetrf_aa_2stage,ZHETRF_AA_2STAGE) +void LAPACK_zhetrf_aa_2stage( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chetrf_rk LAPACK_GLOBAL(chetrf_rk,CHETRF_RK) +void LAPACK_chetrf_rk( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* E, lapack_int* ipiv, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhetrf_rk LAPACK_GLOBAL(zhetrf_rk,ZHETRF_RK) +void LAPACK_zhetrf_rk( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* E, lapack_int* ipiv, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chetrf_rook LAPACK_GLOBAL(chetrf_rook,CHETRF_ROOK) +void LAPACK_chetrf_rook( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhetrf_rook LAPACK_GLOBAL(zhetrf_rook,ZHETRF_ROOK) +void LAPACK_zhetrf_rook( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chetri LAPACK_GLOBAL(chetri,CHETRI) +void LAPACK_chetri( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_zhetri LAPACK_GLOBAL(zhetri,ZHETRI) +void LAPACK_zhetri( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_chetri2 LAPACK_GLOBAL(chetri2,CHETRI2) +void LAPACK_chetri2( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zhetri2 LAPACK_GLOBAL(zhetri2,ZHETRI2) +void LAPACK_zhetri2( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_chetri2x LAPACK_GLOBAL(chetri2x,CHETRI2X) +void LAPACK_chetri2x( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_float* work, lapack_int const* nb, + lapack_int* info ); + +#define LAPACK_zhetri2x LAPACK_GLOBAL(zhetri2x,ZHETRI2X) +void LAPACK_zhetri2x( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_double* work, lapack_int const* nb, + lapack_int* info ); + +#define LAPACK_chetri_3 LAPACK_GLOBAL(chetri_3,CHETRI_3) +void 
+void LAPACK_chetri_3(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float const* E, lapack_int const* ipiv,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zhetri_3 LAPACK_GLOBAL(zhetri_3,ZHETRI_3)
+void LAPACK_zhetri_3(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double const* E, lapack_int const* ipiv,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_chetrs LAPACK_GLOBAL(chetrs,CHETRS)
+void LAPACK_chetrs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zhetrs LAPACK_GLOBAL(zhetrs,ZHETRS)
+void LAPACK_zhetrs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_chetrs2 LAPACK_GLOBAL(chetrs2,CHETRS2)
+void LAPACK_chetrs2(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_zhetrs2 LAPACK_GLOBAL(zhetrs2,ZHETRS2)
+void LAPACK_zhetrs2(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_chetrs_3 LAPACK_GLOBAL(chetrs_3,CHETRS_3)
+void LAPACK_chetrs_3(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda,
+    lapack_complex_float const* E, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zhetrs_3 LAPACK_GLOBAL(zhetrs_3,ZHETRS_3)
+void LAPACK_zhetrs_3(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_double const* E, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_chetrs_aa LAPACK_GLOBAL(chetrs_aa,CHETRS_AA)
+void LAPACK_chetrs_aa(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zhetrs_aa LAPACK_GLOBAL(zhetrs_aa,ZHETRS_AA)
+void LAPACK_zhetrs_aa(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_chetrs_aa_2stage LAPACK_GLOBAL(chetrs_aa_2stage,CHETRS_AA_2STAGE)
+void LAPACK_chetrs_aa_2stage(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda,
+    lapack_complex_float* TB, lapack_int const* ltb, lapack_int const* ipiv, lapack_int const* ipiv2,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
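The factor/solve split is the usual pattern here: one call to *hetrf computes the Bunch-Kaufman factorization, after which any number of right-hand sides can be solved with *hetrs. A hedged sketch; the helper name and include path are ours, not part of the header:

#include <complex.h>
#include <stdlib.h>
#include <lapack.h>   /* assumed include name */

/* Illustrative helper: solve A*x = b for Hermitian indefinite A.
   A is overwritten by its factorization, b by the solution. */
lapack_int solve_hermitian(lapack_int n, lapack_complex_double* A, lapack_int lda,
                           lapack_complex_double* b)
{
    char uplo = 'U';
    lapack_int info, nrhs = 1, lwork = 64 * n;  /* heuristic; could query with lwork = -1 */
    lapack_int* ipiv = malloc((size_t)n * sizeof *ipiv);
    lapack_complex_double* work = malloc((size_t)lwork * sizeof *work);
    LAPACK_zhetrf(&uplo, &n, A, &lda, ipiv, work, &lwork, &info);      /* A = U*D*U^H */
    if (info == 0)
        LAPACK_zhetrs(&uplo, &n, &nrhs, A, &lda, ipiv, b, &n, &info);  /* back-substitute */
    free(work);
    free(ipiv);
    return info;
}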
+#define LAPACK_zhetrs_aa_2stage LAPACK_GLOBAL(zhetrs_aa_2stage,ZHETRS_AA_2STAGE)
+void LAPACK_zhetrs_aa_2stage(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_double* TB, lapack_int const* ltb, lapack_int const* ipiv, lapack_int const* ipiv2,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_chetrs_rook LAPACK_GLOBAL(chetrs_rook,CHETRS_ROOK)
+void LAPACK_chetrs_rook(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zhetrs_rook LAPACK_GLOBAL(zhetrs_rook,ZHETRS_ROOK)
+void LAPACK_zhetrs_rook(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_chfrk LAPACK_GLOBAL(chfrk,CHFRK)
+void LAPACK_chfrk(
+    char const* transr, char const* uplo, char const* trans,
+    lapack_int const* n, lapack_int const* k,
+    float const* alpha,
+    lapack_complex_float const* A, lapack_int const* lda,
+    float const* beta,
+    lapack_complex_float* C );
+
+#define LAPACK_zhfrk LAPACK_GLOBAL(zhfrk,ZHFRK)
+void LAPACK_zhfrk(
+    char const* transr, char const* uplo, char const* trans,
+    lapack_int const* n, lapack_int const* k,
+    double const* alpha,
+    lapack_complex_double const* A, lapack_int const* lda,
+    double const* beta,
+    lapack_complex_double* C );
+
+#define LAPACK_chgeqz LAPACK_GLOBAL(chgeqz,CHGEQZ)
+void LAPACK_chgeqz(
+    char const* job, char const* compq, char const* compz,
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    lapack_complex_float* H, lapack_int const* ldh,
+    lapack_complex_float* T, lapack_int const* ldt,
+    lapack_complex_float* alpha,
+    lapack_complex_float* beta,
+    lapack_complex_float* Q, lapack_int const* ldq,
+    lapack_complex_float* Z, lapack_int const* ldz,
+    lapack_complex_float* work, lapack_int const* lwork,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_dhgeqz LAPACK_GLOBAL(dhgeqz,DHGEQZ)
+void LAPACK_dhgeqz(
+    char const* job, char const* compq, char const* compz,
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    double* H, lapack_int const* ldh,
+    double* T, lapack_int const* ldt,
+    double* alphar,
+    double* alphai,
+    double* beta,
+    double* Q, lapack_int const* ldq,
+    double* Z, lapack_int const* ldz,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_shgeqz LAPACK_GLOBAL(shgeqz,SHGEQZ)
+void LAPACK_shgeqz(
+    char const* job, char const* compq, char const* compz,
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    float* H, lapack_int const* ldh,
+    float* T, lapack_int const* ldt,
+    float* alphar,
+    float* alphai,
+    float* beta,
+    float* Q, lapack_int const* ldq,
+    float* Z, lapack_int const* ldz,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zhgeqz LAPACK_GLOBAL(zhgeqz,ZHGEQZ)
+void LAPACK_zhgeqz(
+    char const* job, char const* compq, char const* compz,
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    lapack_complex_double* H, lapack_int const* ldh,
+    lapack_complex_double* T, lapack_int const* ldt,
+    lapack_complex_double* alpha,
+    lapack_complex_double* beta,
+    lapack_complex_double* Q, lapack_int const* ldq,
+    lapack_complex_double* Z, lapack_int const* ldz,
+    lapack_complex_double* work, lapack_int const* lwork,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_chpcon LAPACK_GLOBAL(chpcon,CHPCON)
+void LAPACK_chpcon(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* AP, lapack_int const* ipiv,
+    float const* anorm,
+    float* rcond,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_zhpcon LAPACK_GLOBAL(zhpcon,ZHPCON)
+void LAPACK_zhpcon(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* AP, lapack_int const* ipiv,
+    double const* anorm,
+    double* rcond,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_chpev LAPACK_GLOBAL(chpev,CHPEV)
+void LAPACK_chpev(
+    char const* jobz, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    float* W,
+    lapack_complex_float* Z, lapack_int const* ldz,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_zhpev LAPACK_GLOBAL(zhpev,ZHPEV)
+void LAPACK_zhpev(
+    char const* jobz, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    double* W,
+    lapack_complex_double* Z, lapack_int const* ldz,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_chpevd LAPACK_GLOBAL(chpevd,CHPEVD)
+void LAPACK_chpevd(
+    char const* jobz, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    float* W,
+    lapack_complex_float* Z, lapack_int const* ldz,
+    lapack_complex_float* work, lapack_int const* lwork,
+    float* rwork, lapack_int const* lrwork,
+    lapack_int* iwork, lapack_int const* liwork,
+    lapack_int* info );
+
+#define LAPACK_zhpevd LAPACK_GLOBAL(zhpevd,ZHPEVD)
+void LAPACK_zhpevd(
+    char const* jobz, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    double* W,
+    lapack_complex_double* Z, lapack_int const* ldz,
+    lapack_complex_double* work, lapack_int const* lwork,
+    double* rwork, lapack_int const* lrwork,
+    lapack_int* iwork, lapack_int const* liwork,
+    lapack_int* info );
+
+#define LAPACK_chpevx LAPACK_GLOBAL(chpevx,CHPEVX)
+void LAPACK_chpevx(
+    char const* jobz, char const* range, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    float const* vl,
+    float const* vu, lapack_int const* il, lapack_int const* iu,
+    float const* abstol, lapack_int* m,
+    float* W,
+    lapack_complex_float* Z, lapack_int const* ldz,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* iwork, lapack_int* IFAIL,
+    lapack_int* info );
+
+#define LAPACK_zhpevx LAPACK_GLOBAL(zhpevx,ZHPEVX)
+void LAPACK_zhpevx(
+    char const* jobz, char const* range, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    double const* vl,
+    double const* vu, lapack_int const* il, lapack_int const* iu,
+    double const* abstol, lapack_int* m,
+    double* W,
+    lapack_complex_double* Z, lapack_int const* ldz,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* iwork, lapack_int* IFAIL,
+    lapack_int* info );
+
+#define LAPACK_chpgst LAPACK_GLOBAL(chpgst,CHPGST)
+void LAPACK_chpgst(
+    lapack_int const* itype, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    lapack_complex_float const* BP,
+    lapack_int* info );
+
+#define LAPACK_zhpgst LAPACK_GLOBAL(zhpgst,ZHPGST)
+void LAPACK_zhpgst(
+    lapack_int const* itype, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    lapack_complex_double const* BP,
+    lapack_int* info );
+
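The *hpev family operates on packed storage, where only the referenced triangle is stored column by column in a vector of n*(n+1)/2 entries. A small usage sketch for LAPACK_zhpev with eigenvalues only; the workspace sizes 2n-1 and 3n-2 follow the Fortran documentation, and the helper name is hypothetical:

#include <complex.h>
#include <stdlib.h>
#include <lapack.h>   /* assumed include name */

/* Illustrative helper: eigenvalues of a packed Hermitian matrix.
   AP holds the upper triangle (n*(n+1)/2 entries); W receives n eigenvalues. */
lapack_int packed_eigenvalues(lapack_int n, lapack_complex_double* AP, double* W)
{
    char jobz = 'N', uplo = 'U';   /* eigenvalues only */
    lapack_int info, ldz = 1;
    lapack_complex_double zdummy;  /* Z is not referenced when jobz = 'N' */
    lapack_complex_double* work = malloc((size_t)(2 * n - 1) * sizeof *work);
    double* rwork = malloc((size_t)(3 * n - 2) * sizeof *rwork);
    LAPACK_zhpev(&jobz, &uplo, &n, AP, W, &zdummy, &ldz, work, rwork, &info);
    free(rwork);
    free(work);
    return info;
}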
+#define LAPACK_chpgv LAPACK_GLOBAL(chpgv,CHPGV)
+void LAPACK_chpgv(
+    lapack_int const* itype, char const* jobz, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    lapack_complex_float* BP,
+    float* W,
+    lapack_complex_float* Z, lapack_int const* ldz,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_zhpgv LAPACK_GLOBAL(zhpgv,ZHPGV)
+void LAPACK_zhpgv(
+    lapack_int const* itype, char const* jobz, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    lapack_complex_double* BP,
+    double* W,
+    lapack_complex_double* Z, lapack_int const* ldz,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_chpgvd LAPACK_GLOBAL(chpgvd,CHPGVD)
+void LAPACK_chpgvd(
+    lapack_int const* itype, char const* jobz, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    lapack_complex_float* BP,
+    float* W,
+    lapack_complex_float* Z, lapack_int const* ldz,
+    lapack_complex_float* work, lapack_int const* lwork,
+    float* rwork, lapack_int const* lrwork,
+    lapack_int* iwork, lapack_int const* liwork,
+    lapack_int* info );
+
+#define LAPACK_zhpgvd LAPACK_GLOBAL(zhpgvd,ZHPGVD)
+void LAPACK_zhpgvd(
+    lapack_int const* itype, char const* jobz, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    lapack_complex_double* BP,
+    double* W,
+    lapack_complex_double* Z, lapack_int const* ldz,
+    lapack_complex_double* work, lapack_int const* lwork,
+    double* rwork, lapack_int const* lrwork,
+    lapack_int* iwork, lapack_int const* liwork,
+    lapack_int* info );
+
+#define LAPACK_chpgvx LAPACK_GLOBAL(chpgvx,CHPGVX)
+void LAPACK_chpgvx(
+    lapack_int const* itype, char const* jobz, char const* range, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    lapack_complex_float* BP,
+    float const* vl,
+    float const* vu, lapack_int const* il, lapack_int const* iu,
+    float const* abstol, lapack_int* m,
+    float* W,
+    lapack_complex_float* Z, lapack_int const* ldz,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* iwork, lapack_int* IFAIL,
+    lapack_int* info );
+
+#define LAPACK_zhpgvx LAPACK_GLOBAL(zhpgvx,ZHPGVX)
+void LAPACK_zhpgvx(
+    lapack_int const* itype, char const* jobz, char const* range, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    lapack_complex_double* BP,
+    double const* vl,
+    double const* vu, lapack_int const* il, lapack_int const* iu,
+    double const* abstol, lapack_int* m,
+    double* W,
+    lapack_complex_double* Z, lapack_int const* ldz,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* iwork, lapack_int* IFAIL,
+    lapack_int* info );
+
+#define LAPACK_chprfs LAPACK_GLOBAL(chprfs,CHPRFS)
+void LAPACK_chprfs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* AP,
+    lapack_complex_float const* AFP, lapack_int const* ipiv,
+    lapack_complex_float const* B, lapack_int const* ldb,
+    lapack_complex_float* X, lapack_int const* ldx,
+    float* ferr,
+    float* berr,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_zhprfs LAPACK_GLOBAL(zhprfs,ZHPRFS)
+void LAPACK_zhprfs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* AP,
+    lapack_complex_double const* AFP, lapack_int const* ipiv,
+    lapack_complex_double const* B, lapack_int const* ldb,
+    lapack_complex_double* X, lapack_int const* ldx,
+    double* ferr,
+    double* berr,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_chpsv LAPACK_GLOBAL(chpsv,CHPSV)
+void LAPACK_chpsv(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float* AP, lapack_int* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zhpsv LAPACK_GLOBAL(zhpsv,ZHPSV)
+void LAPACK_zhpsv(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double* AP, lapack_int* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_chpsvx LAPACK_GLOBAL(chpsvx,CHPSVX)
+void LAPACK_chpsvx(
+    char const* fact, char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* AP,
+    lapack_complex_float* AFP, lapack_int* ipiv,
+    lapack_complex_float const* B, lapack_int const* ldb,
+    lapack_complex_float* X, lapack_int const* ldx,
+    float* rcond,
+    float* ferr,
+    float* berr,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_zhpsvx LAPACK_GLOBAL(zhpsvx,ZHPSVX)
+void LAPACK_zhpsvx(
+    char const* fact, char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* AP,
+    lapack_complex_double* AFP, lapack_int* ipiv,
+    lapack_complex_double const* B, lapack_int const* ldb,
+    lapack_complex_double* X, lapack_int const* ldx,
+    double* rcond,
+    double* ferr,
+    double* berr,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_chptrd LAPACK_GLOBAL(chptrd,CHPTRD)
+void LAPACK_chptrd(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    float* D,
+    float* E,
+    lapack_complex_float* tau,
+    lapack_int* info );
+
+#define LAPACK_zhptrd LAPACK_GLOBAL(zhptrd,ZHPTRD)
+void LAPACK_zhptrd(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    double* D,
+    double* E,
+    lapack_complex_double* tau,
+    lapack_int* info );
+
+#define LAPACK_chptrf LAPACK_GLOBAL(chptrf,CHPTRF)
+void LAPACK_chptrf(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP, lapack_int* ipiv,
+    lapack_int* info );
+
+#define LAPACK_zhptrf LAPACK_GLOBAL(zhptrf,ZHPTRF)
+void LAPACK_zhptrf(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP, lapack_int* ipiv,
+    lapack_int* info );
+
+#define LAPACK_chptri LAPACK_GLOBAL(chptri,CHPTRI)
+void LAPACK_chptri(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* AP, lapack_int const* ipiv,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_zhptri LAPACK_GLOBAL(zhptri,ZHPTRI)
+void LAPACK_zhptri(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* AP, lapack_int const* ipiv,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_chptrs LAPACK_GLOBAL(chptrs,CHPTRS)
+void LAPACK_chptrs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* AP, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zhptrs LAPACK_GLOBAL(zhptrs,ZHPTRS)
+void LAPACK_zhptrs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* AP, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
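For a one-shot solve with packed Hermitian storage, the *hpsv simple drivers fold factorization and back-substitution into a single call. A sketch with an assumed include path and helper name:

#include <complex.h>
#include <stdlib.h>
#include <lapack.h>   /* assumed include name */

/* Illustrative helper: one-call solve of A*x = b, A Hermitian in packed storage.
   AP is overwritten by the factorization, b by the solution. */
lapack_int solve_packed(lapack_int n, lapack_complex_double* AP, lapack_complex_double* b)
{
    char uplo = 'L';
    lapack_int info, nrhs = 1;
    lapack_int* ipiv = malloc((size_t)n * sizeof *ipiv);
    LAPACK_zhpsv(&uplo, &n, &nrhs, AP, ipiv, b, &n, &info);
    free(ipiv);
    return info;
}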
+#define LAPACK_chsein LAPACK_GLOBAL(chsein,CHSEIN)
+void LAPACK_chsein(
+    char const* side, char const* eigsrc, char const* initv,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_float const* H, lapack_int const* ldh,
+    lapack_complex_float* W,
+    lapack_complex_float* VL, lapack_int const* ldvl,
+    lapack_complex_float* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    lapack_complex_float* work,
+    float* rwork, lapack_int* IFAILL, lapack_int* IFAILR,
+    lapack_int* info );
+
+#define LAPACK_dhsein LAPACK_GLOBAL(dhsein,DHSEIN)
+void LAPACK_dhsein(
+    char const* side, char const* eigsrc, char const* initv,
+    lapack_logical* select,
+    lapack_int const* n,
+    double const* H, lapack_int const* ldh,
+    double* WR,
+    double const* WI,
+    double* VL, lapack_int const* ldvl,
+    double* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    double* work, lapack_int* IFAILL, lapack_int* IFAILR,
+    lapack_int* info );
+
+#define LAPACK_shsein LAPACK_GLOBAL(shsein,SHSEIN)
+void LAPACK_shsein(
+    char const* side, char const* eigsrc, char const* initv,
+    lapack_logical* select,
+    lapack_int const* n,
+    float const* H, lapack_int const* ldh,
+    float* WR,
+    float const* WI,
+    float* VL, lapack_int const* ldvl,
+    float* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    float* work, lapack_int* IFAILL, lapack_int* IFAILR,
+    lapack_int* info );
+
+#define LAPACK_zhsein LAPACK_GLOBAL(zhsein,ZHSEIN)
+void LAPACK_zhsein(
+    char const* side, char const* eigsrc, char const* initv,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_double const* H, lapack_int const* ldh,
+    lapack_complex_double* W,
+    lapack_complex_double* VL, lapack_int const* ldvl,
+    lapack_complex_double* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    lapack_complex_double* work,
+    double* rwork, lapack_int* IFAILL, lapack_int* IFAILR,
+    lapack_int* info );
+
+#define LAPACK_chseqr LAPACK_GLOBAL(chseqr,CHSEQR)
+void LAPACK_chseqr(
+    char const* job, char const* compz,
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    lapack_complex_float* H, lapack_int const* ldh,
+    lapack_complex_float* W,
+    lapack_complex_float* Z, lapack_int const* ldz,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dhseqr LAPACK_GLOBAL(dhseqr,DHSEQR)
+void LAPACK_dhseqr(
+    char const* job, char const* compz,
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    double* H, lapack_int const* ldh,
+    double* WR,
+    double* WI,
+    double* Z, lapack_int const* ldz,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_shseqr LAPACK_GLOBAL(shseqr,SHSEQR)
+void LAPACK_shseqr(
+    char const* job, char const* compz,
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    float* H, lapack_int const* ldh,
+    float* WR,
+    float* WI,
+    float* Z, lapack_int const* ldz,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zhseqr LAPACK_GLOBAL(zhseqr,ZHSEQR)
+void LAPACK_zhseqr(
+    char const* job, char const* compz,
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    lapack_complex_double* H, lapack_int const* ldh,
+    lapack_complex_double* W,
+    lapack_complex_double* Z, lapack_int const* ldz,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_clacgv LAPACK_GLOBAL(clacgv,CLACGV)
+void LAPACK_clacgv(
+    lapack_int const* n,
+    lapack_complex_float* X, lapack_int const* incx );
+
+#define LAPACK_zlacgv LAPACK_GLOBAL(zlacgv,ZLACGV)
+void LAPACK_zlacgv(
+    lapack_int const* n,
+    lapack_complex_double* X, lapack_int const* incx );
+
+#define LAPACK_clacn2 LAPACK_GLOBAL(clacn2,CLACN2)
+void LAPACK_clacn2(
+    lapack_int const* n,
+    lapack_complex_float* V,
+    lapack_complex_float* X,
+    float* est, lapack_int* kase, lapack_int* ISAVE );
+
+#define LAPACK_dlacn2 LAPACK_GLOBAL(dlacn2,DLACN2)
+void LAPACK_dlacn2(
+    lapack_int const* n,
+    double* V,
+    double* X, lapack_int* ISGN,
+    double* est, lapack_int* kase, lapack_int* ISAVE );
+
+#define LAPACK_slacn2 LAPACK_GLOBAL(slacn2,SLACN2)
+void LAPACK_slacn2(
+    lapack_int const* n,
+    float* V,
+    float* X, lapack_int* ISGN,
+    float* est, lapack_int* kase, lapack_int* ISAVE );
+
+#define LAPACK_zlacn2 LAPACK_GLOBAL(zlacn2,ZLACN2)
+void LAPACK_zlacn2(
+    lapack_int const* n,
+    lapack_complex_double* V,
+    lapack_complex_double* X,
+    double* est, lapack_int* kase, lapack_int* ISAVE );
+
+#define LAPACK_clacp2 LAPACK_GLOBAL(clacp2,CLACP2)
+void LAPACK_clacp2(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb );
+
+#define LAPACK_zlacp2 LAPACK_GLOBAL(zlacp2,ZLACP2)
+void LAPACK_zlacp2(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb );
+
+#define LAPACK_clacpy LAPACK_GLOBAL(clacpy,CLACPY)
+void LAPACK_clacpy(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb );
+
+#define LAPACK_dlacpy LAPACK_GLOBAL(dlacpy,DLACPY)
+void LAPACK_dlacpy(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb );
+
+#define LAPACK_slacpy LAPACK_GLOBAL(slacpy,SLACPY)
+void LAPACK_slacpy(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb );
+
+#define LAPACK_zlacpy LAPACK_GLOBAL(zlacpy,ZLACPY)
+void LAPACK_zlacpy(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb );
+
+#define LAPACK_clacrm LAPACK_GLOBAL(clacrm,CLACRM)
+void LAPACK_clacrm(
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    float const* B, lapack_int const* ldb,
+    lapack_complex_float* C, lapack_int const* ldc,
+    float* rwork );
+
+#define LAPACK_zlacrm LAPACK_GLOBAL(zlacrm,ZLACRM)
+void LAPACK_zlacrm(
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    double const* B, lapack_int const* ldb,
+    lapack_complex_double* C, lapack_int const* ldc,
+    double* rwork );
+
+#define LAPACK_zlag2c LAPACK_GLOBAL(zlag2c,ZLAG2C)
+void LAPACK_zlag2c(
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_float* SA, lapack_int const* ldsa,
+    lapack_int* info );
+
+#define LAPACK_slag2d LAPACK_GLOBAL(slag2d,SLAG2D)
+void LAPACK_slag2d(
+    lapack_int const* m, lapack_int const* n,
+    float const* SA, lapack_int const* ldsa,
+    double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_dlag2s LAPACK_GLOBAL(dlag2s,DLAG2S)
+void LAPACK_dlag2s(
+    lapack_int const* m, lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    float* SA, lapack_int const* ldsa,
+    lapack_int* info );
+
+#define LAPACK_clag2z LAPACK_GLOBAL(clag2z,CLAG2Z)
+void LAPACK_clag2z(
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* SA, lapack_int const* ldsa,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_int* info );
+
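The *lacn2 estimators use reverse communication: the caller repeatedly re-enters the routine and, between calls, applies A or its transpose to the vector X as requested by kase, so the matrix itself never has to be passed in. A sketch of that loop for dlacn2, with a naive column-major matrix-vector product standing in for the operator (helper name and include path assumed):

#include <stdlib.h>
#include <lapack.h>   /* assumed include name */

/* Illustrative helper: estimate the 1-norm of a column-major n x n matrix A.
   dlacn2 only ever asks for products with A (kase == 1) or A^T (kase == 2). */
double estimate_one_norm(lapack_int n, double const* A, lapack_int lda)
{
    double est = 0.0;
    double* V = malloc((size_t)n * sizeof *V);
    double* X = malloc((size_t)n * sizeof *X);
    double* y = malloc((size_t)n * sizeof *y);
    lapack_int* isgn = malloc((size_t)n * sizeof *isgn);
    lapack_int kase = 0, isave[3];
    do {
        LAPACK_dlacn2(&n, V, X, isgn, &est, &kase, isave);
        if (kase == 1 || kase == 2) {
            /* overwrite X with A*X (kase 1) or A^T*X (kase 2), naive matvec */
            for (lapack_int i = 0; i < n; ++i) y[i] = 0.0;
            for (lapack_int j = 0; j < n; ++j)
                for (lapack_int i = 0; i < n; ++i)
                    y[i] += (kase == 1 ? A[i + j * lda] : A[j + i * lda]) * X[j];
            for (lapack_int i = 0; i < n; ++i) X[i] = y[i];
        }
    } while (kase != 0);
    free(isgn); free(y); free(X); free(V);
    return est;
}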
+#define LAPACK_clagge LAPACK_GLOBAL(clagge,CLAGGE)
+void LAPACK_clagge(
+    lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku,
+    float const* D,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int* iseed,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dlagge LAPACK_GLOBAL(dlagge,DLAGGE)
+void LAPACK_dlagge(
+    lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku,
+    double const* D,
+    double* A, lapack_int const* lda, lapack_int* iseed,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_slagge LAPACK_GLOBAL(slagge,SLAGGE)
+void LAPACK_slagge(
+    lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku,
+    float const* D,
+    float* A, lapack_int const* lda, lapack_int* iseed,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_zlagge LAPACK_GLOBAL(zlagge,ZLAGGE)
+void LAPACK_zlagge(
+    lapack_int const* m, lapack_int const* n, lapack_int const* kl, lapack_int const* ku,
+    double const* D,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int* iseed,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_claghe LAPACK_GLOBAL(claghe,CLAGHE)
+void LAPACK_claghe(
+    lapack_int const* n, lapack_int const* k,
+    float const* D,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int* iseed,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_zlaghe LAPACK_GLOBAL(zlaghe,ZLAGHE)
+void LAPACK_zlaghe(
+    lapack_int const* n, lapack_int const* k,
+    double const* D,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int* iseed,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_clagsy LAPACK_GLOBAL(clagsy,CLAGSY)
+void LAPACK_clagsy(
+    lapack_int const* n, lapack_int const* k,
+    float const* D,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int* iseed,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dlagsy LAPACK_GLOBAL(dlagsy,DLAGSY)
+void LAPACK_dlagsy(
+    lapack_int const* n, lapack_int const* k,
+    double const* D,
+    double* A, lapack_int const* lda, lapack_int* iseed,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_slagsy LAPACK_GLOBAL(slagsy,SLAGSY)
+void LAPACK_slagsy(
+    lapack_int const* n, lapack_int const* k,
+    float const* D,
+    float* A, lapack_int const* lda, lapack_int* iseed,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_zlagsy LAPACK_GLOBAL(zlagsy,ZLAGSY)
+void LAPACK_zlagsy(
+    lapack_int const* n, lapack_int const* k,
+    double const* D,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int* iseed,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_dlamch LAPACK_GLOBAL(dlamch,DLAMCH)
+double LAPACK_dlamch(
+    char const* cmach );
+
+#define LAPACK_slamch LAPACK_GLOBAL(slamch,SLAMCH)
+lapack_float_return LAPACK_slamch(
+    char const* cmach );
+
+#define LAPACK_clangb LAPACK_GLOBAL(clangb,CLANGB)
+lapack_float_return LAPACK_clangb(
+    char const* norm,
+    lapack_int const* n, lapack_int const* kl, lapack_int const* ku,
+    lapack_complex_float const* AB, lapack_int const* ldab,
+    float* work );
+
+#define LAPACK_dlangb LAPACK_GLOBAL(dlangb,DLANGB)
+double LAPACK_dlangb(
+    char const* norm,
+    lapack_int const* n, lapack_int const* kl, lapack_int const* ku,
+    double const* AB, lapack_int const* ldab,
+    double* work );
+
+#define LAPACK_slangb LAPACK_GLOBAL(slangb,SLANGB)
+lapack_float_return LAPACK_slangb(
+    char const* norm,
+    lapack_int const* n, lapack_int const* kl, lapack_int const* ku,
+    float const* AB, lapack_int const* ldab,
+    float* work );
+
+#define LAPACK_zlangb LAPACK_GLOBAL(zlangb,ZLANGB)
+double LAPACK_zlangb(
+    char const* norm,
+    lapack_int const* n, lapack_int const* kl, lapack_int const* ku,
+    lapack_complex_double const* AB, lapack_int const* ldab,
+    double* work );
+
+#define LAPACK_clange LAPACK_GLOBAL(clange,CLANGE)
+lapack_float_return LAPACK_clange(
+    char const* norm,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_dlange LAPACK_GLOBAL(dlange,DLANGE)
+double LAPACK_dlange(
+    char const* norm,
+    lapack_int const* m, lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    double* work );
+
+#define LAPACK_slange LAPACK_GLOBAL(slange,SLANGE)
+lapack_float_return LAPACK_slange(
+    char const* norm,
+    lapack_int const* m, lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_zlange LAPACK_GLOBAL(zlange,ZLANGE)
+double LAPACK_zlange(
+    char const* norm,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    double* work );
+
+#define LAPACK_clangt LAPACK_GLOBAL(clangt,CLANGT)
+lapack_float_return LAPACK_clangt(
+    char const* norm,
+    lapack_int const* n,
+    lapack_complex_float const* DL,
+    lapack_complex_float const* D,
+    lapack_complex_float const* DU );
+
+#define LAPACK_dlangt LAPACK_GLOBAL(dlangt,DLANGT)
+double LAPACK_dlangt(
+    char const* norm,
+    lapack_int const* n,
+    double const* DL,
+    double const* D,
+    double const* DU );
+
+#define LAPACK_slangt LAPACK_GLOBAL(slangt,SLANGT)
+lapack_float_return LAPACK_slangt(
+    char const* norm,
+    lapack_int const* n,
+    float const* DL,
+    float const* D,
+    float const* DU );
+
+#define LAPACK_zlangt LAPACK_GLOBAL(zlangt,ZLANGT)
+double LAPACK_zlangt(
+    char const* norm,
+    lapack_int const* n,
+    lapack_complex_double const* DL,
+    lapack_complex_double const* D,
+    lapack_complex_double const* DU );
+
+#define LAPACK_clanhb LAPACK_GLOBAL(clanhb,CLANHB)
+lapack_float_return LAPACK_clanhb(
+    char const* norm, char const* uplo,
+    lapack_int const* n, lapack_int const* k,
+    lapack_complex_float const* AB, lapack_int const* ldab,
+    float* work );
+
+#define LAPACK_zlanhb LAPACK_GLOBAL(zlanhb,ZLANHB)
+double LAPACK_zlanhb(
+    char const* norm, char const* uplo,
+    lapack_int const* n, lapack_int const* k,
+    lapack_complex_double const* AB, lapack_int const* ldab,
+    double* work );
+
+#define LAPACK_clanhe LAPACK_GLOBAL(clanhe,CLANHE)
+lapack_float_return LAPACK_clanhe(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_zlanhe LAPACK_GLOBAL(zlanhe,ZLANHE)
+double LAPACK_zlanhe(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    double* work );
+
+#define LAPACK_clanhp LAPACK_GLOBAL(clanhp,CLANHP)
+lapack_float_return LAPACK_clanhp(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* AP,
+    float* work );
+
+#define LAPACK_zlanhp LAPACK_GLOBAL(zlanhp,ZLANHP)
+double LAPACK_zlanhp(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* AP,
+    double* work );
+
+#define LAPACK_clanhs LAPACK_GLOBAL(clanhs,CLANHS)
+lapack_float_return LAPACK_clanhs(
+    char const* norm,
+    lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_dlanhs LAPACK_GLOBAL(dlanhs,DLANHS)
+double LAPACK_dlanhs(
+    char const* norm,
+    lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    double* work );
+
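The *lange functions return the selected norm directly; the work array is only referenced for the infinity norm (norm = 'I'), where it must hold m entries. For instance (helper name and include path assumed):

#include <stdlib.h>
#include <lapack.h>   /* assumed include name */

/* Illustrative helper: infinity norm of a column-major m x n matrix. */
double infinity_norm(lapack_int m, lapack_int n, double const* A, lapack_int lda)
{
    char norm = 'I';
    double* work = malloc((size_t)m * sizeof *work);  /* needed for 'I' only */
    double nrm = LAPACK_dlange(&norm, &m, &n, A, &lda, work);
    free(work);
    return nrm;
}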
+#define LAPACK_slanhs LAPACK_GLOBAL(slanhs,SLANHS)
+lapack_float_return LAPACK_slanhs(
+    char const* norm,
+    lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_zlanhs LAPACK_GLOBAL(zlanhs,ZLANHS)
+double LAPACK_zlanhs(
+    char const* norm,
+    lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    double* work );
+
+#define LAPACK_clanht LAPACK_GLOBAL(clanht,CLANHT)
+lapack_float_return LAPACK_clanht(
+    char const* norm,
+    lapack_int const* n,
+    float const* D,
+    lapack_complex_float const* E );
+
+#define LAPACK_zlanht LAPACK_GLOBAL(zlanht,ZLANHT)
+double LAPACK_zlanht(
+    char const* norm,
+    lapack_int const* n,
+    double const* D,
+    lapack_complex_double const* E );
+
+#define LAPACK_clansb LAPACK_GLOBAL(clansb,CLANSB)
+lapack_float_return LAPACK_clansb(
+    char const* norm, char const* uplo,
+    lapack_int const* n, lapack_int const* k,
+    lapack_complex_float const* AB, lapack_int const* ldab,
+    float* work );
+
+#define LAPACK_dlansb LAPACK_GLOBAL(dlansb,DLANSB)
+double LAPACK_dlansb(
+    char const* norm, char const* uplo,
+    lapack_int const* n, lapack_int const* k,
+    double const* AB, lapack_int const* ldab,
+    double* work );
+
+#define LAPACK_slansb LAPACK_GLOBAL(slansb,SLANSB)
+lapack_float_return LAPACK_slansb(
+    char const* norm, char const* uplo,
+    lapack_int const* n, lapack_int const* k,
+    float const* AB, lapack_int const* ldab,
+    float* work );
+
+#define LAPACK_zlansb LAPACK_GLOBAL(zlansb,ZLANSB)
+double LAPACK_zlansb(
+    char const* norm, char const* uplo,
+    lapack_int const* n, lapack_int const* k,
+    lapack_complex_double const* AB, lapack_int const* ldab,
+    double* work );
+
+#define LAPACK_clansp LAPACK_GLOBAL(clansp,CLANSP)
+lapack_float_return LAPACK_clansp(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* AP,
+    float* work );
+
+#define LAPACK_dlansp LAPACK_GLOBAL(dlansp,DLANSP)
+double LAPACK_dlansp(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    double const* AP,
+    double* work );
+
+#define LAPACK_slansp LAPACK_GLOBAL(slansp,SLANSP)
+lapack_float_return LAPACK_slansp(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    float const* AP,
+    float* work );
+
+#define LAPACK_zlansp LAPACK_GLOBAL(zlansp,ZLANSP)
+double LAPACK_zlansp(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* AP,
+    double* work );
+
+#define LAPACK_dlanst LAPACK_GLOBAL(dlanst,DLANST)
+double LAPACK_dlanst(
+    char const* norm,
+    lapack_int const* n,
+    double const* D,
+    double const* E );
+
+#define LAPACK_slanst LAPACK_GLOBAL(slanst,SLANST)
+lapack_float_return LAPACK_slanst(
+    char const* norm,
+    lapack_int const* n,
+    float const* D,
+    float const* E );
+
+#define LAPACK_clansy LAPACK_GLOBAL(clansy,CLANSY)
+lapack_float_return LAPACK_clansy(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_dlansy LAPACK_GLOBAL(dlansy,DLANSY)
+double LAPACK_dlansy(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    double* work );
+
+#define LAPACK_slansy LAPACK_GLOBAL(slansy,SLANSY)
+lapack_float_return LAPACK_slansy(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_zlansy LAPACK_GLOBAL(zlansy,ZLANSY)
+double LAPACK_zlansy(
+    char const* norm, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    double* work );
+
+#define LAPACK_clantb LAPACK_GLOBAL(clantb,CLANTB)
+lapack_float_return LAPACK_clantb(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n, lapack_int const* k,
+    lapack_complex_float const* AB, lapack_int const* ldab,
+    float* work );
+
+#define LAPACK_dlantb LAPACK_GLOBAL(dlantb,DLANTB)
+double LAPACK_dlantb(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n, lapack_int const* k,
+    double const* AB, lapack_int const* ldab,
+    double* work );
+
+#define LAPACK_slantb LAPACK_GLOBAL(slantb,SLANTB)
+lapack_float_return LAPACK_slantb(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n, lapack_int const* k,
+    float const* AB, lapack_int const* ldab,
+    float* work );
+
+#define LAPACK_zlantb LAPACK_GLOBAL(zlantb,ZLANTB)
+double LAPACK_zlantb(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n, lapack_int const* k,
+    lapack_complex_double const* AB, lapack_int const* ldab,
+    double* work );
+
+#define LAPACK_clantp LAPACK_GLOBAL(clantp,CLANTP)
+lapack_float_return LAPACK_clantp(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_float const* AP,
+    float* work );
+
+#define LAPACK_dlantp LAPACK_GLOBAL(dlantp,DLANTP)
+double LAPACK_dlantp(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    double const* AP,
+    double* work );
+
+#define LAPACK_slantp LAPACK_GLOBAL(slantp,SLANTP)
+lapack_float_return LAPACK_slantp(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    float const* AP,
+    float* work );
+
+#define LAPACK_zlantp LAPACK_GLOBAL(zlantp,ZLANTP)
+double LAPACK_zlantp(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_double const* AP,
+    double* work );
+
+#define LAPACK_clantr LAPACK_GLOBAL(clantr,CLANTR)
+lapack_float_return LAPACK_clantr(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_dlantr LAPACK_GLOBAL(dlantr,DLANTR)
+double LAPACK_dlantr(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* m, lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    double* work );
+
+#define LAPACK_slantr LAPACK_GLOBAL(slantr,SLANTR)
+lapack_float_return LAPACK_slantr(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* m, lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    float* work );
+
+#define LAPACK_zlantr LAPACK_GLOBAL(zlantr,ZLANTR)
+double LAPACK_zlantr(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    double* work );
+
+#define LAPACK_clapmr LAPACK_GLOBAL(clapmr,CLAPMR)
+void LAPACK_clapmr(
+    lapack_logical const* forwrd, lapack_int const* m, lapack_int const* n,
+    lapack_complex_float* X, lapack_int const* ldx, lapack_int* K );
+
+#define LAPACK_dlapmr LAPACK_GLOBAL(dlapmr,DLAPMR)
+void LAPACK_dlapmr(
+    lapack_logical const* forwrd, lapack_int const* m, lapack_int const* n,
+    double* X, lapack_int const* ldx, lapack_int* K );
+
+#define LAPACK_slapmr LAPACK_GLOBAL(slapmr,SLAPMR)
+void LAPACK_slapmr(
+    lapack_logical const* forwrd, lapack_int const* m, lapack_int const* n,
+    float* X, lapack_int const* ldx, lapack_int* K );
+
+#define LAPACK_zlapmr LAPACK_GLOBAL(zlapmr,ZLAPMR)
+void LAPACK_zlapmr(
+    lapack_logical const* forwrd, lapack_int const* m, lapack_int const* n,
+    lapack_complex_double* X, lapack_int const* ldx, lapack_int* K );
+
+#define LAPACK_clapmt LAPACK_GLOBAL(clapmt,CLAPMT)
+void LAPACK_clapmt(
+    lapack_logical const* forwrd, lapack_int const* m, lapack_int const* n,
+    lapack_complex_float* X, lapack_int const* ldx, lapack_int* K );
+
+#define LAPACK_dlapmt LAPACK_GLOBAL(dlapmt,DLAPMT)
+void LAPACK_dlapmt(
+    lapack_logical const* forwrd, lapack_int const* m, lapack_int const* n,
+    double* X, lapack_int const* ldx, lapack_int* K );
+
+#define LAPACK_slapmt LAPACK_GLOBAL(slapmt,SLAPMT)
+void LAPACK_slapmt(
+    lapack_logical const* forwrd, lapack_int const* m, lapack_int const* n,
+    float* X, lapack_int const* ldx, lapack_int* K );
+
+#define LAPACK_zlapmt LAPACK_GLOBAL(zlapmt,ZLAPMT)
+void LAPACK_zlapmt(
+    lapack_logical const* forwrd, lapack_int const* m, lapack_int const* n,
+    lapack_complex_double* X, lapack_int const* ldx, lapack_int* K );
+
+#define LAPACK_dlapy2 LAPACK_GLOBAL(dlapy2,DLAPY2)
+double LAPACK_dlapy2(
+    double const* x,
+    double const* y );
+
+#define LAPACK_slapy2 LAPACK_GLOBAL(slapy2,SLAPY2)
+lapack_float_return LAPACK_slapy2(
+    float const* x,
+    float const* y );
+
+#define LAPACK_dlapy3 LAPACK_GLOBAL(dlapy3,DLAPY3)
+double LAPACK_dlapy3(
+    double const* x,
+    double const* y,
+    double const* z );
+
+#define LAPACK_slapy3 LAPACK_GLOBAL(slapy3,SLAPY3)
+lapack_float_return LAPACK_slapy3(
+    float const* x,
+    float const* y,
+    float const* z );
+
+#define LAPACK_clarcm LAPACK_GLOBAL(clarcm,CLARCM)
+void LAPACK_clarcm(
+    lapack_int const* m, lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    lapack_complex_float const* B, lapack_int const* ldb,
+    lapack_complex_float* C, lapack_int const* ldc,
+    float* rwork );
+
+#define LAPACK_zlarcm LAPACK_GLOBAL(zlarcm,ZLARCM)
+void LAPACK_zlarcm(
+    lapack_int const* m, lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    lapack_complex_double const* B, lapack_int const* ldb,
+    lapack_complex_double* C, lapack_int const* ldc,
+    double* rwork );
+
+#define LAPACK_clarf LAPACK_GLOBAL(clarf,CLARF)
+void LAPACK_clarf(
+    char const* side,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* V, lapack_int const* incv,
+    lapack_complex_float const* tau,
+    lapack_complex_float* C, lapack_int const* ldc,
+    lapack_complex_float* work );
+
+#define LAPACK_dlarf LAPACK_GLOBAL(dlarf,DLARF)
+void LAPACK_dlarf(
+    char const* side,
+    lapack_int const* m, lapack_int const* n,
+    double const* V, lapack_int const* incv,
+    double const* tau,
+    double* C, lapack_int const* ldc,
+    double* work );
+
+#define LAPACK_slarf LAPACK_GLOBAL(slarf,SLARF)
+void LAPACK_slarf(
+    char const* side,
+    lapack_int const* m, lapack_int const* n,
+    float const* V, lapack_int const* incv,
+    float const* tau,
+    float* C, lapack_int const* ldc,
+    float* work );
+
+#define LAPACK_zlarf LAPACK_GLOBAL(zlarf,ZLARF)
+void LAPACK_zlarf(
+    char const* side,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* V, lapack_int const* incv,
+    lapack_complex_double const* tau,
+    lapack_complex_double* C, lapack_int const* ldc,
+    lapack_complex_double* work );
+
+#define LAPACK_clarfb LAPACK_GLOBAL(clarfb,CLARFB)
+void LAPACK_clarfb(
+    char const* side, char const* trans, char const* direct, char const* storev,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k,
+    lapack_complex_float const* V, lapack_int const* ldv,
+    lapack_complex_float const* T, lapack_int const* ldt,
+    lapack_complex_float* C, lapack_int const* ldc,
+    lapack_complex_float* work, lapack_int const* ldwork );
+
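dlapy2 and dlapy3 compute sqrt(x^2 + y^2) and sqrt(x^2 + y^2 + z^2) with internal scaling, so they stay finite where the naive formula would overflow in the squares. For example:

#include <lapack.h>   /* assumed include name */

/* sqrt(x^2 + y^2) stays finite here even though x*x alone would overflow. */
double safe_hypot_demo(void)
{
    double x = 3.0e300, y = 4.0e300;
    return LAPACK_dlapy2(&x, &y);   /* 5.0e300 */
}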
+#define LAPACK_dlarfb LAPACK_GLOBAL(dlarfb,DLARFB)
+void LAPACK_dlarfb(
+    char const* side, char const* trans, char const* direct, char const* storev,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k,
+    double const* V, lapack_int const* ldv,
+    double const* T, lapack_int const* ldt,
+    double* C, lapack_int const* ldc,
+    double* work, lapack_int const* ldwork );
+
+#define LAPACK_slarfb LAPACK_GLOBAL(slarfb,SLARFB)
+void LAPACK_slarfb(
+    char const* side, char const* trans, char const* direct, char const* storev,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k,
+    float const* V, lapack_int const* ldv,
+    float const* T, lapack_int const* ldt,
+    float* C, lapack_int const* ldc,
+    float* work, lapack_int const* ldwork );
+
+#define LAPACK_zlarfb LAPACK_GLOBAL(zlarfb,ZLARFB)
+void LAPACK_zlarfb(
+    char const* side, char const* trans, char const* direct, char const* storev,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k,
+    lapack_complex_double const* V, lapack_int const* ldv,
+    lapack_complex_double const* T, lapack_int const* ldt,
+    lapack_complex_double* C, lapack_int const* ldc,
+    lapack_complex_double* work, lapack_int const* ldwork );
+
+#define LAPACK_clarfg LAPACK_GLOBAL(clarfg,CLARFG)
+void LAPACK_clarfg(
+    lapack_int const* n,
+    lapack_complex_float* alpha,
+    lapack_complex_float* X, lapack_int const* incx,
+    lapack_complex_float* tau );
+
+#define LAPACK_dlarfg LAPACK_GLOBAL(dlarfg,DLARFG)
+void LAPACK_dlarfg(
+    lapack_int const* n,
+    double* alpha,
+    double* X, lapack_int const* incx,
+    double* tau );
+
+#define LAPACK_slarfg LAPACK_GLOBAL(slarfg,SLARFG)
+void LAPACK_slarfg(
+    lapack_int const* n,
+    float* alpha,
+    float* X, lapack_int const* incx,
+    float* tau );
+
+#define LAPACK_zlarfg LAPACK_GLOBAL(zlarfg,ZLARFG)
+void LAPACK_zlarfg(
+    lapack_int const* n,
+    lapack_complex_double* alpha,
+    lapack_complex_double* X, lapack_int const* incx,
+    lapack_complex_double* tau );
+
+#define LAPACK_clarft LAPACK_GLOBAL(clarft,CLARFT)
+void LAPACK_clarft(
+    char const* direct, char const* storev,
+    lapack_int const* n, lapack_int const* k,
+    lapack_complex_float const* V, lapack_int const* ldv,
+    lapack_complex_float const* tau,
+    lapack_complex_float* T, lapack_int const* ldt );
+
+#define LAPACK_dlarft LAPACK_GLOBAL(dlarft,DLARFT)
+void LAPACK_dlarft(
+    char const* direct, char const* storev,
+    lapack_int const* n, lapack_int const* k,
+    double const* V, lapack_int const* ldv,
+    double const* tau,
+    double* T, lapack_int const* ldt );
+
+#define LAPACK_slarft LAPACK_GLOBAL(slarft,SLARFT)
+void LAPACK_slarft(
+    char const* direct, char const* storev,
+    lapack_int const* n, lapack_int const* k,
+    float const* V, lapack_int const* ldv,
+    float const* tau,
+    float* T, lapack_int const* ldt );
+
+#define LAPACK_zlarft LAPACK_GLOBAL(zlarft,ZLARFT)
+void LAPACK_zlarft(
+    char const* direct, char const* storev,
+    lapack_int const* n, lapack_int const* k,
+    lapack_complex_double const* V, lapack_int const* ldv,
+    lapack_complex_double const* tau,
+    lapack_complex_double* T, lapack_int const* ldt );
+
+#define LAPACK_clarfx LAPACK_GLOBAL(clarfx,CLARFX)
+void LAPACK_clarfx(
+    char const* side,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* V,
+    lapack_complex_float const* tau,
+    lapack_complex_float* C, lapack_int const* ldc,
+    lapack_complex_float* work );
+
+#define LAPACK_dlarfx LAPACK_GLOBAL(dlarfx,DLARFX)
+void LAPACK_dlarfx(
+    char const* side,
+    lapack_int const* m, lapack_int const* n,
+    double const* V,
+    double const* tau,
+    double* C, lapack_int const* ldc,
+    double* work );
+
+#define LAPACK_slarfx LAPACK_GLOBAL(slarfx,SLARFX)
+void LAPACK_slarfx(
+    char const* side,
+    lapack_int const* m, lapack_int const* n,
+    float const* V,
+    float const* tau,
+    float* C, lapack_int const* ldc,
+    float* work );
+
+#define LAPACK_zlarfx LAPACK_GLOBAL(zlarfx,ZLARFX)
+void LAPACK_zlarfx(
+    char const* side,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* V,
+    lapack_complex_double const* tau,
+    lapack_complex_double* C, lapack_int const* ldc,
+    lapack_complex_double* work );
+
+#define LAPACK_clarnv LAPACK_GLOBAL(clarnv,CLARNV)
+void LAPACK_clarnv(
+    lapack_int const* idist, lapack_int* iseed, lapack_int const* n,
+    lapack_complex_float* X );
+
+#define LAPACK_dlarnv LAPACK_GLOBAL(dlarnv,DLARNV)
+void LAPACK_dlarnv(
+    lapack_int const* idist, lapack_int* iseed, lapack_int const* n,
+    double* X );
+
+#define LAPACK_slarnv LAPACK_GLOBAL(slarnv,SLARNV)
+void LAPACK_slarnv(
+    lapack_int const* idist, lapack_int* iseed, lapack_int const* n,
+    float* X );
+
+#define LAPACK_zlarnv LAPACK_GLOBAL(zlarnv,ZLARNV)
+void LAPACK_zlarnv(
+    lapack_int const* idist, lapack_int* iseed, lapack_int const* n,
+    lapack_complex_double* X );
+
+#define LAPACK_dlartgp LAPACK_GLOBAL(dlartgp,DLARTGP)
+void LAPACK_dlartgp(
+    double const* f,
+    double const* g,
+    double* cs,
+    double* sn,
+    double* r );
+
+#define LAPACK_slartgp LAPACK_GLOBAL(slartgp,SLARTGP)
+void LAPACK_slartgp(
+    float const* f,
+    float const* g,
+    float* cs,
+    float* sn,
+    float* r );
+
+#define LAPACK_dlartgs LAPACK_GLOBAL(dlartgs,DLARTGS)
+void LAPACK_dlartgs(
+    double const* x,
+    double const* y,
+    double const* sigma,
+    double* cs,
+    double* sn );
+
+#define LAPACK_slartgs LAPACK_GLOBAL(slartgs,SLARTGS)
+void LAPACK_slartgs(
+    float const* x,
+    float const* y,
+    float const* sigma,
+    float* cs,
+    float* sn );
+
+#define LAPACK_clascl LAPACK_GLOBAL(clascl,CLASCL)
+void LAPACK_clascl(
+    char const* type,
+    lapack_int const* kl, lapack_int const* ku,
+    float const* cfrom,
+    float const* cto, lapack_int const* m, lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_dlascl LAPACK_GLOBAL(dlascl,DLASCL)
+void LAPACK_dlascl(
+    char const* type,
+    lapack_int const* kl, lapack_int const* ku,
+    double const* cfrom,
+    double const* cto, lapack_int const* m, lapack_int const* n,
+    double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_slascl LAPACK_GLOBAL(slascl,SLASCL)
+void LAPACK_slascl(
+    char const* type,
+    lapack_int const* kl, lapack_int const* ku,
+    float const* cfrom,
+    float const* cto, lapack_int const* m, lapack_int const* n,
+    float* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_zlascl LAPACK_GLOBAL(zlascl,ZLASCL)
+void LAPACK_zlascl(
+    char const* type,
+    lapack_int const* kl, lapack_int const* ku,
+    double const* cfrom,
+    double const* cto, lapack_int const* m, lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_claset LAPACK_GLOBAL(claset,CLASET)
+void LAPACK_claset(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* alpha,
+    lapack_complex_float const* beta,
+    lapack_complex_float* A, lapack_int const* lda );
+
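The *larnv routines fill a vector with random numbers; iseed is a 4-element state updated in place, whose entries must lie in [0, 4095] with iseed[3] odd, per the Fortran documentation. A sketch (helper name and include path assumed):

#include <lapack.h>   /* assumed include name */

/* Illustrative helper: fill X with n uniform(0,1) random numbers. */
void random_vector(lapack_int n, double* X)
{
    lapack_int idist = 1;                       /* 1: uniform(0,1), 2: uniform(-1,1), 3: normal(0,1) */
    static lapack_int iseed[4] = {0, 0, 0, 1};  /* state, updated in place across calls */
    LAPACK_dlarnv(&idist, iseed, &n, X);
}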
+#define LAPACK_dlaset LAPACK_GLOBAL(dlaset,DLASET)
+void LAPACK_dlaset(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    double const* alpha,
+    double const* beta,
+    double* A, lapack_int const* lda );
+
+#define LAPACK_slaset LAPACK_GLOBAL(slaset,SLASET)
+void LAPACK_slaset(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    float const* alpha,
+    float const* beta,
+    float* A, lapack_int const* lda );
+
+#define LAPACK_zlaset LAPACK_GLOBAL(zlaset,ZLASET)
+void LAPACK_zlaset(
+    char const* uplo,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* alpha,
+    lapack_complex_double const* beta,
+    lapack_complex_double* A, lapack_int const* lda );
+
+#define LAPACK_dlasrt LAPACK_GLOBAL(dlasrt,DLASRT)
+void LAPACK_dlasrt(
+    char const* id,
+    lapack_int const* n,
+    double* D,
+    lapack_int* info );
+
+#define LAPACK_slasrt LAPACK_GLOBAL(slasrt,SLASRT)
+void LAPACK_slasrt(
+    char const* id,
+    lapack_int const* n,
+    float* D,
+    lapack_int* info );
+
+#define LAPACK_classq LAPACK_GLOBAL(classq,CLASSQ)
+void LAPACK_classq(
+    lapack_int const* n,
+    lapack_complex_float const* X, lapack_int const* incx,
+    float* scale,
+    float* sumsq );
+
+#define LAPACK_dlassq LAPACK_GLOBAL(dlassq,DLASSQ)
+void LAPACK_dlassq(
+    lapack_int const* n,
+    double const* X, lapack_int const* incx,
+    double* scale,
+    double* sumsq );
+
+#define LAPACK_slassq LAPACK_GLOBAL(slassq,SLASSQ)
+void LAPACK_slassq(
+    lapack_int const* n,
+    float const* X, lapack_int const* incx,
+    float* scale,
+    float* sumsq );
+
+#define LAPACK_zlassq LAPACK_GLOBAL(zlassq,ZLASSQ)
+void LAPACK_zlassq(
+    lapack_int const* n,
+    lapack_complex_double const* X, lapack_int const* incx,
+    double* scale,
+    double* sumsq );
+
+#define LAPACK_claswp LAPACK_GLOBAL(claswp,CLASWP)
+void LAPACK_claswp(
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int const* k1, lapack_int const* k2, lapack_int const* ipiv, lapack_int const* incx );
+
+#define LAPACK_dlaswp LAPACK_GLOBAL(dlaswp,DLASWP)
+void LAPACK_dlaswp(
+    lapack_int const* n,
+    double* A, lapack_int const* lda, lapack_int const* k1, lapack_int const* k2, lapack_int const* ipiv, lapack_int const* incx );
+
+#define LAPACK_slaswp LAPACK_GLOBAL(slaswp,SLASWP)
+void LAPACK_slaswp(
+    lapack_int const* n,
+    float* A, lapack_int const* lda, lapack_int const* k1, lapack_int const* k2, lapack_int const* ipiv, lapack_int const* incx );
+
+#define LAPACK_zlaswp LAPACK_GLOBAL(zlaswp,ZLASWP)
+void LAPACK_zlaswp(
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int const* k1, lapack_int const* k2, lapack_int const* ipiv, lapack_int const* incx );
+
+#define LAPACK_clatms LAPACK_GLOBAL(clatms,CLATMS)
+void LAPACK_clatms(
+    lapack_int const* m, lapack_int const* n, char const* dist,
+    lapack_int* iseed, char const* sym,
+    float* D,
+    lapack_int const* mode,
+    float const* cond,
+    float const* dmax, lapack_int const* kl, lapack_int const* ku, char const* pack,
+    lapack_complex_float* A,
+    lapack_int const* lda,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dlatms LAPACK_GLOBAL(dlatms,DLATMS)
+void LAPACK_dlatms(
+    lapack_int const* m, lapack_int const* n, char const* dist,
+    lapack_int* iseed, char const* sym,
+    double* D,
+    lapack_int const* mode,
+    double const* cond,
+    double const* dmax, lapack_int const* kl, lapack_int const* ku, char const* pack,
+    double* A,
+    lapack_int const* lda,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_slatms LAPACK_GLOBAL(slatms,SLATMS)
+void LAPACK_slatms(
+    lapack_int const* m, lapack_int const* n, char const* dist,
+    lapack_int* iseed, char const* sym,
+    float* D,
+    lapack_int const* mode,
+    float const* cond,
+    float const* dmax, lapack_int const* kl, lapack_int const* ku, char const* pack,
+    float* A,
+    lapack_int const* lda,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_zlatms LAPACK_GLOBAL(zlatms,ZLATMS)
+void LAPACK_zlatms(
+    lapack_int const* m, lapack_int const* n, char const* dist,
+    lapack_int* iseed, char const* sym,
+    double* D,
+    lapack_int const* mode,
+    double const* cond,
+    double const* dmax, lapack_int const* kl, lapack_int const* ku, char const* pack,
+    lapack_complex_double* A,
+    lapack_int const* lda,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_clauum LAPACK_GLOBAL(clauum,CLAUUM)
+void LAPACK_clauum(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_dlauum LAPACK_GLOBAL(dlauum,DLAUUM)
+void LAPACK_dlauum(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_slauum LAPACK_GLOBAL(slauum,SLAUUM)
+void LAPACK_slauum(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_zlauum LAPACK_GLOBAL(zlauum,ZLAUUM)
+void LAPACK_zlauum(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_ilaver LAPACK_GLOBAL(ilaver,ILAVER)
+void LAPACK_ilaver(
+    lapack_int* vers_major, lapack_int* vers_minor, lapack_int* vers_patch );
+
+#define LAPACK_dopgtr LAPACK_GLOBAL(dopgtr,DOPGTR)
+void LAPACK_dopgtr(
+    char const* uplo,
+    lapack_int const* n,
+    double const* AP,
+    double const* tau,
+    double* Q, lapack_int const* ldq,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_sopgtr LAPACK_GLOBAL(sopgtr,SOPGTR)
+void LAPACK_sopgtr(
+    char const* uplo,
+    lapack_int const* n,
+    float const* AP,
+    float const* tau,
+    float* Q, lapack_int const* ldq,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_dopmtr LAPACK_GLOBAL(dopmtr,DOPMTR)
+void LAPACK_dopmtr(
+    char const* side, char const* uplo, char const* trans,
+    lapack_int const* m, lapack_int const* n,
+    double const* AP,
+    double const* tau,
+    double* C, lapack_int const* ldc,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_sopmtr LAPACK_GLOBAL(sopmtr,SOPMTR)
+void LAPACK_sopmtr(
+    char const* side, char const* uplo, char const* trans,
+    lapack_int const* m, lapack_int const* n,
+    float const* AP,
+    float const* tau,
+    float* C, lapack_int const* ldc,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_dorbdb LAPACK_GLOBAL(dorbdb,DORBDB)
+void LAPACK_dorbdb(
+    char const* trans, char const* signs,
+    lapack_int const* m, lapack_int const* p, lapack_int const* q,
+    double* X11, lapack_int const* ldx11,
+    double* X12, lapack_int const* ldx12,
+    double* X21, lapack_int const* ldx21,
+    double* X22, lapack_int const* ldx22,
+    double* theta,
+    double* phi,
+    double* TAUP1,
+    double* TAUP2,
+    double* TAUQ1,
+    double* TAUQ2,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_sorbdb LAPACK_GLOBAL(sorbdb,SORBDB)
+void LAPACK_sorbdb(
+    char const* trans, char const* signs,
+    lapack_int const* m, lapack_int const* p, lapack_int const* q,
+    float* X11, lapack_int const* ldx11,
+    float* X12, lapack_int const* ldx12,
+    float* X21, lapack_int const* ldx21,
+    float* X22, lapack_int const* ldx22,
+    float* theta,
+    float* phi,
+    float* TAUP1,
+    float* TAUP2,
+    float* TAUQ1,
+    float* TAUQ2,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
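ilaver reports the version of the bundled reference LAPACK at run time, which is handy for verifying which routines a given build can be expected to export:

#include <stdio.h>
#include <lapack.h>   /* assumed include name */

void print_lapack_version(void)
{
    lapack_int major, minor, patch;
    LAPACK_ilaver(&major, &minor, &patch);
    printf("LAPACK %d.%d.%d\n", (int)major, (int)minor, (int)patch);
}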
+#define LAPACK_dorcsd LAPACK_GLOBAL(dorcsd,DORCSD)
+void LAPACK_dorcsd(
+    char const* jobu1, char const* jobu2, char const* jobv1t, char const* jobv2t, char const* trans, char const* signs,
+    lapack_int const* m, lapack_int const* p, lapack_int const* q,
+    double* X11, lapack_int const* ldx11,
+    double* X12, lapack_int const* ldx12,
+    double* X21, lapack_int const* ldx21,
+    double* X22, lapack_int const* ldx22,
+    double* theta,
+    double* U1, lapack_int const* ldu1,
+    double* U2, lapack_int const* ldu2,
+    double* V1T, lapack_int const* ldv1t,
+    double* V2T, lapack_int const* ldv2t,
+    double* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_sorcsd LAPACK_GLOBAL(sorcsd,SORCSD)
+void LAPACK_sorcsd(
+    char const* jobu1, char const* jobu2, char const* jobv1t, char const* jobv2t, char const* trans, char const* signs,
+    lapack_int const* m, lapack_int const* p, lapack_int const* q,
+    float* X11, lapack_int const* ldx11,
+    float* X12, lapack_int const* ldx12,
+    float* X21, lapack_int const* ldx21,
+    float* X22, lapack_int const* ldx22,
+    float* theta,
+    float* U1, lapack_int const* ldu1,
+    float* U2, lapack_int const* ldu2,
+    float* V1T, lapack_int const* ldv1t,
+    float* V2T, lapack_int const* ldv2t,
+    float* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_dorcsd2by1 LAPACK_GLOBAL(dorcsd2by1,DORCSD2BY1)
+void LAPACK_dorcsd2by1(
+    char const* jobu1, char const* jobu2, char const* jobv1t,
+    lapack_int const* m, lapack_int const* p, lapack_int const* q,
+    double* X11, lapack_int const* ldx11,
+    double* X21, lapack_int const* ldx21,
+    double* theta,
+    double* U1, lapack_int const* ldu1,
+    double* U2, lapack_int const* ldu2,
+    double* V1T, lapack_int const* ldv1t,
+    double* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_sorcsd2by1 LAPACK_GLOBAL(sorcsd2by1,SORCSD2BY1)
+void LAPACK_sorcsd2by1(
+    char const* jobu1, char const* jobu2, char const* jobv1t,
+    lapack_int const* m, lapack_int const* p, lapack_int const* q,
+    float* X11, lapack_int const* ldx11,
+    float* X21, lapack_int const* ldx21,
+    float* theta,
+    float* U1, lapack_int const* ldu1,
+    float* U2, lapack_int const* ldu2,
+    float* V1T, lapack_int const* ldv1t,
+    float* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_dorgbr LAPACK_GLOBAL(dorgbr,DORGBR)
+void LAPACK_dorgbr(
+    char const* vect,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k,
+    double* A, lapack_int const* lda,
+    double const* tau,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_sorgbr LAPACK_GLOBAL(sorgbr,SORGBR)
+void LAPACK_sorgbr(
+    char const* vect,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k,
+    float* A, lapack_int const* lda,
+    float const* tau,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dorghr LAPACK_GLOBAL(dorghr,DORGHR)
+void LAPACK_dorghr(
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    double* A, lapack_int const* lda,
+    double const* tau,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_sorghr LAPACK_GLOBAL(sorghr,SORGHR)
+void LAPACK_sorghr(
+    lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi,
+    float* A, lapack_int const* lda,
+    float const* tau,
+    float* work, lapack_int const* lwork,
lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dorglq LAPACK_GLOBAL(dorglq,DORGLQ) +void LAPACK_dorglq( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double* A, lapack_int const* lda, + double const* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sorglq LAPACK_GLOBAL(sorglq,SORGLQ) +void LAPACK_sorglq( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float* A, lapack_int const* lda, + float const* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dorgql LAPACK_GLOBAL(dorgql,DORGQL) +void LAPACK_dorgql( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double* A, lapack_int const* lda, + double const* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sorgql LAPACK_GLOBAL(sorgql,SORGQL) +void LAPACK_sorgql( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float* A, lapack_int const* lda, + float const* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dorgqr LAPACK_GLOBAL(dorgqr,DORGQR) +void LAPACK_dorgqr( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double* A, lapack_int const* lda, + double const* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sorgqr LAPACK_GLOBAL(sorgqr,SORGQR) +void LAPACK_sorgqr( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float* A, lapack_int const* lda, + float const* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dorgrq LAPACK_GLOBAL(dorgrq,DORGRQ) +void LAPACK_dorgrq( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double* A, lapack_int const* lda, + double const* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sorgrq LAPACK_GLOBAL(sorgrq,SORGRQ) +void LAPACK_sorgrq( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float* A, lapack_int const* lda, + float const* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dorgtr LAPACK_GLOBAL(dorgtr,DORGTR) +void LAPACK_dorgtr( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double const* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sorgtr LAPACK_GLOBAL(sorgtr,SORGTR) +void LAPACK_sorgtr( + char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float const* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dormbr LAPACK_GLOBAL(dormbr,DORMBR) +void LAPACK_dormbr( + char const* vect, char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double const* A, lapack_int const* lda, + double const* tau, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sormbr LAPACK_GLOBAL(sormbr,SORMBR) +void LAPACK_sormbr( + char const* vect, char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float const* A, lapack_int const* lda, + float const* tau, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dormhr LAPACK_GLOBAL(dormhr,DORMHR) +void LAPACK_dormhr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + double const* A, lapack_int const* lda, + 
double const* tau, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sormhr LAPACK_GLOBAL(sormhr,SORMHR) +void LAPACK_sormhr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + float const* A, lapack_int const* lda, + float const* tau, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dormlq LAPACK_GLOBAL(dormlq,DORMLQ) +void LAPACK_dormlq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double const* A, lapack_int const* lda, + double const* tau, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sormlq LAPACK_GLOBAL(sormlq,SORMLQ) +void LAPACK_sormlq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float const* A, lapack_int const* lda, + float const* tau, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dormql LAPACK_GLOBAL(dormql,DORMQL) +void LAPACK_dormql( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double const* A, lapack_int const* lda, + double const* tau, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sormql LAPACK_GLOBAL(sormql,SORMQL) +void LAPACK_sormql( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float const* A, lapack_int const* lda, + float const* tau, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dormqr LAPACK_GLOBAL(dormqr,DORMQR) +void LAPACK_dormqr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double const* A, lapack_int const* lda, + double const* tau, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sormqr LAPACK_GLOBAL(sormqr,SORMQR) +void LAPACK_sormqr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float const* A, lapack_int const* lda, + float const* tau, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dormrq LAPACK_GLOBAL(dormrq,DORMRQ) +void LAPACK_dormrq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + double const* A, lapack_int const* lda, + double const* tau, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sormrq LAPACK_GLOBAL(sormrq,SORMRQ) +void LAPACK_sormrq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + float const* A, lapack_int const* lda, + float const* tau, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dormrz LAPACK_GLOBAL(dormrz,DORMRZ) +void LAPACK_dormrz( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, + double const* A, lapack_int const* lda, + double const* tau, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); 
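Every routine in this header uses the Fortran binding convention: all arguments, scalars included, are passed by pointer, and matrices are column-major. As a worked illustration of the *orgqr/*ormqr family above, here is a minimal sketch that QR-factors a matrix and forms the explicit Q (illustrative only; it assumes this header is available as "lapack.h" and uses LAPACK_dgeqrf, which is declared earlier in this file):

    /* QR-factor a column-major 4x2 matrix, then overwrite A with the
     * first k columns of Q. Note every scalar is passed by address. */
    #include <stdio.h>
    #include "lapack.h"

    int main(void) {
        lapack_int m = 4, n = 2, k = 2, lda = 4, lwork = 64, info = 0;
        double A[8] = { 1, 2, 3, 4,   5, 6, 7, 8 };  /* two columns */
        double tau[2], work[64];
        LAPACK_dgeqrf(&m, &n, A, &lda, tau, work, &lwork, &info);
        if (info == 0)
            LAPACK_dorgqr(&m, &n, &k, A, &lda, tau, work, &lwork, &info);
        printf("info = %d\n", (int)info);  /* 0 on success */
        return (int)info;
    }

When Q is only needed as an operator, LAPACK_dormqr applies it (or its transpose) to another matrix directly, which avoids forming Q and is usually cheaper.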
+ +#define LAPACK_sormrz LAPACK_GLOBAL(sormrz,SORMRZ) +void LAPACK_sormrz( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, + float const* A, lapack_int const* lda, + float const* tau, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dormtr LAPACK_GLOBAL(dormtr,DORMTR) +void LAPACK_dormtr( + char const* side, char const* uplo, char const* trans, + lapack_int const* m, lapack_int const* n, + double const* A, lapack_int const* lda, + double const* tau, + double* C, lapack_int const* ldc, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_sormtr LAPACK_GLOBAL(sormtr,SORMTR) +void LAPACK_sormtr( + char const* side, char const* uplo, char const* trans, + lapack_int const* m, lapack_int const* n, + float const* A, lapack_int const* lda, + float const* tau, + float* C, lapack_int const* ldc, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cpbcon LAPACK_GLOBAL(cpbcon,CPBCON) +void LAPACK_cpbcon( + char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float const* AB, lapack_int const* ldab, + float const* anorm, + float* rcond, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dpbcon LAPACK_GLOBAL(dpbcon,DPBCON) +void LAPACK_dpbcon( + char const* uplo, + lapack_int const* n, lapack_int const* kd, + double const* AB, lapack_int const* ldab, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_spbcon LAPACK_GLOBAL(spbcon,SPBCON) +void LAPACK_spbcon( + char const* uplo, + lapack_int const* n, lapack_int const* kd, + float const* AB, lapack_int const* ldab, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zpbcon LAPACK_GLOBAL(zpbcon,ZPBCON) +void LAPACK_zpbcon( + char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double const* AB, lapack_int const* ldab, + double const* anorm, + double* rcond, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cpbequ LAPACK_GLOBAL(cpbequ,CPBEQU) +void LAPACK_cpbequ( + char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_float const* AB, lapack_int const* ldab, + float* S, + float* scond, + float* amax, + lapack_int* info ); + +#define LAPACK_dpbequ LAPACK_GLOBAL(dpbequ,DPBEQU) +void LAPACK_dpbequ( + char const* uplo, + lapack_int const* n, lapack_int const* kd, + double const* AB, lapack_int const* ldab, + double* S, + double* scond, + double* amax, + lapack_int* info ); + +#define LAPACK_spbequ LAPACK_GLOBAL(spbequ,SPBEQU) +void LAPACK_spbequ( + char const* uplo, + lapack_int const* n, lapack_int const* kd, + float const* AB, lapack_int const* ldab, + float* S, + float* scond, + float* amax, + lapack_int* info ); + +#define LAPACK_zpbequ LAPACK_GLOBAL(zpbequ,ZPBEQU) +void LAPACK_zpbequ( + char const* uplo, + lapack_int const* n, lapack_int const* kd, + lapack_complex_double const* AB, lapack_int const* ldab, + double* S, + double* scond, + double* amax, + lapack_int* info ); + +#define LAPACK_cpbrfs LAPACK_GLOBAL(cpbrfs,CPBRFS) +void LAPACK_cpbrfs( + char const* uplo, + lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs, + lapack_complex_float const* AB, lapack_int const* ldab, + lapack_complex_float const* AFB, lapack_int const* ldafb, + lapack_complex_float const* B, 
lapack_int const* ldb,
+ lapack_complex_float* X, lapack_int const* ldx,
+ float* ferr,
+ float* berr,
+ lapack_complex_float* work,
+ float* rwork,
+ lapack_int* info );
+
+#define LAPACK_dpbrfs LAPACK_GLOBAL(dpbrfs,DPBRFS)
+void LAPACK_dpbrfs(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ double const* AB, lapack_int const* ldab,
+ double const* AFB, lapack_int const* ldafb,
+ double const* B, lapack_int const* ldb,
+ double* X, lapack_int const* ldx,
+ double* ferr,
+ double* berr,
+ double* work,
+ lapack_int* iwork,
+ lapack_int* info );
+
+#define LAPACK_spbrfs LAPACK_GLOBAL(spbrfs,SPBRFS)
+void LAPACK_spbrfs(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ float const* AB, lapack_int const* ldab,
+ float const* AFB, lapack_int const* ldafb,
+ float const* B, lapack_int const* ldb,
+ float* X, lapack_int const* ldx,
+ float* ferr,
+ float* berr,
+ float* work,
+ lapack_int* iwork,
+ lapack_int* info );
+
+#define LAPACK_zpbrfs LAPACK_GLOBAL(zpbrfs,ZPBRFS)
+void LAPACK_zpbrfs(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ lapack_complex_double const* AB, lapack_int const* ldab,
+ lapack_complex_double const* AFB, lapack_int const* ldafb,
+ lapack_complex_double const* B, lapack_int const* ldb,
+ lapack_complex_double* X, lapack_int const* ldx,
+ double* ferr,
+ double* berr,
+ lapack_complex_double* work,
+ double* rwork,
+ lapack_int* info );
+
+#define LAPACK_cpbstf LAPACK_GLOBAL(cpbstf,CPBSTF)
+void LAPACK_cpbstf(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ lapack_complex_float* AB, lapack_int const* ldab,
+ lapack_int* info );
+
+#define LAPACK_dpbstf LAPACK_GLOBAL(dpbstf,DPBSTF)
+void LAPACK_dpbstf(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ double* AB, lapack_int const* ldab,
+ lapack_int* info );
+
+#define LAPACK_spbstf LAPACK_GLOBAL(spbstf,SPBSTF)
+void LAPACK_spbstf(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ float* AB, lapack_int const* ldab,
+ lapack_int* info );
+
+#define LAPACK_zpbstf LAPACK_GLOBAL(zpbstf,ZPBSTF)
+void LAPACK_zpbstf(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ lapack_complex_double* AB, lapack_int const* ldab,
+ lapack_int* info );
+
+#define LAPACK_cpbsv LAPACK_GLOBAL(cpbsv,CPBSV)
+void LAPACK_cpbsv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ lapack_complex_float* AB, lapack_int const* ldab,
+ lapack_complex_float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_dpbsv LAPACK_GLOBAL(dpbsv,DPBSV)
+void LAPACK_dpbsv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ double* AB, lapack_int const* ldab,
+ double* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_spbsv LAPACK_GLOBAL(spbsv,SPBSV)
+void LAPACK_spbsv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ float* AB, lapack_int const* ldab,
+ float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_zpbsv LAPACK_GLOBAL(zpbsv,ZPBSV)
+void LAPACK_zpbsv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ lapack_complex_double* AB, lapack_int const* ldab,
+ lapack_complex_double* B, lapack_int const* ldb,
+ lapack_int* info );
+
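The *pbsv routines above are one-call drivers for symmetric/Hermitian positive-definite banded systems; only the kd+1 stored diagonals are passed, in LAPACK band storage. A minimal sketch for the double-precision driver (illustrative only; same "lapack.h" assumption as the sketch above):

    /* Solve A x = b for the SPD tridiagonal A = [2 -1 0; -1 2 -1; 0 -1 2],
     * kd = 1, lower band storage: AB(1+i-j, j) = A(i,j), ldab = kd+1. */
    #include <stdio.h>
    #include "lapack.h"

    int main(void) {
        char uplo = 'L';
        lapack_int n = 3, kd = 1, nrhs = 1, ldab = 2, ldb = 3, info = 0;
        double AB[6] = { 2.0, -1.0,  2.0, -1.0,  2.0, 0.0 };  /* column-major */
        double B[3]  = { 1.0, 0.0, 1.0 };
        LAPACK_dpbsv(&uplo, &n, &kd, &nrhs, AB, &ldab, B, &ldb, &info);
        printf("info = %d, x = [%g %g %g]\n", (int)info, B[0], B[1], B[2]);
        return (int)info;  /* B now holds x = [1 1 1] */
    }

The expert drivers (*pbsvx, below) add equilibration, condition estimation, and error bounds on top of the same factorization.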
+#define LAPACK_cpbsvx LAPACK_GLOBAL(cpbsvx,CPBSVX)
+void LAPACK_cpbsvx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ lapack_complex_float* AB, lapack_int const* ldab,
+ lapack_complex_float* AFB, lapack_int const* ldafb, char* equed,
+ float* S,
+ lapack_complex_float* B,
+ lapack_int const* ldb,
+ lapack_complex_float* X, lapack_int const* ldx,
+ float* rcond,
+ float* ferr,
+ float* berr,
+ lapack_complex_float* work,
+ float* rwork,
+ lapack_int* info );
+
+#define LAPACK_dpbsvx LAPACK_GLOBAL(dpbsvx,DPBSVX)
+void LAPACK_dpbsvx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ double* AB, lapack_int const* ldab,
+ double* AFB, lapack_int const* ldafb, char* equed,
+ double* S,
+ double* B,
+ lapack_int const* ldb,
+ double* X, lapack_int const* ldx,
+ double* rcond,
+ double* ferr,
+ double* berr,
+ double* work,
+ lapack_int* iwork,
+ lapack_int* info );
+
+#define LAPACK_spbsvx LAPACK_GLOBAL(spbsvx,SPBSVX)
+void LAPACK_spbsvx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ float* AB, lapack_int const* ldab,
+ float* AFB, lapack_int const* ldafb, char* equed,
+ float* S,
+ float* B,
+ lapack_int const* ldb,
+ float* X, lapack_int const* ldx,
+ float* rcond,
+ float* ferr,
+ float* berr,
+ float* work,
+ lapack_int* iwork,
+ lapack_int* info );
+
+#define LAPACK_zpbsvx LAPACK_GLOBAL(zpbsvx,ZPBSVX)
+void LAPACK_zpbsvx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ lapack_complex_double* AB, lapack_int const* ldab,
+ lapack_complex_double* AFB, lapack_int const* ldafb, char* equed,
+ double* S,
+ lapack_complex_double* B,
+ lapack_int const* ldb,
+ lapack_complex_double* X, lapack_int const* ldx,
+ double* rcond,
+ double* ferr,
+ double* berr,
+ lapack_complex_double* work,
+ double* rwork,
+ lapack_int* info );
+
+#define LAPACK_cpbtrf LAPACK_GLOBAL(cpbtrf,CPBTRF)
+void LAPACK_cpbtrf(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ lapack_complex_float* AB, lapack_int const* ldab,
+ lapack_int* info );
+
+#define LAPACK_dpbtrf LAPACK_GLOBAL(dpbtrf,DPBTRF)
+void LAPACK_dpbtrf(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ double* AB, lapack_int const* ldab,
+ lapack_int* info );
+
+#define LAPACK_spbtrf LAPACK_GLOBAL(spbtrf,SPBTRF)
+void LAPACK_spbtrf(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ float* AB, lapack_int const* ldab,
+ lapack_int* info );
+
+#define LAPACK_zpbtrf LAPACK_GLOBAL(zpbtrf,ZPBTRF)
+void LAPACK_zpbtrf(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ lapack_complex_double* AB, lapack_int const* ldab,
+ lapack_int* info );
+
+#define LAPACK_cpbtrs LAPACK_GLOBAL(cpbtrs,CPBTRS)
+void LAPACK_cpbtrs(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ lapack_complex_float const* AB, lapack_int const* ldab,
+ lapack_complex_float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_dpbtrs LAPACK_GLOBAL(dpbtrs,DPBTRS)
+void LAPACK_dpbtrs(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ double const* AB, lapack_int const* ldab,
+ double* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_spbtrs LAPACK_GLOBAL(spbtrs,SPBTRS)
+void LAPACK_spbtrs(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+ float const* AB, lapack_int const* ldab,
+ float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_zpbtrs
LAPACK_GLOBAL(zpbtrs,ZPBTRS) +void LAPACK_zpbtrs( + char const* uplo, + lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs, + lapack_complex_double const* AB, lapack_int const* ldab, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cpftrf LAPACK_GLOBAL(cpftrf,CPFTRF) +void LAPACK_cpftrf( + char const* transr, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, + lapack_int* info ); + +#define LAPACK_dpftrf LAPACK_GLOBAL(dpftrf,DPFTRF) +void LAPACK_dpftrf( + char const* transr, char const* uplo, + lapack_int const* n, + double* A, + lapack_int* info ); + +#define LAPACK_spftrf LAPACK_GLOBAL(spftrf,SPFTRF) +void LAPACK_spftrf( + char const* transr, char const* uplo, + lapack_int const* n, + float* A, + lapack_int* info ); + +#define LAPACK_zpftrf LAPACK_GLOBAL(zpftrf,ZPFTRF) +void LAPACK_zpftrf( + char const* transr, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, + lapack_int* info ); + +#define LAPACK_cpftri LAPACK_GLOBAL(cpftri,CPFTRI) +void LAPACK_cpftri( + char const* transr, char const* uplo, + lapack_int const* n, + lapack_complex_float* A, + lapack_int* info ); + +#define LAPACK_dpftri LAPACK_GLOBAL(dpftri,DPFTRI) +void LAPACK_dpftri( + char const* transr, char const* uplo, + lapack_int const* n, + double* A, + lapack_int* info ); + +#define LAPACK_spftri LAPACK_GLOBAL(spftri,SPFTRI) +void LAPACK_spftri( + char const* transr, char const* uplo, + lapack_int const* n, + float* A, + lapack_int* info ); + +#define LAPACK_zpftri LAPACK_GLOBAL(zpftri,ZPFTRI) +void LAPACK_zpftri( + char const* transr, char const* uplo, + lapack_int const* n, + lapack_complex_double* A, + lapack_int* info ); + +#define LAPACK_cpftrs LAPACK_GLOBAL(cpftrs,CPFTRS) +void LAPACK_cpftrs( + char const* transr, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dpftrs LAPACK_GLOBAL(dpftrs,DPFTRS) +void LAPACK_dpftrs( + char const* transr, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* A, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_spftrs LAPACK_GLOBAL(spftrs,SPFTRS) +void LAPACK_spftrs( + char const* transr, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* A, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zpftrs LAPACK_GLOBAL(zpftrs,ZPFTRS) +void LAPACK_zpftrs( + char const* transr, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cpocon LAPACK_GLOBAL(cpocon,CPOCON) +void LAPACK_cpocon( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + float const* anorm, + float* rcond, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dpocon LAPACK_GLOBAL(dpocon,DPOCON) +void LAPACK_dpocon( + char const* uplo, + lapack_int const* n, + double const* A, lapack_int const* lda, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_spocon LAPACK_GLOBAL(spocon,SPOCON) +void LAPACK_spocon( + char const* uplo, + lapack_int const* n, + float const* A, lapack_int const* lda, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zpocon 
LAPACK_GLOBAL(zpocon,ZPOCON) +void LAPACK_zpocon( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + double const* anorm, + double* rcond, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cpoequ LAPACK_GLOBAL(cpoequ,CPOEQU) +void LAPACK_cpoequ( + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + float* S, + float* scond, + float* amax, + lapack_int* info ); + +#define LAPACK_dpoequ LAPACK_GLOBAL(dpoequ,DPOEQU) +void LAPACK_dpoequ( + lapack_int const* n, + double const* A, lapack_int const* lda, + double* S, + double* scond, + double* amax, + lapack_int* info ); + +#define LAPACK_spoequ LAPACK_GLOBAL(spoequ,SPOEQU) +void LAPACK_spoequ( + lapack_int const* n, + float const* A, lapack_int const* lda, + float* S, + float* scond, + float* amax, + lapack_int* info ); + +#define LAPACK_zpoequ LAPACK_GLOBAL(zpoequ,ZPOEQU) +void LAPACK_zpoequ( + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + double* S, + double* scond, + double* amax, + lapack_int* info ); + +#define LAPACK_cpoequb LAPACK_GLOBAL(cpoequb,CPOEQUB) +void LAPACK_cpoequb( + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + float* S, + float* scond, + float* amax, + lapack_int* info ); + +#define LAPACK_dpoequb LAPACK_GLOBAL(dpoequb,DPOEQUB) +void LAPACK_dpoequb( + lapack_int const* n, + double const* A, lapack_int const* lda, + double* S, + double* scond, + double* amax, + lapack_int* info ); + +#define LAPACK_spoequb LAPACK_GLOBAL(spoequb,SPOEQUB) +void LAPACK_spoequb( + lapack_int const* n, + float const* A, lapack_int const* lda, + float* S, + float* scond, + float* amax, + lapack_int* info ); + +#define LAPACK_zpoequb LAPACK_GLOBAL(zpoequb,ZPOEQUB) +void LAPACK_zpoequb( + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + double* S, + double* scond, + double* amax, + lapack_int* info ); + +#define LAPACK_cporfs LAPACK_GLOBAL(cporfs,CPORFS) +void LAPACK_cporfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* AF, lapack_int const* ldaf, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dporfs LAPACK_GLOBAL(dporfs,DPORFS) +void LAPACK_dporfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* lda, + double const* AF, lapack_int const* ldaf, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sporfs LAPACK_GLOBAL(sporfs,SPORFS) +void LAPACK_sporfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float const* AF, lapack_int const* ldaf, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zporfs LAPACK_GLOBAL(zporfs,ZPORFS) +void LAPACK_zporfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* AF, lapack_int const* ldaf, + lapack_complex_double const* B, lapack_int const* ldb, + 
lapack_complex_double* X, lapack_int const* ldx,
+ double* ferr,
+ double* berr,
+ lapack_complex_double* work,
+ double* rwork,
+ lapack_int* info );
+
+#define LAPACK_cporfsx LAPACK_GLOBAL(cporfsx,CPORFSX)
+void LAPACK_cporfsx(
+ char const* uplo, char const* equed,
+ lapack_int const* n, lapack_int const* nrhs,
+ lapack_complex_float const* A, lapack_int const* lda,
+ lapack_complex_float const* AF, lapack_int const* ldaf,
+ float* S,
+ lapack_complex_float const* B, lapack_int const* ldb,
+ lapack_complex_float* X, lapack_int const* ldx,
+ float* rcond,
+ float* berr, lapack_int const* n_err_bnds,
+ float* err_bnds_norm,
+ float* err_bnds_comp, lapack_int const* nparams,
+ float* params,
+ lapack_complex_float* work,
+ float* rwork,
+ lapack_int* info );
+
+#define LAPACK_dporfsx LAPACK_GLOBAL(dporfsx,DPORFSX)
+void LAPACK_dporfsx(
+ char const* uplo, char const* equed,
+ lapack_int const* n, lapack_int const* nrhs,
+ double const* A, lapack_int const* lda,
+ double const* AF, lapack_int const* ldaf,
+ double* S,
+ double const* B, lapack_int const* ldb,
+ double* X, lapack_int const* ldx,
+ double* rcond,
+ double* berr, lapack_int const* n_err_bnds,
+ double* err_bnds_norm,
+ double* err_bnds_comp, lapack_int const* nparams,
+ double* params,
+ double* work,
+ lapack_int* iwork,
+ lapack_int* info );
+
+#define LAPACK_sporfsx LAPACK_GLOBAL(sporfsx,SPORFSX)
+void LAPACK_sporfsx(
+ char const* uplo, char const* equed,
+ lapack_int const* n, lapack_int const* nrhs,
+ float const* A, lapack_int const* lda,
+ float const* AF, lapack_int const* ldaf,
+ float* S,
+ float const* B, lapack_int const* ldb,
+ float* X, lapack_int const* ldx,
+ float* rcond,
+ float* berr, lapack_int const* n_err_bnds,
+ float* err_bnds_norm,
+ float* err_bnds_comp, lapack_int const* nparams,
+ float* params,
+ float* work,
+ lapack_int* iwork,
+ lapack_int* info );
+
+#define LAPACK_zporfsx LAPACK_GLOBAL(zporfsx,ZPORFSX)
+void LAPACK_zporfsx(
+ char const* uplo, char const* equed,
+ lapack_int const* n, lapack_int const* nrhs,
+ lapack_complex_double const* A, lapack_int const* lda,
+ lapack_complex_double const* AF, lapack_int const* ldaf,
+ double* S,
+ lapack_complex_double const* B, lapack_int const* ldb,
+ lapack_complex_double* X, lapack_int const* ldx,
+ double* rcond,
+ double* berr, lapack_int const* n_err_bnds,
+ double* err_bnds_norm,
+ double* err_bnds_comp, lapack_int const* nparams,
+ double* params,
+ lapack_complex_double* work,
+ double* rwork,
+ lapack_int* info );
+
+#define LAPACK_cposv LAPACK_GLOBAL(cposv,CPOSV)
+void LAPACK_cposv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ lapack_complex_float* A, lapack_int const* lda,
+ lapack_complex_float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_dposv LAPACK_GLOBAL(dposv,DPOSV)
+void LAPACK_dposv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ double* A, lapack_int const* lda,
+ double* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_sposv LAPACK_GLOBAL(sposv,SPOSV)
+void LAPACK_sposv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ float* A, lapack_int const* lda,
+ float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_zposv LAPACK_GLOBAL(zposv,ZPOSV)
+void LAPACK_zposv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ lapack_complex_double* A, lapack_int const* lda,
+ lapack_complex_double* B, lapack_int const* ldb,
+ lapack_int* info );
+
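*posv is the simple driver for dense symmetric/Hermitian positive-definite systems: one call performs the Cholesky factorization (the *potrf routines declared further below) and the triangular solves. A hedged sketch for dposv, under the same assumptions as the earlier sketches:

    /* Solve A x = b for a 3x3 SPD matrix; only the uplo triangle of the
     * column-major A is referenced, and A is overwritten by the factor. */
    #include <stdio.h>
    #include "lapack.h"

    int main(void) {
        char uplo = 'U';
        lapack_int n = 3, nrhs = 1, lda = 3, ldb = 3, info = 0;
        double A[9] = { 4, 1, 1,   1, 3, 0,   1, 0, 2 };  /* SPD */
        double b[3] = { 6, 4, 3 };                        /* = A * [1 1 1] */
        LAPACK_dposv(&uplo, &n, &nrhs, A, &lda, b, &ldb, &info);
        printf("info = %d, x = [%g %g %g]\n", (int)info, b[0], b[1], b[2]);
        return (int)info;
    }

dsposv just below follows the same pattern but factors in single precision and iteratively refines to double, returning the refinement iteration count in iter.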
+#define LAPACK_dsposv LAPACK_GLOBAL(dsposv,DSPOSV)
+void LAPACK_dsposv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ double* A, lapack_int const* lda,
+ double const* B, lapack_int const* ldb,
+ double* X, lapack_int const* ldx,
+ double* work,
+ float* swork, lapack_int* iter,
+ lapack_int* info );
+
+#define LAPACK_zcposv LAPACK_GLOBAL(zcposv,ZCPOSV)
+void LAPACK_zcposv(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ lapack_complex_double* A, lapack_int const* lda,
+ lapack_complex_double const* B, lapack_int const* ldb,
+ lapack_complex_double* X, lapack_int const* ldx,
+ lapack_complex_double* work,
+ lapack_complex_float* swork,
+ double* rwork, lapack_int* iter,
+ lapack_int* info );
+
+#define LAPACK_cposvx LAPACK_GLOBAL(cposvx,CPOSVX)
+void LAPACK_cposvx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ lapack_complex_float* A, lapack_int const* lda,
+ lapack_complex_float* AF, lapack_int const* ldaf, char* equed,
+ float* S,
+ lapack_complex_float* B,
+ lapack_int const* ldb,
+ lapack_complex_float* X, lapack_int const* ldx,
+ float* rcond,
+ float* ferr,
+ float* berr,
+ lapack_complex_float* work,
+ float* rwork,
+ lapack_int* info );
+
+#define LAPACK_dposvx LAPACK_GLOBAL(dposvx,DPOSVX)
+void LAPACK_dposvx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ double* A, lapack_int const* lda,
+ double* AF, lapack_int const* ldaf, char* equed,
+ double* S,
+ double* B,
+ lapack_int const* ldb,
+ double* X, lapack_int const* ldx,
+ double* rcond,
+ double* ferr,
+ double* berr,
+ double* work,
+ lapack_int* iwork,
+ lapack_int* info );
+
+#define LAPACK_sposvx LAPACK_GLOBAL(sposvx,SPOSVX)
+void LAPACK_sposvx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ float* A, lapack_int const* lda,
+ float* AF, lapack_int const* ldaf, char* equed,
+ float* S,
+ float* B,
+ lapack_int const* ldb,
+ float* X, lapack_int const* ldx,
+ float* rcond,
+ float* ferr,
+ float* berr,
+ float* work,
+ lapack_int* iwork,
+ lapack_int* info );
+
+#define LAPACK_zposvx LAPACK_GLOBAL(zposvx,ZPOSVX)
+void LAPACK_zposvx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ lapack_complex_double* A, lapack_int const* lda,
+ lapack_complex_double* AF, lapack_int const* ldaf, char* equed,
+ double* S,
+ lapack_complex_double* B,
+ lapack_int const* ldb,
+ lapack_complex_double* X, lapack_int const* ldx,
+ double* rcond,
+ double* ferr,
+ double* berr,
+ lapack_complex_double* work,
+ double* rwork,
+ lapack_int* info );
+
+#define LAPACK_cposvxx LAPACK_GLOBAL(cposvxx,CPOSVXX)
+void LAPACK_cposvxx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ lapack_complex_float* A, lapack_int const* lda,
+ lapack_complex_float* AF, lapack_int const* ldaf, char* equed,
+ float* S,
+ lapack_complex_float* B,
+ lapack_int const* ldb,
+ lapack_complex_float* X, lapack_int const* ldx,
+ float* rcond,
+ float* rpvgrw,
+ float* berr, lapack_int const* n_err_bnds,
+ float* err_bnds_norm,
+ float* err_bnds_comp, lapack_int const* nparams,
+ float* params,
+ lapack_complex_float* work,
+ float* rwork,
+ lapack_int* info );
+
+#define LAPACK_dposvxx LAPACK_GLOBAL(dposvxx,DPOSVXX)
+void LAPACK_dposvxx(
+ char const* fact, char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ double* A, lapack_int const* lda,
+ double* AF, lapack_int const* ldaf, char* equed,
+ double* S,
+ double* B,
+ lapack_int const* ldb,
+ double* X, lapack_int const* ldx,
+
double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sposvxx LAPACK_GLOBAL(sposvxx,SPOSVXX) +void LAPACK_sposvxx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* AF, lapack_int const* ldaf, char* equed, + float* S, + float* B, + lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* rpvgrw, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zposvxx LAPACK_GLOBAL(zposvxx,ZPOSVXX) +void LAPACK_zposvxx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* AF, lapack_int const* ldaf, char* equed, + double* S, + lapack_complex_double* B, + lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cpotf2 LAPACK_GLOBAL(cpotf2,CPOTF2) +void LAPACK_cpotf2( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_dpotf2 LAPACK_GLOBAL(dpotf2,DPOTF2) +void LAPACK_dpotf2( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_spotf2 LAPACK_GLOBAL(spotf2,SPOTF2) +void LAPACK_spotf2( + char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_zpotf2 LAPACK_GLOBAL(zpotf2,ZPOTF2) +void LAPACK_zpotf2( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_cpotrf LAPACK_GLOBAL(cpotrf,CPOTRF) +void LAPACK_cpotrf( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_dpotrf LAPACK_GLOBAL(dpotrf,DPOTRF) +void LAPACK_dpotrf( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_spotrf LAPACK_GLOBAL(spotrf,SPOTRF) +void LAPACK_spotrf( + char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_zpotrf LAPACK_GLOBAL(zpotrf,ZPOTRF) +void LAPACK_zpotrf( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_cpotrf2 LAPACK_GLOBAL(cpotrf2,CPOTRF2) +void LAPACK_cpotrf2( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_dpotrf2 LAPACK_GLOBAL(dpotrf2,DPOTRF2) +void LAPACK_dpotrf2( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_spotrf2 LAPACK_GLOBAL(spotrf2,SPOTRF2) +void LAPACK_spotrf2( + char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_zpotrf2 LAPACK_GLOBAL(zpotrf2,ZPOTRF2) +void LAPACK_zpotrf2( + char const* uplo, 
+ lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_cpotri LAPACK_GLOBAL(cpotri,CPOTRI) +void LAPACK_cpotri( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_dpotri LAPACK_GLOBAL(dpotri,DPOTRI) +void LAPACK_dpotri( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_spotri LAPACK_GLOBAL(spotri,SPOTRI) +void LAPACK_spotri( + char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_zpotri LAPACK_GLOBAL(zpotri,ZPOTRI) +void LAPACK_zpotri( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_cpotrs LAPACK_GLOBAL(cpotrs,CPOTRS) +void LAPACK_cpotrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dpotrs LAPACK_GLOBAL(dpotrs,DPOTRS) +void LAPACK_dpotrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_spotrs LAPACK_GLOBAL(spotrs,SPOTRS) +void LAPACK_spotrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zpotrs LAPACK_GLOBAL(zpotrs,ZPOTRS) +void LAPACK_zpotrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cppcon LAPACK_GLOBAL(cppcon,CPPCON) +void LAPACK_cppcon( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* AP, + float const* anorm, + float* rcond, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dppcon LAPACK_GLOBAL(dppcon,DPPCON) +void LAPACK_dppcon( + char const* uplo, + lapack_int const* n, + double const* AP, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sppcon LAPACK_GLOBAL(sppcon,SPPCON) +void LAPACK_sppcon( + char const* uplo, + lapack_int const* n, + float const* AP, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zppcon LAPACK_GLOBAL(zppcon,ZPPCON) +void LAPACK_zppcon( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* AP, + double const* anorm, + double* rcond, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cppequ LAPACK_GLOBAL(cppequ,CPPEQU) +void LAPACK_cppequ( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* AP, + float* S, + float* scond, + float* amax, + lapack_int* info ); + +#define LAPACK_dppequ LAPACK_GLOBAL(dppequ,DPPEQU) +void LAPACK_dppequ( + char const* uplo, + lapack_int const* n, + double const* AP, + double* S, + double* scond, + double* amax, + lapack_int* info ); + +#define LAPACK_sppequ LAPACK_GLOBAL(sppequ,SPPEQU) +void LAPACK_sppequ( + char const* uplo, + lapack_int const* n, + float const* AP, + float* S, + float* scond, + float* amax, + lapack_int* info ); + +#define LAPACK_zppequ LAPACK_GLOBAL(zppequ,ZPPEQU) +void LAPACK_zppequ( + char 
const* uplo, + lapack_int const* n, + lapack_complex_double const* AP, + double* S, + double* scond, + double* amax, + lapack_int* info ); + +#define LAPACK_cpprfs LAPACK_GLOBAL(cpprfs,CPPRFS) +void LAPACK_cpprfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* AP, + lapack_complex_float const* AFP, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dpprfs LAPACK_GLOBAL(dpprfs,DPPRFS) +void LAPACK_dpprfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* AP, + double const* AFP, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_spprfs LAPACK_GLOBAL(spprfs,SPPRFS) +void LAPACK_spprfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* AP, + float const* AFP, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zpprfs LAPACK_GLOBAL(zpprfs,ZPPRFS) +void LAPACK_zpprfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* AP, + lapack_complex_double const* AFP, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cppsv LAPACK_GLOBAL(cppsv,CPPSV) +void LAPACK_cppsv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* AP, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dppsv LAPACK_GLOBAL(dppsv,DPPSV) +void LAPACK_dppsv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* AP, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_sppsv LAPACK_GLOBAL(sppsv,SPPSV) +void LAPACK_sppsv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* AP, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zppsv LAPACK_GLOBAL(zppsv,ZPPSV) +void LAPACK_zppsv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* AP, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cppsvx LAPACK_GLOBAL(cppsvx,CPPSVX) +void LAPACK_cppsvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* AP, + lapack_complex_float* AFP, char* equed, + float* S, + lapack_complex_float* B, + lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dppsvx LAPACK_GLOBAL(dppsvx,DPPSVX) +void LAPACK_dppsvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* AP, + double* AFP, char* equed, + double* S, + double* B, + lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sppsvx LAPACK_GLOBAL(sppsvx,SPPSVX) +void LAPACK_sppsvx( + char const* fact, char const* uplo, + lapack_int 
const* n, lapack_int const* nrhs, + float* AP, + float* AFP, char* equed, + float* S, + float* B, + lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zppsvx LAPACK_GLOBAL(zppsvx,ZPPSVX) +void LAPACK_zppsvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* AP, + lapack_complex_double* AFP, char* equed, + double* S, + lapack_complex_double* B, + lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cpptrf LAPACK_GLOBAL(cpptrf,CPPTRF) +void LAPACK_cpptrf( + char const* uplo, + lapack_int const* n, + lapack_complex_float* AP, + lapack_int* info ); + +#define LAPACK_dpptrf LAPACK_GLOBAL(dpptrf,DPPTRF) +void LAPACK_dpptrf( + char const* uplo, + lapack_int const* n, + double* AP, + lapack_int* info ); + +#define LAPACK_spptrf LAPACK_GLOBAL(spptrf,SPPTRF) +void LAPACK_spptrf( + char const* uplo, + lapack_int const* n, + float* AP, + lapack_int* info ); + +#define LAPACK_zpptrf LAPACK_GLOBAL(zpptrf,ZPPTRF) +void LAPACK_zpptrf( + char const* uplo, + lapack_int const* n, + lapack_complex_double* AP, + lapack_int* info ); + +#define LAPACK_cpptri LAPACK_GLOBAL(cpptri,CPPTRI) +void LAPACK_cpptri( + char const* uplo, + lapack_int const* n, + lapack_complex_float* AP, + lapack_int* info ); + +#define LAPACK_dpptri LAPACK_GLOBAL(dpptri,DPPTRI) +void LAPACK_dpptri( + char const* uplo, + lapack_int const* n, + double* AP, + lapack_int* info ); + +#define LAPACK_spptri LAPACK_GLOBAL(spptri,SPPTRI) +void LAPACK_spptri( + char const* uplo, + lapack_int const* n, + float* AP, + lapack_int* info ); + +#define LAPACK_zpptri LAPACK_GLOBAL(zpptri,ZPPTRI) +void LAPACK_zpptri( + char const* uplo, + lapack_int const* n, + lapack_complex_double* AP, + lapack_int* info ); + +#define LAPACK_cpptrs LAPACK_GLOBAL(cpptrs,CPPTRS) +void LAPACK_cpptrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* AP, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dpptrs LAPACK_GLOBAL(dpptrs,DPPTRS) +void LAPACK_dpptrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* AP, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_spptrs LAPACK_GLOBAL(spptrs,SPPTRS) +void LAPACK_spptrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* AP, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zpptrs LAPACK_GLOBAL(zpptrs,ZPPTRS) +void LAPACK_zpptrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* AP, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cpstrf LAPACK_GLOBAL(cpstrf,CPSTRF) +void LAPACK_cpstrf( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* piv, lapack_int* rank, + float const* tol, + float* work, + lapack_int* info ); + +#define LAPACK_dpstrf LAPACK_GLOBAL(dpstrf,DPSTRF) +void LAPACK_dpstrf( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* piv, lapack_int* rank, + double const* tol, + double* work, + lapack_int* info ); + +#define LAPACK_spstrf LAPACK_GLOBAL(spstrf,SPSTRF) +void LAPACK_spstrf( + char const* 
uplo, + lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* piv, lapack_int* rank, + float const* tol, + float* work, + lapack_int* info ); + +#define LAPACK_zpstrf LAPACK_GLOBAL(zpstrf,ZPSTRF) +void LAPACK_zpstrf( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* piv, lapack_int* rank, + double const* tol, + double* work, + lapack_int* info ); + +#define LAPACK_cptcon LAPACK_GLOBAL(cptcon,CPTCON) +void LAPACK_cptcon( + lapack_int const* n, + float const* D, + lapack_complex_float const* E, + float const* anorm, + float* rcond, + float* rwork, + lapack_int* info ); + +#define LAPACK_dptcon LAPACK_GLOBAL(dptcon,DPTCON) +void LAPACK_dptcon( + lapack_int const* n, + double const* D, + double const* E, + double const* anorm, + double* rcond, + double* work, + lapack_int* info ); + +#define LAPACK_sptcon LAPACK_GLOBAL(sptcon,SPTCON) +void LAPACK_sptcon( + lapack_int const* n, + float const* D, + float const* E, + float const* anorm, + float* rcond, + float* work, + lapack_int* info ); + +#define LAPACK_zptcon LAPACK_GLOBAL(zptcon,ZPTCON) +void LAPACK_zptcon( + lapack_int const* n, + double const* D, + lapack_complex_double const* E, + double const* anorm, + double* rcond, + double* rwork, + lapack_int* info ); + +#define LAPACK_cpteqr LAPACK_GLOBAL(cpteqr,CPTEQR) +void LAPACK_cpteqr( + char const* compz, + lapack_int const* n, + float* D, + float* E, + lapack_complex_float* Z, lapack_int const* ldz, + float* work, + lapack_int* info ); + +#define LAPACK_dpteqr LAPACK_GLOBAL(dpteqr,DPTEQR) +void LAPACK_dpteqr( + char const* compz, + lapack_int const* n, + double* D, + double* E, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* info ); + +#define LAPACK_spteqr LAPACK_GLOBAL(spteqr,SPTEQR) +void LAPACK_spteqr( + char const* compz, + lapack_int const* n, + float* D, + float* E, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* info ); + +#define LAPACK_zpteqr LAPACK_GLOBAL(zpteqr,ZPTEQR) +void LAPACK_zpteqr( + char const* compz, + lapack_int const* n, + double* D, + double* E, + lapack_complex_double* Z, lapack_int const* ldz, + double* work, + lapack_int* info ); + +#define LAPACK_cptrfs LAPACK_GLOBAL(cptrfs,CPTRFS) +void LAPACK_cptrfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* D, + lapack_complex_float const* E, + float const* DF, + lapack_complex_float const* EF, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dptrfs LAPACK_GLOBAL(dptrfs,DPTRFS) +void LAPACK_dptrfs( + lapack_int const* n, lapack_int const* nrhs, + double const* D, + double const* E, + double const* DF, + double const* EF, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* ferr, + double* berr, + double* work, + lapack_int* info ); + +#define LAPACK_sptrfs LAPACK_GLOBAL(sptrfs,SPTRFS) +void LAPACK_sptrfs( + lapack_int const* n, lapack_int const* nrhs, + float const* D, + float const* E, + float const* DF, + float const* EF, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* info ); + +#define LAPACK_zptrfs LAPACK_GLOBAL(zptrfs,ZPTRFS) +void LAPACK_zptrfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* D, + lapack_complex_double const* E, + double const* DF, + 
lapack_complex_double const* EF,
+ lapack_complex_double const* B, lapack_int const* ldb,
+ lapack_complex_double* X, lapack_int const* ldx,
+ double* ferr,
+ double* berr,
+ lapack_complex_double* work,
+ double* rwork,
+ lapack_int* info );
+
+#define LAPACK_cptsv LAPACK_GLOBAL(cptsv,CPTSV)
+void LAPACK_cptsv(
+ lapack_int const* n, lapack_int const* nrhs,
+ float* D,
+ lapack_complex_float* E,
+ lapack_complex_float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_dptsv LAPACK_GLOBAL(dptsv,DPTSV)
+void LAPACK_dptsv(
+ lapack_int const* n, lapack_int const* nrhs,
+ double* D,
+ double* E,
+ double* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_sptsv LAPACK_GLOBAL(sptsv,SPTSV)
+void LAPACK_sptsv(
+ lapack_int const* n, lapack_int const* nrhs,
+ float* D,
+ float* E,
+ float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_zptsv LAPACK_GLOBAL(zptsv,ZPTSV)
+void LAPACK_zptsv(
+ lapack_int const* n, lapack_int const* nrhs,
+ double* D,
+ lapack_complex_double* E,
+ lapack_complex_double* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_cptsvx LAPACK_GLOBAL(cptsvx,CPTSVX)
+void LAPACK_cptsvx(
+ char const* fact,
+ lapack_int const* n, lapack_int const* nrhs,
+ float const* D,
+ lapack_complex_float const* E,
+ float* DF,
+ lapack_complex_float* EF,
+ lapack_complex_float const* B, lapack_int const* ldb,
+ lapack_complex_float* X, lapack_int const* ldx,
+ float* rcond,
+ float* ferr,
+ float* berr,
+ lapack_complex_float* work,
+ float* rwork,
+ lapack_int* info );
+
+#define LAPACK_dptsvx LAPACK_GLOBAL(dptsvx,DPTSVX)
+void LAPACK_dptsvx(
+ char const* fact,
+ lapack_int const* n, lapack_int const* nrhs,
+ double const* D,
+ double const* E,
+ double* DF,
+ double* EF,
+ double const* B, lapack_int const* ldb,
+ double* X, lapack_int const* ldx,
+ double* rcond,
+ double* ferr,
+ double* berr,
+ double* work,
+ lapack_int* info );
+
+#define LAPACK_sptsvx LAPACK_GLOBAL(sptsvx,SPTSVX)
+void LAPACK_sptsvx(
+ char const* fact,
+ lapack_int const* n, lapack_int const* nrhs,
+ float const* D,
+ float const* E,
+ float* DF,
+ float* EF,
+ float const* B, lapack_int const* ldb,
+ float* X, lapack_int const* ldx,
+ float* rcond,
+ float* ferr,
+ float* berr,
+ float* work,
+ lapack_int* info );
+
+#define LAPACK_zptsvx LAPACK_GLOBAL(zptsvx,ZPTSVX)
+void LAPACK_zptsvx(
+ char const* fact,
+ lapack_int const* n, lapack_int const* nrhs,
+ double const* D,
+ lapack_complex_double const* E,
+ double* DF,
+ lapack_complex_double* EF,
+ lapack_complex_double const* B, lapack_int const* ldb,
+ lapack_complex_double* X, lapack_int const* ldx,
+ double* rcond,
+ double* ferr,
+ double* berr,
+ lapack_complex_double* work,
+ double* rwork,
+ lapack_int* info );
+
+#define LAPACK_cpttrf LAPACK_GLOBAL(cpttrf,CPTTRF)
+void LAPACK_cpttrf(
+ lapack_int const* n,
+ float* D,
+ lapack_complex_float* E,
+ lapack_int* info );
+
+#define LAPACK_dpttrf LAPACK_GLOBAL(dpttrf,DPTTRF)
+void LAPACK_dpttrf(
+ lapack_int const* n,
+ double* D,
+ double* E,
+ lapack_int* info );
+
+#define LAPACK_spttrf LAPACK_GLOBAL(spttrf,SPTTRF)
+void LAPACK_spttrf(
+ lapack_int const* n,
+ float* D,
+ float* E,
+ lapack_int* info );
+
+#define LAPACK_zpttrf LAPACK_GLOBAL(zpttrf,ZPTTRF)
+void LAPACK_zpttrf(
+ lapack_int const* n,
+ double* D,
+ lapack_complex_double* E,
+ lapack_int* info );
+
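The *pttrf routines factor a symmetric (Hermitian) positive-definite tridiagonal matrix as L*D*L^T from its diagonal D and off-diagonal E; the matching *pttrs solvers follow immediately below. A small sketch of the double-precision pair (illustrative only, same assumptions as the sketches above):

    /* Factor the same tridiagonal matrix as in the dpbsv sketch, then
     * solve; D and E are overwritten by the L*D*L^T factorization. */
    #include <stdio.h>
    #include "lapack.h"

    int main(void) {
        lapack_int n = 3, nrhs = 1, ldb = 3, info = 0;
        double D[3] = { 2.0, 2.0, 2.0 };
        double E[2] = { -1.0, -1.0 };
        double B[3] = { 1.0, 0.0, 1.0 };
        LAPACK_dpttrf(&n, D, E, &info);
        if (info == 0)
            LAPACK_dpttrs(&n, &nrhs, D, E, B, &ldb, &info);
        printf("info = %d, x = [%g %g %g]\n", (int)info, B[0], B[1], B[2]);
        return (int)info;
    }

Note that the real-valued dpttrs/spttrs take no uplo argument, while the complex cpttrs/zpttrs do, since the conjugation of E depends on which triangle was supplied.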
+#define LAPACK_cpttrs LAPACK_GLOBAL(cpttrs,CPTTRS)
+void LAPACK_cpttrs(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ float const* D,
+ lapack_complex_float const* E,
+ lapack_complex_float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_dpttrs LAPACK_GLOBAL(dpttrs,DPTTRS)
+void LAPACK_dpttrs(
+ lapack_int const* n, lapack_int const* nrhs,
+ double const* D,
+ double const* E,
+ double* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_spttrs LAPACK_GLOBAL(spttrs,SPTTRS)
+void LAPACK_spttrs(
+ lapack_int const* n, lapack_int const* nrhs,
+ float const* D,
+ float const* E,
+ float* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_zpttrs LAPACK_GLOBAL(zpttrs,ZPTTRS)
+void LAPACK_zpttrs(
+ char const* uplo,
+ lapack_int const* n, lapack_int const* nrhs,
+ double const* D,
+ lapack_complex_double const* E,
+ lapack_complex_double* B, lapack_int const* ldb,
+ lapack_int* info );
+
+#define LAPACK_dsbev LAPACK_GLOBAL(dsbev,DSBEV)
+void LAPACK_dsbev(
+ char const* jobz, char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ double* AB, lapack_int const* ldab,
+ double* W,
+ double* Z, lapack_int const* ldz,
+ double* work,
+ lapack_int* info );
+
+#define LAPACK_ssbev LAPACK_GLOBAL(ssbev,SSBEV)
+void LAPACK_ssbev(
+ char const* jobz, char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ float* AB, lapack_int const* ldab,
+ float* W,
+ float* Z, lapack_int const* ldz,
+ float* work,
+ lapack_int* info );
+
+#define LAPACK_dsbev_2stage LAPACK_GLOBAL(dsbev_2stage,DSBEV_2STAGE)
+void LAPACK_dsbev_2stage(
+ char const* jobz, char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ double* AB, lapack_int const* ldab,
+ double* W,
+ double* Z, lapack_int const* ldz,
+ double* work, lapack_int const* lwork,
+ lapack_int* info );
+
+#define LAPACK_ssbev_2stage LAPACK_GLOBAL(ssbev_2stage,SSBEV_2STAGE)
+void LAPACK_ssbev_2stage(
+ char const* jobz, char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ float* AB, lapack_int const* ldab,
+ float* W,
+ float* Z, lapack_int const* ldz,
+ float* work, lapack_int const* lwork,
+ lapack_int* info );
+
+#define LAPACK_dsbevd LAPACK_GLOBAL(dsbevd,DSBEVD)
+void LAPACK_dsbevd(
+ char const* jobz, char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ double* AB, lapack_int const* ldab,
+ double* W,
+ double* Z, lapack_int const* ldz,
+ double* work, lapack_int const* lwork,
+ lapack_int* iwork, lapack_int const* liwork,
+ lapack_int* info );
+
+#define LAPACK_ssbevd LAPACK_GLOBAL(ssbevd,SSBEVD)
+void LAPACK_ssbevd(
+ char const* jobz, char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ float* AB, lapack_int const* ldab,
+ float* W,
+ float* Z, lapack_int const* ldz,
+ float* work, lapack_int const* lwork,
+ lapack_int* iwork, lapack_int const* liwork,
+ lapack_int* info );
+
+#define LAPACK_dsbevd_2stage LAPACK_GLOBAL(dsbevd_2stage,DSBEVD_2STAGE)
+void LAPACK_dsbevd_2stage(
+ char const* jobz, char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ double* AB, lapack_int const* ldab,
+ double* W,
+ double* Z, lapack_int const* ldz,
+ double* work, lapack_int const* lwork,
+ lapack_int* iwork, lapack_int const* liwork,
+ lapack_int* info );
+
+#define LAPACK_ssbevd_2stage LAPACK_GLOBAL(ssbevd_2stage,SSBEVD_2STAGE)
+void LAPACK_ssbevd_2stage(
+ char const* jobz, char const* uplo,
+ lapack_int const* n, lapack_int const* kd,
+ float* AB, lapack_int const* ldab,
+ float* W,
+ float* Z, lapack_int const* ldz,
+ float* work, lapack_int const* lwork,
+ lapack_int* iwork, lapack_int const* liwork,
+ lapack_int* info );
+
+#define LAPACK_dsbevx LAPACK_GLOBAL(dsbevx,DSBEVX)
+void LAPACK_dsbevx(
+ char const*
jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* kd, + double* AB, lapack_int const* ldab, + double* Q, lapack_int const* ldq, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_ssbevx LAPACK_GLOBAL(ssbevx,SSBEVX) +void LAPACK_ssbevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* kd, + float* AB, lapack_int const* ldab, + float* Q, lapack_int const* ldq, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_dsbevx_2stage LAPACK_GLOBAL(dsbevx_2stage,DSBEVX_2STAGE) +void LAPACK_dsbevx_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* kd, + double* AB, lapack_int const* ldab, + double* Q, lapack_int const* ldq, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_ssbevx_2stage LAPACK_GLOBAL(ssbevx_2stage,SSBEVX_2STAGE) +void LAPACK_ssbevx_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* kd, + float* AB, lapack_int const* ldab, + float* Q, lapack_int const* ldq, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_dsbgst LAPACK_GLOBAL(dsbgst,DSBGST) +void LAPACK_dsbgst( + char const* vect, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + double* AB, lapack_int const* ldab, + double const* BB, lapack_int const* ldbb, + double* X, lapack_int const* ldx, + double* work, + lapack_int* info ); + +#define LAPACK_ssbgst LAPACK_GLOBAL(ssbgst,SSBGST) +void LAPACK_ssbgst( + char const* vect, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + float* AB, lapack_int const* ldab, + float const* BB, lapack_int const* ldbb, + float* X, lapack_int const* ldx, + float* work, + lapack_int* info ); + +#define LAPACK_dsbgv LAPACK_GLOBAL(dsbgv,DSBGV) +void LAPACK_dsbgv( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + double* AB, lapack_int const* ldab, + double* BB, lapack_int const* ldbb, + double* W, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* info ); + +#define LAPACK_ssbgv LAPACK_GLOBAL(ssbgv,SSBGV) +void LAPACK_ssbgv( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + float* AB, lapack_int const* ldab, + float* BB, lapack_int const* ldbb, + float* W, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* info ); + +#define LAPACK_dsbgvd LAPACK_GLOBAL(dsbgvd,DSBGVD) +void LAPACK_dsbgvd( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + double* AB, lapack_int 
const* ldab, + double* BB, lapack_int const* ldbb, + double* W, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_ssbgvd LAPACK_GLOBAL(ssbgvd,SSBGVD) +void LAPACK_ssbgvd( + char const* jobz, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + float* AB, lapack_int const* ldab, + float* BB, lapack_int const* ldbb, + float* W, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dsbgvx LAPACK_GLOBAL(dsbgvx,DSBGVX) +void LAPACK_dsbgvx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + double* AB, lapack_int const* ldab, + double* BB, lapack_int const* ldbb, + double* Q, lapack_int const* ldq, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_ssbgvx LAPACK_GLOBAL(ssbgvx,SSBGVX) +void LAPACK_ssbgvx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, lapack_int const* ka, lapack_int const* kb, + float* AB, lapack_int const* ldab, + float* BB, lapack_int const* ldbb, + float* Q, lapack_int const* ldq, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_dsbtrd LAPACK_GLOBAL(dsbtrd,DSBTRD) +void LAPACK_dsbtrd( + char const* vect, char const* uplo, + lapack_int const* n, lapack_int const* kd, + double* AB, lapack_int const* ldab, + double* D, + double* E, + double* Q, lapack_int const* ldq, + double* work, + lapack_int* info ); + +#define LAPACK_ssbtrd LAPACK_GLOBAL(ssbtrd,SSBTRD) +void LAPACK_ssbtrd( + char const* vect, char const* uplo, + lapack_int const* n, lapack_int const* kd, + float* AB, lapack_int const* ldab, + float* D, + float* E, + float* Q, lapack_int const* ldq, + float* work, + lapack_int* info ); + +#define LAPACK_dsfrk LAPACK_GLOBAL(dsfrk,DSFRK) +void LAPACK_dsfrk( + char const* transr, char const* uplo, char const* trans, + lapack_int const* n, lapack_int const* k, + double const* alpha, + double const* A, lapack_int const* lda, + double const* beta, + double* C ); + +#define LAPACK_ssfrk LAPACK_GLOBAL(ssfrk,SSFRK) +void LAPACK_ssfrk( + char const* transr, char const* uplo, char const* trans, + lapack_int const* n, lapack_int const* k, + float const* alpha, + float const* A, lapack_int const* lda, + float const* beta, + float* C ); + +#define LAPACK_cspcon LAPACK_GLOBAL(cspcon,CSPCON) +void LAPACK_cspcon( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* AP, lapack_int const* ipiv, + float const* anorm, + float* rcond, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dspcon LAPACK_GLOBAL(dspcon,DSPCON) +void LAPACK_dspcon( + char const* uplo, + lapack_int const* n, + double const* AP, lapack_int const* ipiv, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sspcon LAPACK_GLOBAL(sspcon,SSPCON) +void LAPACK_sspcon( + char const* uplo, + lapack_int const* n, + float const* AP, lapack_int const* 
ipiv, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zspcon LAPACK_GLOBAL(zspcon,ZSPCON) +void LAPACK_zspcon( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* AP, lapack_int const* ipiv, + double const* anorm, + double* rcond, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_dspev LAPACK_GLOBAL(dspev,DSPEV) +void LAPACK_dspev( + char const* jobz, char const* uplo, + lapack_int const* n, + double* AP, + double* W, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* info ); + +#define LAPACK_sspev LAPACK_GLOBAL(sspev,SSPEV) +void LAPACK_sspev( + char const* jobz, char const* uplo, + lapack_int const* n, + float* AP, + float* W, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* info ); + +#define LAPACK_dspevd LAPACK_GLOBAL(dspevd,DSPEVD) +void LAPACK_dspevd( + char const* jobz, char const* uplo, + lapack_int const* n, + double* AP, + double* W, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_sspevd LAPACK_GLOBAL(sspevd,SSPEVD) +void LAPACK_sspevd( + char const* jobz, char const* uplo, + lapack_int const* n, + float* AP, + float* W, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dspevx LAPACK_GLOBAL(dspevx,DSPEVX) +void LAPACK_dspevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + double* AP, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_sspevx LAPACK_GLOBAL(sspevx,SSPEVX) +void LAPACK_sspevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + float* AP, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_dspgst LAPACK_GLOBAL(dspgst,DSPGST) +void LAPACK_dspgst( + lapack_int const* itype, char const* uplo, + lapack_int const* n, + double* AP, + double const* BP, + lapack_int* info ); + +#define LAPACK_sspgst LAPACK_GLOBAL(sspgst,SSPGST) +void LAPACK_sspgst( + lapack_int const* itype, char const* uplo, + lapack_int const* n, + float* AP, + float const* BP, + lapack_int* info ); + +#define LAPACK_dspgv LAPACK_GLOBAL(dspgv,DSPGV) +void LAPACK_dspgv( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + double* AP, + double* BP, + double* W, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* info ); + +#define LAPACK_sspgv LAPACK_GLOBAL(sspgv,SSPGV) +void LAPACK_sspgv( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + float* AP, + float* BP, + float* W, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* info ); + +#define LAPACK_dspgvd LAPACK_GLOBAL(dspgvd,DSPGVD) +void LAPACK_dspgvd( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + double* AP, + double* BP, + double* W, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* 
info ); + +#define LAPACK_sspgvd LAPACK_GLOBAL(sspgvd,SSPGVD) +void LAPACK_sspgvd( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + float* AP, + float* BP, + float* W, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dspgvx LAPACK_GLOBAL(dspgvx,DSPGVX) +void LAPACK_dspgvx( + lapack_int const* itype, char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + double* AP, + double* BP, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_sspgvx LAPACK_GLOBAL(sspgvx,SSPGVX) +void LAPACK_sspgvx( + lapack_int const* itype, char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + float* AP, + float* BP, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_csprfs LAPACK_GLOBAL(csprfs,CSPRFS) +void LAPACK_csprfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* AP, + lapack_complex_float const* AFP, lapack_int const* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dsprfs LAPACK_GLOBAL(dsprfs,DSPRFS) +void LAPACK_dsprfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* AP, + double const* AFP, lapack_int const* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ssprfs LAPACK_GLOBAL(ssprfs,SSPRFS) +void LAPACK_ssprfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* AP, + float const* AFP, lapack_int const* ipiv, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zsprfs LAPACK_GLOBAL(zsprfs,ZSPRFS) +void LAPACK_zsprfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* AP, + lapack_complex_double const* AFP, lapack_int const* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_cspsv LAPACK_GLOBAL(cspsv,CSPSV) +void LAPACK_cspsv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* AP, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dspsv LAPACK_GLOBAL(dspsv,DSPSV) +void LAPACK_dspsv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* AP, lapack_int* ipiv, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_sspsv LAPACK_GLOBAL(sspsv,SSPSV) +void LAPACK_sspsv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* AP, lapack_int* ipiv, + float* B, 
lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zspsv LAPACK_GLOBAL(zspsv,ZSPSV) +void LAPACK_zspsv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* AP, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_cspsvx LAPACK_GLOBAL(cspsvx,CSPSVX) +void LAPACK_cspsvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* AP, + lapack_complex_float* AFP, lapack_int* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dspsvx LAPACK_GLOBAL(dspsvx,DSPSVX) +void LAPACK_dspsvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* AP, + double* AFP, lapack_int* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sspsvx LAPACK_GLOBAL(sspsvx,SSPSVX) +void LAPACK_sspsvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* AP, + float* AFP, lapack_int* ipiv, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zspsvx LAPACK_GLOBAL(zspsvx,ZSPSVX) +void LAPACK_zspsvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* AP, + lapack_complex_double* AFP, lapack_int* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_dsptrd LAPACK_GLOBAL(dsptrd,DSPTRD) +void LAPACK_dsptrd( + char const* uplo, + lapack_int const* n, + double* AP, + double* D, + double* E, + double* tau, + lapack_int* info ); + +#define LAPACK_ssptrd LAPACK_GLOBAL(ssptrd,SSPTRD) +void LAPACK_ssptrd( + char const* uplo, + lapack_int const* n, + float* AP, + float* D, + float* E, + float* tau, + lapack_int* info ); + +#define LAPACK_csptrf LAPACK_GLOBAL(csptrf,CSPTRF) +void LAPACK_csptrf( + char const* uplo, + lapack_int const* n, + lapack_complex_float* AP, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_dsptrf LAPACK_GLOBAL(dsptrf,DSPTRF) +void LAPACK_dsptrf( + char const* uplo, + lapack_int const* n, + double* AP, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_ssptrf LAPACK_GLOBAL(ssptrf,SSPTRF) +void LAPACK_ssptrf( + char const* uplo, + lapack_int const* n, + float* AP, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_zsptrf LAPACK_GLOBAL(zsptrf,ZSPTRF) +void LAPACK_zsptrf( + char const* uplo, + lapack_int const* n, + lapack_complex_double* AP, lapack_int* ipiv, + lapack_int* info ); + +#define LAPACK_csptri LAPACK_GLOBAL(csptri,CSPTRI) +void LAPACK_csptri( + char const* uplo, + lapack_int const* n, + lapack_complex_float* AP, lapack_int const* ipiv, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dsptri LAPACK_GLOBAL(dsptri,DSPTRI) +void LAPACK_dsptri( + char const* uplo, + lapack_int const* n, + double* AP, lapack_int const* ipiv, + double* work, + lapack_int* info ); + +#define 
LAPACK_ssptri LAPACK_GLOBAL(ssptri,SSPTRI) +void LAPACK_ssptri( + char const* uplo, + lapack_int const* n, + float* AP, lapack_int const* ipiv, + float* work, + lapack_int* info ); + +#define LAPACK_zsptri LAPACK_GLOBAL(zsptri,ZSPTRI) +void LAPACK_zsptri( + char const* uplo, + lapack_int const* n, + lapack_complex_double* AP, lapack_int const* ipiv, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_csptrs LAPACK_GLOBAL(csptrs,CSPTRS) +void LAPACK_csptrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* AP, lapack_int const* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dsptrs LAPACK_GLOBAL(dsptrs,DSPTRS) +void LAPACK_dsptrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* AP, lapack_int const* ipiv, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_ssptrs LAPACK_GLOBAL(ssptrs,SSPTRS) +void LAPACK_ssptrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* AP, lapack_int const* ipiv, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_zsptrs LAPACK_GLOBAL(zsptrs,ZSPTRS) +void LAPACK_zsptrs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* AP, lapack_int const* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dstebz LAPACK_GLOBAL(dstebz,DSTEBZ) +void LAPACK_dstebz( + char const* range, char const* order, + lapack_int const* n, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, + double const* D, + double const* E, lapack_int* m, lapack_int* nsplit, + double* W, lapack_int* IBLOCK, lapack_int* ISPLIT, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_sstebz LAPACK_GLOBAL(sstebz,SSTEBZ) +void LAPACK_sstebz( + char const* range, char const* order, + lapack_int const* n, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, + float const* D, + float const* E, lapack_int* m, lapack_int* nsplit, + float* W, lapack_int* IBLOCK, lapack_int* ISPLIT, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_cstedc LAPACK_GLOBAL(cstedc,CSTEDC) +void LAPACK_cstedc( + char const* compz, + lapack_int const* n, + float* D, + float* E, + lapack_complex_float* Z, lapack_int const* ldz, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dstedc LAPACK_GLOBAL(dstedc,DSTEDC) +void LAPACK_dstedc( + char const* compz, + lapack_int const* n, + double* D, + double* E, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_sstedc LAPACK_GLOBAL(sstedc,SSTEDC) +void LAPACK_sstedc( + char const* compz, + lapack_int const* n, + float* D, + float* E, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zstedc LAPACK_GLOBAL(zstedc,ZSTEDC) +void LAPACK_zstedc( + char const* compz, + lapack_int const* n, + double* D, + double* E, + lapack_complex_double* Z, lapack_int const* ldz, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, 
lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_cstegr LAPACK_GLOBAL(cstegr,CSTEGR) +void LAPACK_cstegr( + char const* jobz, char const* range, + lapack_int const* n, + float* D, + float* E, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dstegr LAPACK_GLOBAL(dstegr,DSTEGR) +void LAPACK_dstegr( + char const* jobz, char const* range, + lapack_int const* n, + double* D, + double* E, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_sstegr LAPACK_GLOBAL(sstegr,SSTEGR) +void LAPACK_sstegr( + char const* jobz, char const* range, + lapack_int const* n, + float* D, + float* E, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zstegr LAPACK_GLOBAL(zstegr,ZSTEGR) +void LAPACK_zstegr( + char const* jobz, char const* range, + lapack_int const* n, + double* D, + double* E, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_cstein LAPACK_GLOBAL(cstein,CSTEIN) +void LAPACK_cstein( + lapack_int const* n, + float const* D, + float const* E, lapack_int const* m, + float const* W, lapack_int const* IBLOCK, lapack_int const* ISPLIT, + lapack_complex_float* Z, lapack_int const* ldz, + float* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_dstein LAPACK_GLOBAL(dstein,DSTEIN) +void LAPACK_dstein( + lapack_int const* n, + double const* D, + double const* E, lapack_int const* m, + double const* W, lapack_int const* IBLOCK, lapack_int const* ISPLIT, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_sstein LAPACK_GLOBAL(sstein,SSTEIN) +void LAPACK_sstein( + lapack_int const* n, + float const* D, + float const* E, lapack_int const* m, + float const* W, lapack_int const* IBLOCK, lapack_int const* ISPLIT, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_zstein LAPACK_GLOBAL(zstein,ZSTEIN) +void LAPACK_zstein( + lapack_int const* n, + double const* D, + double const* E, lapack_int const* m, + double const* W, lapack_int const* IBLOCK, lapack_int const* ISPLIT, + lapack_complex_double* Z, lapack_int const* ldz, + double* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_cstemr LAPACK_GLOBAL(cstemr,CSTEMR) +void LAPACK_cstemr( + char const* jobz, char const* range, + lapack_int const* n, + float* D, + float* E, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, 
lapack_int* m, + float* W, + lapack_complex_float* Z, lapack_int const* ldz, lapack_int const* nzc, lapack_int* ISUPPZ, lapack_logical* tryrac, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dstemr LAPACK_GLOBAL(dstemr,DSTEMR) +void LAPACK_dstemr( + char const* jobz, char const* range, + lapack_int const* n, + double* D, + double* E, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, lapack_int const* nzc, lapack_int* ISUPPZ, lapack_logical* tryrac, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_sstemr LAPACK_GLOBAL(sstemr,SSTEMR) +void LAPACK_sstemr( + char const* jobz, char const* range, + lapack_int const* n, + float* D, + float* E, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, lapack_int const* nzc, lapack_int* ISUPPZ, lapack_logical* tryrac, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_zstemr LAPACK_GLOBAL(zstemr,ZSTEMR) +void LAPACK_zstemr( + char const* jobz, char const* range, + lapack_int const* n, + double* D, + double* E, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, lapack_int* m, + double* W, + lapack_complex_double* Z, lapack_int const* ldz, lapack_int const* nzc, lapack_int* ISUPPZ, lapack_logical* tryrac, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_csteqr LAPACK_GLOBAL(csteqr,CSTEQR) +void LAPACK_csteqr( + char const* compz, + lapack_int const* n, + float* D, + float* E, + lapack_complex_float* Z, lapack_int const* ldz, + float* work, + lapack_int* info ); + +#define LAPACK_dsteqr LAPACK_GLOBAL(dsteqr,DSTEQR) +void LAPACK_dsteqr( + char const* compz, + lapack_int const* n, + double* D, + double* E, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* info ); + +#define LAPACK_ssteqr LAPACK_GLOBAL(ssteqr,SSTEQR) +void LAPACK_ssteqr( + char const* compz, + lapack_int const* n, + float* D, + float* E, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* info ); + +#define LAPACK_zsteqr LAPACK_GLOBAL(zsteqr,ZSTEQR) +void LAPACK_zsteqr( + char const* compz, + lapack_int const* n, + double* D, + double* E, + lapack_complex_double* Z, lapack_int const* ldz, + double* work, + lapack_int* info ); + +#define LAPACK_dsterf LAPACK_GLOBAL(dsterf,DSTERF) +void LAPACK_dsterf( + lapack_int const* n, + double* D, + double* E, + lapack_int* info ); + +#define LAPACK_ssterf LAPACK_GLOBAL(ssterf,SSTERF) +void LAPACK_ssterf( + lapack_int const* n, + float* D, + float* E, + lapack_int* info ); + +#define LAPACK_dstev LAPACK_GLOBAL(dstev,DSTEV) +void LAPACK_dstev( + char const* jobz, + lapack_int const* n, + double* D, + double* E, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* info ); + +#define LAPACK_sstev LAPACK_GLOBAL(sstev,SSTEV) +void LAPACK_sstev( + char const* jobz, + lapack_int const* n, + float* D, + float* E, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* info ); + +#define LAPACK_dstevd LAPACK_GLOBAL(dstevd,DSTEVD) +void LAPACK_dstevd( + char const* jobz, + lapack_int const* n, + double* D, + double* E, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* 
lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_sstevd LAPACK_GLOBAL(sstevd,SSTEVD) +void LAPACK_sstevd( + char const* jobz, + lapack_int const* n, + float* D, + float* E, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dstevr LAPACK_GLOBAL(dstevr,DSTEVR) +void LAPACK_dstevr( + char const* jobz, char const* range, + lapack_int const* n, + double* D, + double* E, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_sstevr LAPACK_GLOBAL(sstevr,SSTEVR) +void LAPACK_sstevr( + char const* jobz, char const* range, + lapack_int const* n, + float* D, + float* E, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dstevx LAPACK_GLOBAL(dstevx,DSTEVX) +void LAPACK_dstevx( + char const* jobz, char const* range, + lapack_int const* n, + double* D, + double* E, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_sstevx LAPACK_GLOBAL(sstevx,SSTEVX) +void LAPACK_sstevx( + char const* jobz, char const* range, + lapack_int const* n, + float* D, + float* E, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_csycon LAPACK_GLOBAL(csycon,CSYCON) +void LAPACK_csycon( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv, + float const* anorm, + float* rcond, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dsycon LAPACK_GLOBAL(dsycon,DSYCON) +void LAPACK_dsycon( + char const* uplo, + lapack_int const* n, + double const* A, lapack_int const* lda, lapack_int const* ipiv, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ssycon LAPACK_GLOBAL(ssycon,SSYCON) +void LAPACK_ssycon( + char const* uplo, + lapack_int const* n, + float const* A, lapack_int const* lda, lapack_int const* ipiv, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zsycon LAPACK_GLOBAL(zsycon,ZSYCON) +void LAPACK_zsycon( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv, + double const* anorm, + double* rcond, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_csycon_3 LAPACK_GLOBAL(csycon_3,CSYCON_3) +void LAPACK_csycon_3( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* E, lapack_int const* ipiv, + float const* anorm, + float* rcond, + lapack_complex_float* work, + 
lapack_int* info ); + +#define LAPACK_dsycon_3 LAPACK_GLOBAL(dsycon_3,DSYCON_3) +void LAPACK_dsycon_3( + char const* uplo, + lapack_int const* n, + double const* A, lapack_int const* lda, + double const* E, lapack_int const* ipiv, + double const* anorm, + double* rcond, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ssycon_3 LAPACK_GLOBAL(ssycon_3,SSYCON_3) +void LAPACK_ssycon_3( + char const* uplo, + lapack_int const* n, + float const* A, lapack_int const* lda, + float const* E, lapack_int const* ipiv, + float const* anorm, + float* rcond, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zsycon_3 LAPACK_GLOBAL(zsycon_3,ZSYCON_3) +void LAPACK_zsycon_3( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* E, lapack_int const* ipiv, + double const* anorm, + double* rcond, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_csyconv LAPACK_GLOBAL(csyconv,CSYCONV) +void LAPACK_csyconv( + char const* uplo, char const* way, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_float* E, + lapack_int* info ); + +#define LAPACK_dsyconv LAPACK_GLOBAL(dsyconv,DSYCONV) +void LAPACK_dsyconv( + char const* uplo, char const* way, + lapack_int const* n, + double* A, lapack_int const* lda, lapack_int const* ipiv, + double* E, + lapack_int* info ); + +#define LAPACK_ssyconv LAPACK_GLOBAL(ssyconv,SSYCONV) +void LAPACK_ssyconv( + char const* uplo, char const* way, + lapack_int const* n, + float* A, lapack_int const* lda, lapack_int const* ipiv, + float* E, + lapack_int* info ); + +#define LAPACK_zsyconv LAPACK_GLOBAL(zsyconv,ZSYCONV) +void LAPACK_zsyconv( + char const* uplo, char const* way, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv, + lapack_complex_double* E, + lapack_int* info ); + +#define LAPACK_csyequb LAPACK_GLOBAL(csyequb,CSYEQUB) +void LAPACK_csyequb( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + float* S, + float* scond, + float* amax, + lapack_complex_float* work, + lapack_int* info ); + +#define LAPACK_dsyequb LAPACK_GLOBAL(dsyequb,DSYEQUB) +void LAPACK_dsyequb( + char const* uplo, + lapack_int const* n, + double const* A, lapack_int const* lda, + double* S, + double* scond, + double* amax, + double* work, + lapack_int* info ); + +#define LAPACK_ssyequb LAPACK_GLOBAL(ssyequb,SSYEQUB) +void LAPACK_ssyequb( + char const* uplo, + lapack_int const* n, + float const* A, lapack_int const* lda, + float* S, + float* scond, + float* amax, + float* work, + lapack_int* info ); + +#define LAPACK_zsyequb LAPACK_GLOBAL(zsyequb,ZSYEQUB) +void LAPACK_zsyequb( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + double* S, + double* scond, + double* amax, + lapack_complex_double* work, + lapack_int* info ); + +#define LAPACK_dsyev LAPACK_GLOBAL(dsyev,DSYEV) +void LAPACK_dsyev( + char const* jobz, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* W, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssyev LAPACK_GLOBAL(ssyev,SSYEV) +void LAPACK_ssyev( + char const* jobz, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* W, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsyev_2stage 
LAPACK_GLOBAL(dsyev_2stage,DSYEV_2STAGE) +void LAPACK_dsyev_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* W, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssyev_2stage LAPACK_GLOBAL(ssyev_2stage,SSYEV_2STAGE) +void LAPACK_ssyev_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* W, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsyevd LAPACK_GLOBAL(dsyevd,DSYEVD) +void LAPACK_dsyevd( + char const* jobz, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* W, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_ssyevd LAPACK_GLOBAL(ssyevd,SSYEVD) +void LAPACK_ssyevd( + char const* jobz, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* W, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dsyevd_2stage LAPACK_GLOBAL(dsyevd_2stage,DSYEVD_2STAGE) +void LAPACK_dsyevd_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* W, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_ssyevd_2stage LAPACK_GLOBAL(ssyevd_2stage,SSYEVD_2STAGE) +void LAPACK_ssyevd_2stage( + char const* jobz, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* W, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dsyevr LAPACK_GLOBAL(dsyevr,DSYEVR) +void LAPACK_dsyevr( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_ssyevr LAPACK_GLOBAL(ssyevr,SSYEVR) +void LAPACK_ssyevr( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dsyevr_2stage LAPACK_GLOBAL(dsyevr_2stage,DSYEVR_2STAGE) +void LAPACK_dsyevr_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_ssyevr_2stage LAPACK_GLOBAL(ssyevr_2stage,SSYEVR_2STAGE) +void LAPACK_ssyevr_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* 
abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, lapack_int* ISUPPZ, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dsyevx LAPACK_GLOBAL(dsyevx,DSYEVX) +void LAPACK_dsyevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_ssyevx LAPACK_GLOBAL(ssyevx,SSYEVX) +void LAPACK_ssyevx( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_dsyevx_2stage LAPACK_GLOBAL(dsyevx_2stage,DSYEVX_2STAGE) +void LAPACK_dsyevx_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_ssyevx_2stage LAPACK_GLOBAL(ssyevx_2stage,SSYEVX_2STAGE) +void LAPACK_ssyevx_2stage( + char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_dsygst LAPACK_GLOBAL(dsygst,DSYGST) +void LAPACK_dsygst( + lapack_int const* itype, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double const* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_ssygst LAPACK_GLOBAL(ssygst,SSYGST) +void LAPACK_ssygst( + lapack_int const* itype, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float const* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dsygv LAPACK_GLOBAL(dsygv,DSYGV) +void LAPACK_dsygv( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* W, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssygv LAPACK_GLOBAL(ssygv,SSYGV) +void LAPACK_ssygv( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* W, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsygv_2stage LAPACK_GLOBAL(dsygv_2stage,DSYGV_2STAGE) +void LAPACK_dsygv_2stage( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* W, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define 
LAPACK_ssygv_2stage LAPACK_GLOBAL(ssygv_2stage,SSYGV_2STAGE) +void LAPACK_ssygv_2stage( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* W, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsygvd LAPACK_GLOBAL(dsygvd,DSYGVD) +void LAPACK_dsygvd( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double* W, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_ssygvd LAPACK_GLOBAL(ssygvd,SSYGVD) +void LAPACK_ssygvd( + lapack_int const* itype, char const* jobz, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float* W, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_dsygvx LAPACK_GLOBAL(dsygvx,DSYGVX) +void LAPACK_dsygvx( + lapack_int const* itype, char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* B, lapack_int const* ldb, + double const* vl, + double const* vu, lapack_int const* il, lapack_int const* iu, + double const* abstol, lapack_int* m, + double* W, + double* Z, lapack_int const* ldz, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_ssygvx LAPACK_GLOBAL(ssygvx,SSYGVX) +void LAPACK_ssygvx( + lapack_int const* itype, char const* jobz, char const* range, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + float const* vl, + float const* vu, lapack_int const* il, lapack_int const* iu, + float const* abstol, lapack_int* m, + float* W, + float* Z, lapack_int const* ldz, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int* IFAIL, + lapack_int* info ); + +#define LAPACK_csyr LAPACK_GLOBAL(csyr,CSYR) +void LAPACK_csyr( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* alpha, + lapack_complex_float const* X, lapack_int const* incx, + lapack_complex_float* A, lapack_int const* lda ); + +#define LAPACK_zsyr LAPACK_GLOBAL(zsyr,ZSYR) +void LAPACK_zsyr( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* alpha, + lapack_complex_double const* X, lapack_int const* incx, + lapack_complex_double* A, lapack_int const* lda ); + +#define LAPACK_csyrfs LAPACK_GLOBAL(csyrfs,CSYRFS) +void LAPACK_csyrfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* ferr, + float* berr, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dsyrfs LAPACK_GLOBAL(dsyrfs,DSYRFS) +void LAPACK_dsyrfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* lda, + double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* ferr, + double* berr, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ssyrfs LAPACK_GLOBAL(ssyrfs,SSYRFS) +void 
LAPACK_ssyrfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zsyrfs LAPACK_GLOBAL(zsyrfs,ZSYRFS) +void LAPACK_zsyrfs( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_csyrfsx LAPACK_GLOBAL(csyrfsx,CSYRFSX) +void LAPACK_csyrfsx( + char const* uplo, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + float* S, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dsyrfsx LAPACK_GLOBAL(dsyrfsx,DSYRFSX) +void LAPACK_dsyrfsx( + char const* uplo, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* lda, + double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + double* S, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ssyrfsx LAPACK_GLOBAL(ssyrfsx,SSYRFSX) +void LAPACK_ssyrfsx( + char const* uplo, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + float* S, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zsyrfsx LAPACK_GLOBAL(zsyrfsx,ZSYRFSX) +void LAPACK_zsyrfsx( + char const* uplo, char const* equed, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* AF, lapack_int const* ldaf, lapack_int const* ipiv, + double* S, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_csysv LAPACK_GLOBAL(csysv,CSYSV) +void LAPACK_csysv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + 
lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsysv LAPACK_GLOBAL(dsysv,DSYSV) +void LAPACK_dsysv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, lapack_int* ipiv, + double* B, lapack_int const* ldb, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssysv LAPACK_GLOBAL(ssysv,SSYSV) +void LAPACK_ssysv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, lapack_int* ipiv, + float* B, lapack_int const* ldb, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zsysv LAPACK_GLOBAL(zsysv,ZSYSV) +void LAPACK_zsysv( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_csysv_aa LAPACK_GLOBAL(csysv_aa,CSYSV_AA) +void LAPACK_csysv_aa( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsysv_aa LAPACK_GLOBAL(dsysv_aa,DSYSV_AA) +void LAPACK_dsysv_aa( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, lapack_int* ipiv, + double* B, lapack_int const* ldb, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssysv_aa LAPACK_GLOBAL(ssysv_aa,SSYSV_AA) +void LAPACK_ssysv_aa( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, lapack_int* ipiv, + float* B, lapack_int const* ldb, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zsysv_aa LAPACK_GLOBAL(zsysv_aa,ZSYSV_AA) +void LAPACK_zsysv_aa( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_csysv_aa_2stage LAPACK_GLOBAL(csysv_aa_2stage,CSYSV_AA_2STAGE) +void LAPACK_csysv_aa_2stage( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsysv_aa_2stage LAPACK_GLOBAL(dsysv_aa_2stage,DSYSV_AA_2STAGE) +void LAPACK_dsysv_aa_2stage( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2, + double* B, lapack_int const* ldb, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssysv_aa_2stage LAPACK_GLOBAL(ssysv_aa_2stage,SSYSV_AA_2STAGE) +void LAPACK_ssysv_aa_2stage( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2, + float* B, lapack_int const* ldb, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zsysv_aa_2stage 
LAPACK_GLOBAL(zsysv_aa_2stage,ZSYSV_AA_2STAGE) +void LAPACK_zsysv_aa_2stage( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_csysv_rk LAPACK_GLOBAL(csysv_rk,CSYSV_RK) +void LAPACK_csysv_rk( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* E, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsysv_rk LAPACK_GLOBAL(dsysv_rk,DSYSV_RK) +void LAPACK_dsysv_rk( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* E, lapack_int* ipiv, + double* B, lapack_int const* ldb, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssysv_rk LAPACK_GLOBAL(ssysv_rk,SSYSV_RK) +void LAPACK_ssysv_rk( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* E, lapack_int* ipiv, + float* B, lapack_int const* ldb, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zsysv_rk LAPACK_GLOBAL(zsysv_rk,ZSYSV_RK) +void LAPACK_zsysv_rk( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* E, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_csysv_rook LAPACK_GLOBAL(csysv_rook,CSYSV_ROOK) +void LAPACK_csysv_rook( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* B, lapack_int const* ldb, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsysv_rook LAPACK_GLOBAL(dsysv_rook,DSYSV_ROOK) +void LAPACK_dsysv_rook( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, lapack_int* ipiv, + double* B, lapack_int const* ldb, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssysv_rook LAPACK_GLOBAL(ssysv_rook,SSYSV_ROOK) +void LAPACK_ssysv_rook( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, lapack_int* ipiv, + float* B, lapack_int const* ldb, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zsysv_rook LAPACK_GLOBAL(zsysv_rook,ZSYSV_ROOK) +void LAPACK_zsysv_rook( + char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* B, lapack_int const* ldb, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_csysvx LAPACK_GLOBAL(csysvx,CSYSVX) +void LAPACK_csysvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float* AF, lapack_int const* ldaf, lapack_int* ipiv, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, 
+ lapack_complex_float* work, lapack_int const* lwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dsysvx LAPACK_GLOBAL(dsysvx,DSYSVX) +void LAPACK_dsysvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* lda, + double* AF, lapack_int const* ldaf, lapack_int* ipiv, + double const* B, lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + double* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ssysvx LAPACK_GLOBAL(ssysvx,SSYSVX) +void LAPACK_ssysvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float* AF, lapack_int const* ldaf, lapack_int* ipiv, + float const* B, lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* ferr, + float* berr, + float* work, lapack_int const* lwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zsysvx LAPACK_GLOBAL(zsysvx,ZSYSVX) +void LAPACK_zsysvx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double* AF, lapack_int const* ldaf, lapack_int* ipiv, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* ferr, + double* berr, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_csysvxx LAPACK_GLOBAL(csysvxx,CSYSVXX) +void LAPACK_csysvxx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + float* S, + lapack_complex_float* B, + lapack_int const* ldb, + lapack_complex_float* X, lapack_int const* ldx, + float* rcond, + float* rpvgrw, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + lapack_complex_float* work, + float* rwork, + lapack_int* info ); + +#define LAPACK_dsysvxx LAPACK_GLOBAL(dsysvxx,DSYSVXX) +void LAPACK_dsysvxx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + double* A, lapack_int const* lda, + double* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + double* S, + double* B, + lapack_int const* ldb, + double* X, lapack_int const* ldx, + double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + double* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ssysvxx LAPACK_GLOBAL(ssysvxx,SSYSVXX) +void LAPACK_ssysvxx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + float* A, lapack_int const* lda, + float* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + float* S, + float* B, + lapack_int const* ldb, + float* X, lapack_int const* ldx, + float* rcond, + float* rpvgrw, + float* berr, lapack_int const* n_err_bnds, + float* err_bnds_norm, + float* err_bnds_comp, lapack_int const* nparams, + float* params, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zsysvxx LAPACK_GLOBAL(zsysvxx,ZSYSVXX) +void LAPACK_zsysvxx( + char const* fact, char const* uplo, + lapack_int const* n, lapack_int const* nrhs, + 
lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* AF, lapack_int const* ldaf, lapack_int* ipiv, char* equed, + double* S, + lapack_complex_double* B, + lapack_int const* ldb, + lapack_complex_double* X, lapack_int const* ldx, + double* rcond, + double* rpvgrw, + double* berr, lapack_int const* n_err_bnds, + double* err_bnds_norm, + double* err_bnds_comp, lapack_int const* nparams, + double* params, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_csyswapr LAPACK_GLOBAL(csyswapr,CSYSWAPR) +void LAPACK_csyswapr( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int const* i1, lapack_int const* i2 ); + +#define LAPACK_dsyswapr LAPACK_GLOBAL(dsyswapr,DSYSWAPR) +void LAPACK_dsyswapr( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, lapack_int const* i1, lapack_int const* i2 ); + +#define LAPACK_ssyswapr LAPACK_GLOBAL(ssyswapr,SSYSWAPR) +void LAPACK_ssyswapr( + char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, lapack_int const* i1, lapack_int const* i2 ); + +#define LAPACK_zsyswapr LAPACK_GLOBAL(zsyswapr,ZSYSWAPR) +void LAPACK_zsyswapr( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int const* i1, lapack_int const* i2 ); + +#define LAPACK_dsytrd LAPACK_GLOBAL(dsytrd,DSYTRD) +void LAPACK_dsytrd( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* D, + double* E, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssytrd LAPACK_GLOBAL(ssytrd,SSYTRD) +void LAPACK_ssytrd( + char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* D, + float* E, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsytrd_2stage LAPACK_GLOBAL(dsytrd_2stage,DSYTRD_2STAGE) +void LAPACK_dsytrd_2stage( + char const* vect, char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, + double* D, + double* E, + double* tau, + double* HOUS2, lapack_int const* lhous2, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssytrd_2stage LAPACK_GLOBAL(ssytrd_2stage,SSYTRD_2STAGE) +void LAPACK_ssytrd_2stage( + char const* vect, char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, + float* D, + float* E, + float* tau, + float* HOUS2, lapack_int const* lhous2, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_csytrf LAPACK_GLOBAL(csytrf,CSYTRF) +void LAPACK_csytrf( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dsytrf LAPACK_GLOBAL(dsytrf,DSYTRF) +void LAPACK_dsytrf( + char const* uplo, + lapack_int const* n, + double* A, lapack_int const* lda, lapack_int* ipiv, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ssytrf LAPACK_GLOBAL(ssytrf,SSYTRF) +void LAPACK_ssytrf( + char const* uplo, + lapack_int const* n, + float* A, lapack_int const* lda, lapack_int* ipiv, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zsytrf LAPACK_GLOBAL(zsytrf,ZSYTRF) +void LAPACK_zsytrf( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + 
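/* Illustrative sketch only -- not part of the patch above. It shows the
 * calling convention that all of these prototypes share: every argument is
 * passed by pointer, matrices are stored column-major, and routines that
 * take an lwork parameter support the usual workspace query (lwork = -1
 * returns the optimal size in work[0]). Demonstrated with LAPACK_dsyev as
 * declared in this header; the include name, and lapack_int being a plain
 * 32-bit int (the default, non-ILP64 build), are assumptions here. */
#include <stdio.h>
#include <stdlib.h>
#include "lapack.h"

int main(void) {
    char jobz = 'V', uplo = 'U';       /* eigenvectors, use upper triangle */
    lapack_int n = 3, lda = 3, lwork = -1, info = 0;
    /* Column-major 3x3 symmetric matrix. */
    double A[9] = { 2, 1, 0,
                    1, 2, 1,
                    0, 1, 2 };
    double W[3], wkopt;

    /* Workspace query: with lwork = -1 the optimal size comes back in wkopt. */
    LAPACK_dsyev(&jobz, &uplo, &n, A, &lda, W, &wkopt, &lwork, &info);
    lwork = (lapack_int)wkopt;
    double *work = malloc((size_t)lwork * sizeof *work);

    /* Actual call: on success W holds the eigenvalues in ascending order
     * and the columns of A are overwritten with the eigenvectors. */
    LAPACK_dsyev(&jobz, &uplo, &n, A, &lda, W, work, &lwork, &info);
    if (info == 0)
        printf("eigenvalues: %g %g %g\n", W[0], W[1], W[2]);
    free(work);
    return 0;
}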
+#define LAPACK_csytrf_aa LAPACK_GLOBAL(csytrf_aa,CSYTRF_AA)
+void LAPACK_csytrf_aa(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dsytrf_aa LAPACK_GLOBAL(dsytrf_aa,DSYTRF_AA)
+void LAPACK_dsytrf_aa(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda, lapack_int* ipiv,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ssytrf_aa LAPACK_GLOBAL(ssytrf_aa,SSYTRF_AA)
+void LAPACK_ssytrf_aa(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda, lapack_int* ipiv,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zsytrf_aa LAPACK_GLOBAL(zsytrf_aa,ZSYTRF_AA)
+void LAPACK_zsytrf_aa(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_csytrf_aa_2stage LAPACK_GLOBAL(csytrf_aa_2stage,CSYTRF_AA_2STAGE)
+void LAPACK_csytrf_aa_2stage(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dsytrf_aa_2stage LAPACK_GLOBAL(dsytrf_aa_2stage,DSYTRF_AA_2STAGE)
+void LAPACK_dsytrf_aa_2stage(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda,
+    double* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ssytrf_aa_2stage LAPACK_GLOBAL(ssytrf_aa_2stage,SSYTRF_AA_2STAGE)
+void LAPACK_ssytrf_aa_2stage(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda,
+    float* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zsytrf_aa_2stage LAPACK_GLOBAL(zsytrf_aa_2stage,ZSYTRF_AA_2STAGE)
+void LAPACK_zsytrf_aa_2stage(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* TB, lapack_int const* ltb, lapack_int* ipiv, lapack_int* ipiv2,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_csytrf_rk LAPACK_GLOBAL(csytrf_rk,CSYTRF_RK)
+void LAPACK_csytrf_rk(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* E, lapack_int* ipiv,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dsytrf_rk LAPACK_GLOBAL(dsytrf_rk,DSYTRF_RK)
+void LAPACK_dsytrf_rk(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda,
+    double* E, lapack_int* ipiv,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ssytrf_rk LAPACK_GLOBAL(ssytrf_rk,SSYTRF_RK)
+void LAPACK_ssytrf_rk(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda,
+    float* E, lapack_int* ipiv,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zsytrf_rk LAPACK_GLOBAL(zsytrf_rk,ZSYTRF_RK)
+void LAPACK_zsytrf_rk(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* E, lapack_int* ipiv,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_csytrf_rook LAPACK_GLOBAL(csytrf_rook,CSYTRF_ROOK)
+void LAPACK_csytrf_rook(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int* ipiv,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dsytrf_rook LAPACK_GLOBAL(dsytrf_rook,DSYTRF_ROOK)
+void LAPACK_dsytrf_rook(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda, lapack_int* ipiv,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ssytrf_rook LAPACK_GLOBAL(ssytrf_rook,SSYTRF_ROOK)
+void LAPACK_ssytrf_rook(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda, lapack_int* ipiv,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zsytrf_rook LAPACK_GLOBAL(zsytrf_rook,ZSYTRF_ROOK)
+void LAPACK_zsytrf_rook(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int* ipiv,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_csytri LAPACK_GLOBAL(csytri,CSYTRI)
+void LAPACK_csytri(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dsytri LAPACK_GLOBAL(dsytri,DSYTRI)
+void LAPACK_dsytri(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda, lapack_int const* ipiv,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_ssytri LAPACK_GLOBAL(ssytri,SSYTRI)
+void LAPACK_ssytri(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda, lapack_int const* ipiv,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_zsytri LAPACK_GLOBAL(zsytri,ZSYTRI)
+void LAPACK_zsytri(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_csytri2 LAPACK_GLOBAL(csytri2,CSYTRI2)
+void LAPACK_csytri2(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dsytri2 LAPACK_GLOBAL(dsytri2,DSYTRI2)
+void LAPACK_dsytri2(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda, lapack_int const* ipiv,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ssytri2 LAPACK_GLOBAL(ssytri2,SSYTRI2)
+void LAPACK_ssytri2(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda, lapack_int const* ipiv,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zsytri2 LAPACK_GLOBAL(zsytri2,ZSYTRI2)
+void LAPACK_zsytri2(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_csytri2x LAPACK_GLOBAL(csytri2x,CSYTRI2X)
+void LAPACK_csytri2x(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* work, lapack_int const* nb,
+    lapack_int* info );
+
+#define LAPACK_dsytri2x LAPACK_GLOBAL(dsytri2x,DSYTRI2X)
+void LAPACK_dsytri2x(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda, lapack_int const* ipiv,
+    double* work, lapack_int const* nb,
+    lapack_int* info );
+
+#define LAPACK_ssytri2x LAPACK_GLOBAL(ssytri2x,SSYTRI2X)
+void LAPACK_ssytri2x(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda, lapack_int const* ipiv,
+    float* work, lapack_int const* nb,
+    lapack_int* info );
+
+#define LAPACK_zsytri2x LAPACK_GLOBAL(zsytri2x,ZSYTRI2X)
+void LAPACK_zsytri2x(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* work, lapack_int const* nb,
+    lapack_int* info );
+
+#define LAPACK_csytri_3 LAPACK_GLOBAL(csytri_3,CSYTRI_3)
+void LAPACK_csytri_3(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float const* E, lapack_int const* ipiv,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dsytri_3 LAPACK_GLOBAL(dsytri_3,DSYTRI_3)
+void LAPACK_dsytri_3(
+    char const* uplo,
+    lapack_int const* n,
+    double* A, lapack_int const* lda,
+    double const* E, lapack_int const* ipiv,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ssytri_3 LAPACK_GLOBAL(ssytri_3,SSYTRI_3)
+void LAPACK_ssytri_3(
+    char const* uplo,
+    lapack_int const* n,
+    float* A, lapack_int const* lda,
+    float const* E, lapack_int const* ipiv,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zsytri_3 LAPACK_GLOBAL(zsytri_3,ZSYTRI_3)
+void LAPACK_zsytri_3(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double const* E, lapack_int const* ipiv,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_csytrs LAPACK_GLOBAL(csytrs,CSYTRS)
+void LAPACK_csytrs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_dsytrs LAPACK_GLOBAL(dsytrs,DSYTRS)
+void LAPACK_dsytrs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ssytrs LAPACK_GLOBAL(ssytrs,SSYTRS)
+void LAPACK_ssytrs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zsytrs LAPACK_GLOBAL(zsytrs,ZSYTRS)
+void LAPACK_zsytrs(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
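+/* Editorial sketch, not part of the upstream header: the usual
+ * factor-then-solve pairing of dsytrf (above) with dsytrs, solving
+ * A*x = b for a symmetric indefinite A. Sizes and values are
+ * hypothetical, and lwork is simply chosen large enough here:
+ *
+ *     lapack_int n = 3, nrhs = 1, lda = 3, ldb = 3, lwork = 64*3, info;
+ *     double A[9] = {4,1,2, 1,3,0, 2,0,5};   // "U": upper triangle used
+ *     double B[3] = {1,2,3};                 // overwritten with x
+ *     lapack_int ipiv[3];
+ *     double work[64*3];
+ *     LAPACK_dsytrf("U", &n, A, &lda, ipiv, work, &lwork, &info);
+ *     if (info == 0)
+ *         LAPACK_dsytrs("U", &n, &nrhs, A, &lda, ipiv, B, &ldb, &info);
+ */
+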
+#define LAPACK_csytrs2 LAPACK_GLOBAL(csytrs2,CSYTRS2)
+void LAPACK_csytrs2(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dsytrs2 LAPACK_GLOBAL(dsytrs2,DSYTRS2)
+void LAPACK_dsytrs2(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    double* A, lapack_int const* lda, lapack_int const* ipiv,
+    double* B, lapack_int const* ldb,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_ssytrs2 LAPACK_GLOBAL(ssytrs2,SSYTRS2)
+void LAPACK_ssytrs2(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    float* A, lapack_int const* lda, lapack_int const* ipiv,
+    float* B, lapack_int const* ldb,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_zsytrs2 LAPACK_GLOBAL(zsytrs2,ZSYTRS2)
+void LAPACK_zsytrs2(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_csytrs_3 LAPACK_GLOBAL(csytrs_3,CSYTRS_3)
+void LAPACK_csytrs_3(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda,
+    lapack_complex_float const* E, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_dsytrs_3 LAPACK_GLOBAL(dsytrs_3,DSYTRS_3)
+void LAPACK_dsytrs_3(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    double const* A, lapack_int const* lda,
+    double const* E, lapack_int const* ipiv,
+    double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ssytrs_3 LAPACK_GLOBAL(ssytrs_3,SSYTRS_3)
+void LAPACK_ssytrs_3(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    float const* A, lapack_int const* lda,
+    float const* E, lapack_int const* ipiv,
+    float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zsytrs_3 LAPACK_GLOBAL(zsytrs_3,ZSYTRS_3)
+void LAPACK_zsytrs_3(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_double const* E, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_csytrs_aa LAPACK_GLOBAL(csytrs_aa,CSYTRS_AA)
+void LAPACK_csytrs_aa(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_dsytrs_aa LAPACK_GLOBAL(dsytrs_aa,DSYTRS_AA)
+void LAPACK_dsytrs_aa(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    double* B, lapack_int const* ldb,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ssytrs_aa LAPACK_GLOBAL(ssytrs_aa,SSYTRS_AA)
+void LAPACK_ssytrs_aa(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    float* B, lapack_int const* ldb,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zsytrs_aa LAPACK_GLOBAL(zsytrs_aa,ZSYTRS_AA)
+void LAPACK_zsytrs_aa(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_csytrs_aa_2stage LAPACK_GLOBAL(csytrs_aa_2stage,CSYTRS_AA_2STAGE)
+void LAPACK_csytrs_aa_2stage(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda,
+    lapack_complex_float* TB, lapack_int const* ltb, lapack_int const* ipiv, lapack_int const* ipiv2,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_dsytrs_aa_2stage LAPACK_GLOBAL(dsytrs_aa_2stage,DSYTRS_AA_2STAGE)
+void LAPACK_dsytrs_aa_2stage(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    double const* A, lapack_int const* lda,
+    double* TB, lapack_int const* ltb, lapack_int const* ipiv, lapack_int const* ipiv2,
+    double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ssytrs_aa_2stage LAPACK_GLOBAL(ssytrs_aa_2stage,SSYTRS_AA_2STAGE)
+void LAPACK_ssytrs_aa_2stage(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    float const* A, lapack_int const* lda,
+    float* TB, lapack_int const* ltb, lapack_int const* ipiv, lapack_int const* ipiv2,
+    float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zsytrs_aa_2stage LAPACK_GLOBAL(zsytrs_aa_2stage,ZSYTRS_AA_2STAGE)
+void LAPACK_zsytrs_aa_2stage(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_double* TB, lapack_int const* ltb, lapack_int const* ipiv, lapack_int const* ipiv2,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_csytrs_rook LAPACK_GLOBAL(csytrs_rook,CSYTRS_ROOK)
+void LAPACK_csytrs_rook(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_dsytrs_rook LAPACK_GLOBAL(dsytrs_rook,DSYTRS_ROOK)
+void LAPACK_dsytrs_rook(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ssytrs_rook LAPACK_GLOBAL(ssytrs_rook,SSYTRS_ROOK)
+void LAPACK_ssytrs_rook(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    float const* A, lapack_int const* lda, lapack_int const* ipiv,
+    float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_zsytrs_rook LAPACK_GLOBAL(zsytrs_rook,ZSYTRS_ROOK)
+void LAPACK_zsytrs_rook(
+    char const* uplo,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* A, lapack_int const* lda, lapack_int const* ipiv,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ctbcon LAPACK_GLOBAL(ctbcon,CTBCON)
+void LAPACK_ctbcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n, lapack_int const* kd,
+    lapack_complex_float const* AB, lapack_int const* ldab,
+    float* rcond,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_dtbcon LAPACK_GLOBAL(dtbcon,DTBCON)
+void LAPACK_dtbcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n, lapack_int const* kd,
+    double const* AB, lapack_int const* ldab,
+    double* rcond,
+    double* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_stbcon LAPACK_GLOBAL(stbcon,STBCON)
+void LAPACK_stbcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n, lapack_int const* kd,
+    float const* AB, lapack_int const* ldab,
+    float* rcond,
+    float* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ztbcon LAPACK_GLOBAL(ztbcon,ZTBCON)
+void LAPACK_ztbcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n, lapack_int const* kd,
+    lapack_complex_double const* AB, lapack_int const* ldab,
+    double* rcond,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_ctbrfs LAPACK_GLOBAL(ctbrfs,CTBRFS)
+void LAPACK_ctbrfs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+    lapack_complex_float const* AB, lapack_int const* ldab,
+    lapack_complex_float const* B, lapack_int const* ldb,
+    lapack_complex_float const* X, lapack_int const* ldx,
+    float* ferr,
+    float* berr,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_dtbrfs LAPACK_GLOBAL(dtbrfs,DTBRFS)
+void LAPACK_dtbrfs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+    double const* AB, lapack_int const* ldab,
+    double const* B, lapack_int const* ldb,
+    double const* X, lapack_int const* ldx,
+    double* ferr,
+    double* berr,
+    double* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_stbrfs LAPACK_GLOBAL(stbrfs,STBRFS)
+void LAPACK_stbrfs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+    float const* AB, lapack_int const* ldab,
+    float const* B, lapack_int const* ldb,
+    float const* X, lapack_int const* ldx,
+    float* ferr,
+    float* berr,
+    float* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ztbrfs LAPACK_GLOBAL(ztbrfs,ZTBRFS)
+void LAPACK_ztbrfs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+    lapack_complex_double const* AB, lapack_int const* ldab,
+    lapack_complex_double const* B, lapack_int const* ldb,
+    lapack_complex_double const* X, lapack_int const* ldx,
+    double* ferr,
+    double* berr,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_ctbtrs LAPACK_GLOBAL(ctbtrs,CTBTRS)
+void LAPACK_ctbtrs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+    lapack_complex_float const* AB, lapack_int const* ldab,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_dtbtrs LAPACK_GLOBAL(dtbtrs,DTBTRS)
+void LAPACK_dtbtrs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+    double const* AB, lapack_int const* ldab,
+    double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_stbtrs LAPACK_GLOBAL(stbtrs,STBTRS)
+void LAPACK_stbtrs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+    float const* AB, lapack_int const* ldab,
+    float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ztbtrs LAPACK_GLOBAL(ztbtrs,ZTBTRS)
+void LAPACK_ztbtrs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* kd, lapack_int const* nrhs,
+    lapack_complex_double const* AB, lapack_int const* ldab,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ctfsm LAPACK_GLOBAL(ctfsm,CTFSM)
+void LAPACK_ctfsm(
+    char const* transr, char const* side, char const* uplo, char const* trans, char const* diag,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* alpha,
+    lapack_complex_float const* A,
+    lapack_complex_float* B, lapack_int const* ldb );
+
+#define LAPACK_dtfsm LAPACK_GLOBAL(dtfsm,DTFSM)
+void LAPACK_dtfsm(
+    char const* transr, char const* side, char const* uplo, char const* trans, char const* diag,
+    lapack_int const* m, lapack_int const* n,
+    double const* alpha,
+    double const* A,
+    double* B, lapack_int const* ldb );
+
+#define LAPACK_stfsm LAPACK_GLOBAL(stfsm,STFSM)
+void LAPACK_stfsm(
+    char const* transr, char const* side, char const* uplo, char const* trans, char const* diag,
+    lapack_int const* m, lapack_int const* n,
+    float const* alpha,
+    float const* A,
+    float* B, lapack_int const* ldb );
+
+#define LAPACK_ztfsm LAPACK_GLOBAL(ztfsm,ZTFSM)
+void LAPACK_ztfsm(
+    char const* transr, char const* side, char const* uplo, char const* trans, char const* diag,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* alpha,
+    lapack_complex_double const* A,
+    lapack_complex_double* B, lapack_int const* ldb );
+
+#define LAPACK_ctftri LAPACK_GLOBAL(ctftri,CTFTRI)
+void LAPACK_ctftri(
+    char const* transr, char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_float* A,
+    lapack_int* info );
+
+#define LAPACK_dtftri LAPACK_GLOBAL(dtftri,DTFTRI)
+void LAPACK_dtftri(
+    char const* transr, char const* uplo, char const* diag,
+    lapack_int const* n,
+    double* A,
+    lapack_int* info );
+
+#define LAPACK_stftri LAPACK_GLOBAL(stftri,STFTRI)
+void LAPACK_stftri(
+    char const* transr, char const* uplo, char const* diag,
+    lapack_int const* n,
+    float* A,
+    lapack_int* info );
+
+#define LAPACK_ztftri LAPACK_GLOBAL(ztftri,ZTFTRI)
+void LAPACK_ztftri(
+    char const* transr, char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_double* A,
+    lapack_int* info );
+
+#define LAPACK_ctfttp LAPACK_GLOBAL(ctfttp,CTFTTP)
+void LAPACK_ctfttp(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* ARF,
+    lapack_complex_float* AP,
+    lapack_int* info );
+
+#define LAPACK_dtfttp LAPACK_GLOBAL(dtfttp,DTFTTP)
+void LAPACK_dtfttp(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    double const* ARF,
+    double* AP,
+    lapack_int* info );
+
+#define LAPACK_stfttp LAPACK_GLOBAL(stfttp,STFTTP)
+void LAPACK_stfttp(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    float const* ARF,
+    float* AP,
+    lapack_int* info );
+
+#define LAPACK_ztfttp LAPACK_GLOBAL(ztfttp,ZTFTTP)
+void LAPACK_ztfttp(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* ARF,
+    lapack_complex_double* AP,
+    lapack_int* info );
+
+#define LAPACK_ctfttr LAPACK_GLOBAL(ctfttr,CTFTTR)
+void LAPACK_ctfttr(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* ARF,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_dtfttr LAPACK_GLOBAL(dtfttr,DTFTTR)
+void LAPACK_dtfttr(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    double const* ARF,
+    double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_stfttr LAPACK_GLOBAL(stfttr,STFTTR)
+void LAPACK_stfttr(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    float const* ARF,
+    float* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_ztfttr LAPACK_GLOBAL(ztfttr,ZTFTTR)
+void LAPACK_ztfttr(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* ARF,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_ctgevc LAPACK_GLOBAL(ctgevc,CTGEVC)
+void LAPACK_ctgevc(
+    char const* side, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_float const* S, lapack_int const* lds,
+    lapack_complex_float const* P, lapack_int const* ldp,
+    lapack_complex_float* VL, lapack_int const* ldvl,
+    lapack_complex_float* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_dtgevc LAPACK_GLOBAL(dtgevc,DTGEVC)
+void LAPACK_dtgevc(
+    char const* side, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    double const* S, lapack_int const* lds,
+    double const* P, lapack_int const* ldp,
+    double* VL, lapack_int const* ldvl,
+    double* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_stgevc LAPACK_GLOBAL(stgevc,STGEVC)
+void LAPACK_stgevc(
+    char const* side, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    float const* S, lapack_int const* lds,
+    float const* P, lapack_int const* ldp,
+    float* VL, lapack_int const* ldvl,
+    float* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_ztgevc LAPACK_GLOBAL(ztgevc,ZTGEVC)
+void LAPACK_ztgevc(
+    char const* side, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_double const* S, lapack_int const* lds,
+    lapack_complex_double const* P, lapack_int const* ldp,
+    lapack_complex_double* VL, lapack_int const* ldvl,
+    lapack_complex_double* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_ctgexc LAPACK_GLOBAL(ctgexc,CTGEXC)
+void LAPACK_ctgexc(
+    lapack_logical const* wantq, lapack_logical const* wantz, lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* Q, lapack_int const* ldq,
+    lapack_complex_float* Z, lapack_int const* ldz, lapack_int const* ifst, lapack_int* ilst,
+    lapack_int* info );
+
+#define LAPACK_dtgexc LAPACK_GLOBAL(dtgexc,DTGEXC)
+void LAPACK_dtgexc(
+    lapack_logical const* wantq, lapack_logical const* wantz, lapack_int const* n,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* Q, lapack_int const* ldq,
+    double* Z, lapack_int const* ldz, lapack_int* ifst, lapack_int* ilst,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_stgexc LAPACK_GLOBAL(stgexc,STGEXC)
+void LAPACK_stgexc(
+    lapack_logical const* wantq, lapack_logical const* wantz, lapack_int const* n,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* Q, lapack_int const* ldq,
+    float* Z, lapack_int const* ldz, lapack_int* ifst, lapack_int* ilst,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ztgexc LAPACK_GLOBAL(ztgexc,ZTGEXC)
+void LAPACK_ztgexc(
+    lapack_logical const* wantq, lapack_logical const* wantz, lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* Q, lapack_int const* ldq,
+    lapack_complex_double* Z, lapack_int const* ldz, lapack_int const* ifst, lapack_int* ilst,
+    lapack_int* info );
+
+#define LAPACK_ctgsen LAPACK_GLOBAL(ctgsen,CTGSEN)
+void LAPACK_ctgsen(
+    lapack_int const* ijob, lapack_logical const* wantq, lapack_logical const* wantz, lapack_logical const* select, lapack_int const* n,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* alpha,
+    lapack_complex_float* beta,
+    lapack_complex_float* Q, lapack_int const* ldq,
+    lapack_complex_float* Z, lapack_int const* ldz, lapack_int* m,
+    float* pl,
+    float* pr,
+    float* DIF,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* iwork, lapack_int const* liwork,
+    lapack_int* info );
+
+#define LAPACK_dtgsen LAPACK_GLOBAL(dtgsen,DTGSEN)
+void LAPACK_dtgsen(
+    lapack_int const* ijob, lapack_logical const* wantq, lapack_logical const* wantz, lapack_logical const* select, lapack_int const* n,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* alphar,
+    double* alphai,
+    double* beta,
+    double* Q, lapack_int const* ldq,
+    double* Z, lapack_int const* ldz, lapack_int* m,
+    double* pl,
+    double* pr,
+    double* DIF,
+    double* work, lapack_int const* lwork,
+    lapack_int* iwork, lapack_int const* liwork,
+    lapack_int* info );
+
+#define LAPACK_stgsen LAPACK_GLOBAL(stgsen,STGSEN)
+void LAPACK_stgsen(
+    lapack_int const* ijob, lapack_logical const* wantq, lapack_logical const* wantz, lapack_logical const* select, lapack_int const* n,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* alphar,
+    float* alphai,
+    float* beta,
+    float* Q, lapack_int const* ldq,
+    float* Z, lapack_int const* ldz, lapack_int* m,
+    float* pl,
+    float* pr,
+    float* DIF,
+    float* work, lapack_int const* lwork,
+    lapack_int* iwork, lapack_int const* liwork,
+    lapack_int* info );
+
+#define LAPACK_ztgsen LAPACK_GLOBAL(ztgsen,ZTGSEN)
+void LAPACK_ztgsen(
+    lapack_int const* ijob, lapack_logical const* wantq, lapack_logical const* wantz, lapack_logical const* select, lapack_int const* n,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* alpha,
+    lapack_complex_double* beta,
+    lapack_complex_double* Q, lapack_int const* ldq,
+    lapack_complex_double* Z, lapack_int const* ldz, lapack_int* m,
+    double* pl,
+    double* pr,
+    double* DIF,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* iwork, lapack_int const* liwork,
+    lapack_int* info );
+
+#define LAPACK_ctgsja LAPACK_GLOBAL(ctgsja,CTGSJA)
+void LAPACK_ctgsja(
+    char const* jobu, char const* jobv, char const* jobq,
+    lapack_int const* m, lapack_int const* p, lapack_int const* n, lapack_int const* k, lapack_int const* l,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    float const* tola,
+    float const* tolb,
+    float* alpha,
+    float* beta,
+    lapack_complex_float* U, lapack_int const* ldu,
+    lapack_complex_float* V, lapack_int const* ldv,
+    lapack_complex_float* Q, lapack_int const* ldq,
+    lapack_complex_float* work, lapack_int* ncycle,
+    lapack_int* info );
+
+#define LAPACK_dtgsja LAPACK_GLOBAL(dtgsja,DTGSJA)
+void LAPACK_dtgsja(
+    char const* jobu, char const* jobv, char const* jobq,
+    lapack_int const* m, lapack_int const* p, lapack_int const* n, lapack_int const* k, lapack_int const* l,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double const* tola,
+    double const* tolb,
+    double* alpha,
+    double* beta,
+    double* U, lapack_int const* ldu,
+    double* V, lapack_int const* ldv,
+    double* Q, lapack_int const* ldq,
+    double* work, lapack_int* ncycle,
+    lapack_int* info );
+
+#define LAPACK_stgsja LAPACK_GLOBAL(stgsja,STGSJA)
+void LAPACK_stgsja(
+    char const* jobu, char const* jobv, char const* jobq,
+    lapack_int const* m, lapack_int const* p, lapack_int const* n, lapack_int const* k, lapack_int const* l,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float const* tola,
+    float const* tolb,
+    float* alpha,
+    float* beta,
+    float* U, lapack_int const* ldu,
+    float* V, lapack_int const* ldv,
+    float* Q, lapack_int const* ldq,
+    float* work, lapack_int* ncycle,
+    lapack_int* info );
+
+#define LAPACK_ztgsja LAPACK_GLOBAL(ztgsja,ZTGSJA)
+void LAPACK_ztgsja(
+    char const* jobu, char const* jobv, char const* jobq,
+    lapack_int const* m, lapack_int const* p, lapack_int const* n, lapack_int const* k, lapack_int const* l,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    double const* tola,
+    double const* tolb,
+    double* alpha,
+    double* beta,
+    lapack_complex_double* U, lapack_int const* ldu,
+    lapack_complex_double* V, lapack_int const* ldv,
+    lapack_complex_double* Q, lapack_int const* ldq,
+    lapack_complex_double* work, lapack_int* ncycle,
+    lapack_int* info );
+
+#define LAPACK_ctgsna LAPACK_GLOBAL(ctgsna,CTGSNA)
+void LAPACK_ctgsna(
+    char const* job, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    lapack_complex_float const* B, lapack_int const* ldb,
+    lapack_complex_float const* VL, lapack_int const* ldvl,
+    lapack_complex_float const* VR, lapack_int const* ldvr,
+    float* S,
+    float* DIF, lapack_int const* mm, lapack_int* m,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_dtgsna LAPACK_GLOBAL(dtgsna,DTGSNA)
+void LAPACK_dtgsna(
+    char const* job, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    double const* B, lapack_int const* ldb,
+    double const* VL, lapack_int const* ldvl,
+    double const* VR, lapack_int const* ldvr,
+    double* S,
+    double* DIF, lapack_int const* mm, lapack_int* m,
+    double* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_stgsna LAPACK_GLOBAL(stgsna,STGSNA)
+void LAPACK_stgsna(
+    char const* job, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    float const* B, lapack_int const* ldb,
+    float const* VL, lapack_int const* ldvl,
+    float const* VR, lapack_int const* ldvr,
+    float* S,
+    float* DIF, lapack_int const* mm, lapack_int* m,
+    float* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ztgsna LAPACK_GLOBAL(ztgsna,ZTGSNA)
+void LAPACK_ztgsna(
+    char const* job, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_double const* B, lapack_int const* ldb,
+    lapack_complex_double const* VL, lapack_int const* ldvl,
+    lapack_complex_double const* VR, lapack_int const* ldvr,
+    double* S,
+    double* DIF, lapack_int const* mm, lapack_int* m,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ctgsyl LAPACK_GLOBAL(ctgsyl,CTGSYL)
+void LAPACK_ctgsyl(
+    char const* trans,
+    lapack_int const* ijob, lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    lapack_complex_float const* B, lapack_int const* ldb,
+    lapack_complex_float* C, lapack_int const* ldc,
+    lapack_complex_float const* D, lapack_int const* ldd,
+    lapack_complex_float const* E, lapack_int const* lde,
+    lapack_complex_float* F, lapack_int const* ldf,
+    float* dif,
+    float* scale,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_dtgsyl LAPACK_GLOBAL(dtgsyl,DTGSYL)
+void LAPACK_dtgsyl(
+    char const* trans,
+    lapack_int const* ijob, lapack_int const* m, lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    double const* B, lapack_int const* ldb,
+    double* C, lapack_int const* ldc,
+    double const* D, lapack_int const* ldd,
+    double const* E, lapack_int const* lde,
+    double* F, lapack_int const* ldf,
+    double* dif,
+    double* scale,
+    double* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_stgsyl LAPACK_GLOBAL(stgsyl,STGSYL)
+void LAPACK_stgsyl(
+    char const* trans,
+    lapack_int const* ijob, lapack_int const* m, lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    float const* B, lapack_int const* ldb,
+    float* C, lapack_int const* ldc,
+    float const* D, lapack_int const* ldd,
+    float const* E, lapack_int const* lde,
+    float* F, lapack_int const* ldf,
+    float* dif,
+    float* scale,
+    float* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ztgsyl LAPACK_GLOBAL(ztgsyl,ZTGSYL)
+void LAPACK_ztgsyl(
+    char const* trans,
+    lapack_int const* ijob, lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_double const* B, lapack_int const* ldb,
+    lapack_complex_double* C, lapack_int const* ldc,
+    lapack_complex_double const* D, lapack_int const* ldd,
+    lapack_complex_double const* E, lapack_int const* lde,
+    lapack_complex_double* F, lapack_int const* ldf,
+    double* dif,
+    double* scale,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ctpcon LAPACK_GLOBAL(ctpcon,CTPCON)
+void LAPACK_ctpcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_float const* AP,
+    float* rcond,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_dtpcon LAPACK_GLOBAL(dtpcon,DTPCON)
+void LAPACK_dtpcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    double const* AP,
+    double* rcond,
+    double* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_stpcon LAPACK_GLOBAL(stpcon,STPCON)
+void LAPACK_stpcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    float const* AP,
+    float* rcond,
+    float* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ztpcon LAPACK_GLOBAL(ztpcon,ZTPCON)
+void LAPACK_ztpcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_double const* AP,
+    double* rcond,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_ctplqt LAPACK_GLOBAL(ctplqt,CTPLQT)
+void LAPACK_ctplqt(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l, lapack_int const* mb,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* T, lapack_int const* ldt,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dtplqt LAPACK_GLOBAL(dtplqt,DTPLQT)
+void LAPACK_dtplqt(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l, lapack_int const* mb,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* T, lapack_int const* ldt,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_stplqt LAPACK_GLOBAL(stplqt,STPLQT)
+void LAPACK_stplqt(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l, lapack_int const* mb,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* T, lapack_int const* ldt,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_ztplqt LAPACK_GLOBAL(ztplqt,ZTPLQT)
+void LAPACK_ztplqt(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l, lapack_int const* mb,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* T, lapack_int const* ldt,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_ctplqt2 LAPACK_GLOBAL(ctplqt2,CTPLQT2)
+void LAPACK_ctplqt2(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* T, lapack_int const* ldt,
+    lapack_int* info );
+
+#define LAPACK_dtplqt2 LAPACK_GLOBAL(dtplqt2,DTPLQT2)
+void LAPACK_dtplqt2(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* T, lapack_int const* ldt,
+    lapack_int* info );
+
+#define LAPACK_stplqt2 LAPACK_GLOBAL(stplqt2,STPLQT2)
+void LAPACK_stplqt2(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* T, lapack_int const* ldt,
+    lapack_int* info );
+
+#define LAPACK_ztplqt2 LAPACK_GLOBAL(ztplqt2,ZTPLQT2)
+void LAPACK_ztplqt2(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* T, lapack_int const* ldt,
+    lapack_int* info );
+
+#define LAPACK_ctpmlqt LAPACK_GLOBAL(ctpmlqt,CTPMLQT)
+void LAPACK_ctpmlqt(
+    char const* side, char const* trans,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, lapack_int const* mb,
+    lapack_complex_float const* V, lapack_int const* ldv,
+    lapack_complex_float const* T, lapack_int const* ldt,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dtpmlqt LAPACK_GLOBAL(dtpmlqt,DTPMLQT)
+void LAPACK_dtpmlqt(
+    char const* side, char const* trans,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, lapack_int const* mb,
+    double const* V, lapack_int const* ldv,
+    double const* T, lapack_int const* ldt,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_stpmlqt LAPACK_GLOBAL(stpmlqt,STPMLQT)
+void LAPACK_stpmlqt(
+    char const* side, char const* trans,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, lapack_int const* mb,
+    float const* V, lapack_int const* ldv,
+    float const* T, lapack_int const* ldt,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_ztpmlqt LAPACK_GLOBAL(ztpmlqt,ZTPMLQT)
+void LAPACK_ztpmlqt(
+    char const* side, char const* trans,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, lapack_int const* mb,
+    lapack_complex_double const* V, lapack_int const* ldv,
+    lapack_complex_double const* T, lapack_int const* ldt,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_ctpmqrt LAPACK_GLOBAL(ctpmqrt,CTPMQRT)
+void LAPACK_ctpmqrt(
+    char const* side, char const* trans,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, lapack_int const* nb,
+    lapack_complex_float const* V, lapack_int const* ldv,
+    lapack_complex_float const* T, lapack_int const* ldt,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dtpmqrt LAPACK_GLOBAL(dtpmqrt,DTPMQRT)
+void LAPACK_dtpmqrt(
+    char const* side, char const* trans,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, lapack_int const* nb,
+    double const* V, lapack_int const* ldv,
+    double const* T, lapack_int const* ldt,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_stpmqrt LAPACK_GLOBAL(stpmqrt,STPMQRT)
+void LAPACK_stpmqrt(
+    char const* side, char const* trans,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, lapack_int const* nb,
+    float const* V, lapack_int const* ldv,
+    float const* T, lapack_int const* ldt,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_ztpmqrt LAPACK_GLOBAL(ztpmqrt,ZTPMQRT)
+void LAPACK_ztpmqrt(
+    char const* side, char const* trans,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, lapack_int const* nb,
+    lapack_complex_double const* V, lapack_int const* ldv,
+    lapack_complex_double const* T, lapack_int const* ldt,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_ctpqrt LAPACK_GLOBAL(ctpqrt,CTPQRT)
+void LAPACK_ctpqrt(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l, lapack_int const* nb,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* T, lapack_int const* ldt,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_dtpqrt LAPACK_GLOBAL(dtpqrt,DTPQRT)
+void LAPACK_dtpqrt(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l, lapack_int const* nb,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* T, lapack_int const* ldt,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_stpqrt LAPACK_GLOBAL(stpqrt,STPQRT)
+void LAPACK_stpqrt(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l, lapack_int const* nb,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* T, lapack_int const* ldt,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_ztpqrt LAPACK_GLOBAL(ztpqrt,ZTPQRT)
+void LAPACK_ztpqrt(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l, lapack_int const* nb,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* T, lapack_int const* ldt,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_ctpqrt2 LAPACK_GLOBAL(ctpqrt2,CTPQRT2)
+void LAPACK_ctpqrt2(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* T, lapack_int const* ldt,
+    lapack_int* info );
+
+#define LAPACK_dtpqrt2 LAPACK_GLOBAL(dtpqrt2,DTPQRT2)
+void LAPACK_dtpqrt2(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* T, lapack_int const* ldt,
+    lapack_int* info );
+
+#define LAPACK_stpqrt2 LAPACK_GLOBAL(stpqrt2,STPQRT2)
+void LAPACK_stpqrt2(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* T, lapack_int const* ldt,
+    lapack_int* info );
+
+#define LAPACK_ztpqrt2 LAPACK_GLOBAL(ztpqrt2,ZTPQRT2)
+void LAPACK_ztpqrt2(
+    lapack_int const* m, lapack_int const* n, lapack_int const* l,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* T, lapack_int const* ldt,
+    lapack_int* info );
+
+#define LAPACK_ctprfb LAPACK_GLOBAL(ctprfb,CTPRFB)
+void LAPACK_ctprfb(
+    char const* side, char const* trans, char const* direct, char const* storev,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l,
+    lapack_complex_float const* V, lapack_int const* ldv,
+    lapack_complex_float const* T, lapack_int const* ldt,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_complex_float* work, lapack_int const* ldwork );
+
+#define LAPACK_dtprfb LAPACK_GLOBAL(dtprfb,DTPRFB)
+void LAPACK_dtprfb(
+    char const* side, char const* trans, char const* direct, char const* storev,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l,
+    double const* V, lapack_int const* ldv,
+    double const* T, lapack_int const* ldt,
+    double* A, lapack_int const* lda,
+    double* B, lapack_int const* ldb,
+    double* work, lapack_int const* ldwork );
+
+#define LAPACK_stprfb LAPACK_GLOBAL(stprfb,STPRFB)
+void LAPACK_stprfb(
+    char const* side, char const* trans, char const* direct, char const* storev,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l,
+    float const* V, lapack_int const* ldv,
+    float const* T, lapack_int const* ldt,
+    float* A, lapack_int const* lda,
+    float* B, lapack_int const* ldb,
+    float* work, lapack_int const* ldwork );
+
+#define LAPACK_ztprfb LAPACK_GLOBAL(ztprfb,ZTPRFB)
+void LAPACK_ztprfb(
+    char const* side, char const* trans, char const* direct, char const* storev,
+    lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l,
+    lapack_complex_double const* V, lapack_int const* ldv,
+    lapack_complex_double const* T, lapack_int const* ldt,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_complex_double* work, lapack_int const* ldwork );
+
+#define LAPACK_ctprfs LAPACK_GLOBAL(ctprfs,CTPRFS)
+void LAPACK_ctprfs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* AP,
+    lapack_complex_float const* B, lapack_int const* ldb,
+    lapack_complex_float const* X, lapack_int const* ldx,
+    float* ferr,
+    float* berr,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_dtprfs LAPACK_GLOBAL(dtprfs,DTPRFS)
+void LAPACK_dtprfs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* nrhs,
+    double const* AP,
+    double const* B, lapack_int const* ldb,
+    double const* X, lapack_int const* ldx,
+    double* ferr,
+    double* berr,
+    double* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_stprfs LAPACK_GLOBAL(stprfs,STPRFS)
+void LAPACK_stprfs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* nrhs,
+    float const* AP,
+    float const* B, lapack_int const* ldb,
+    float const* X, lapack_int const* ldx,
+    float* ferr,
+    float* berr,
+    float* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ztprfs LAPACK_GLOBAL(ztprfs,ZTPRFS)
+void LAPACK_ztprfs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* AP,
+    lapack_complex_double const* B, lapack_int const* ldb,
+    lapack_complex_double const* X, lapack_int const* ldx,
+    double* ferr,
+    double* berr,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_ctptri LAPACK_GLOBAL(ctptri,CTPTRI)
+void LAPACK_ctptri(
+    char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_float* AP,
+    lapack_int* info );
+
+#define LAPACK_dtptri LAPACK_GLOBAL(dtptri,DTPTRI)
+void LAPACK_dtptri(
+    char const* uplo, char const* diag,
+    lapack_int const* n,
+    double* AP,
+    lapack_int* info );
+
+#define LAPACK_stptri LAPACK_GLOBAL(stptri,STPTRI)
+void LAPACK_stptri(
+    char const* uplo, char const* diag,
+    lapack_int const* n,
+    float* AP,
+    lapack_int* info );
+
+#define LAPACK_ztptri LAPACK_GLOBAL(ztptri,ZTPTRI)
+void LAPACK_ztptri(
+    char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_double* AP,
+    lapack_int* info );
+
+#define LAPACK_ctptrs LAPACK_GLOBAL(ctptrs,CTPTRS)
+void LAPACK_ctptrs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_float const* AP,
+    lapack_complex_float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_dtptrs LAPACK_GLOBAL(dtptrs,DTPTRS)
+void LAPACK_dtptrs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* nrhs,
+    double const* AP,
+    double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_stptrs LAPACK_GLOBAL(stptrs,STPTRS)
+void LAPACK_stptrs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* nrhs,
+    float const* AP,
+    float* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ztptrs LAPACK_GLOBAL(ztptrs,ZTPTRS)
+void LAPACK_ztptrs(
+    char const* uplo, char const* trans, char const* diag,
+    lapack_int const* n, lapack_int const* nrhs,
+    lapack_complex_double const* AP,
+    lapack_complex_double* B, lapack_int const* ldb,
+    lapack_int* info );
+
+#define LAPACK_ctpttf LAPACK_GLOBAL(ctpttf,CTPTTF)
+void LAPACK_ctpttf(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* AP,
+    lapack_complex_float* ARF,
+    lapack_int* info );
+
+#define LAPACK_dtpttf LAPACK_GLOBAL(dtpttf,DTPTTF)
+void LAPACK_dtpttf(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    double const* AP,
+    double* ARF,
+    lapack_int* info );
+
+#define LAPACK_stpttf LAPACK_GLOBAL(stpttf,STPTTF)
+void LAPACK_stpttf(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    float const* AP,
+    float* ARF,
+    lapack_int* info );
+
+#define LAPACK_ztpttf LAPACK_GLOBAL(ztpttf,ZTPTTF)
+void LAPACK_ztpttf(
+    char const* transr, char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* AP,
+    lapack_complex_double* ARF,
+    lapack_int* info );
+
+#define LAPACK_ctpttr LAPACK_GLOBAL(ctpttr,CTPTTR)
+void LAPACK_ctpttr(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* AP,
+    lapack_complex_float* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_dtpttr LAPACK_GLOBAL(dtpttr,DTPTTR)
+void LAPACK_dtpttr(
+    char const* uplo,
+    lapack_int const* n,
+    double const* AP,
+    double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_stpttr LAPACK_GLOBAL(stpttr,STPTTR)
+void LAPACK_stpttr(
+    char const* uplo,
+    lapack_int const* n,
+    float const* AP,
+    float* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_ztpttr LAPACK_GLOBAL(ztpttr,ZTPTTR)
+void LAPACK_ztpttr(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* AP,
+    lapack_complex_double* A, lapack_int const* lda,
+    lapack_int* info );
+
+#define LAPACK_ctrcon LAPACK_GLOBAL(ctrcon,CTRCON)
+void LAPACK_ctrcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    float* rcond,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_dtrcon LAPACK_GLOBAL(dtrcon,DTRCON)
+void LAPACK_dtrcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    double const* A, lapack_int const* lda,
+    double* rcond,
+    double* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_strcon LAPACK_GLOBAL(strcon,STRCON)
+void LAPACK_strcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    float const* A, lapack_int const* lda,
+    float* rcond,
+    float* work,
+    lapack_int* iwork,
+    lapack_int* info );
+
+#define LAPACK_ztrcon LAPACK_GLOBAL(ztrcon,ZTRCON)
+void LAPACK_ztrcon(
+    char const* norm, char const* uplo, char const* diag,
+    lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    double* rcond,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_ctrevc LAPACK_GLOBAL(ctrevc,CTREVC)
+void LAPACK_ctrevc(
+    char const* side, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_float* T, lapack_int const* ldt,
+    lapack_complex_float* VL, lapack_int const* ldvl,
+    lapack_complex_float* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    lapack_complex_float* work,
+    float* rwork,
+    lapack_int* info );
+
+#define LAPACK_dtrevc LAPACK_GLOBAL(dtrevc,DTREVC)
+void LAPACK_dtrevc(
+    char const* side, char const* howmny,
+    lapack_logical* select,
+    lapack_int const* n,
+    double const* T, lapack_int const* ldt,
+    double* VL, lapack_int const* ldvl,
+    double* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_strevc LAPACK_GLOBAL(strevc,STREVC)
+void LAPACK_strevc(
+    char const* side, char const* howmny,
+    lapack_logical* select,
+    lapack_int const* n,
+    float const* T, lapack_int const* ldt,
+    float* VL, lapack_int const* ldvl,
+    float* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_ztrevc LAPACK_GLOBAL(ztrevc,ZTREVC)
+void LAPACK_ztrevc(
+    char const* side, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_double* T, lapack_int const* ldt,
+    lapack_complex_double* VL, lapack_int const* ldvl,
+    lapack_complex_double* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    lapack_complex_double* work,
+    double* rwork,
+    lapack_int* info );
+
+#define LAPACK_ctrevc3 LAPACK_GLOBAL(ctrevc3,CTREVC3)
+void LAPACK_ctrevc3(
+    char const* side, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_float* T, lapack_int const* ldt,
+    lapack_complex_float* VL, lapack_int const* ldvl,
+    lapack_complex_float* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    lapack_complex_float* work, lapack_int const* lwork,
+    float* rwork, lapack_int const* lrwork,
+    lapack_int* info );
+
+#define LAPACK_dtrevc3 LAPACK_GLOBAL(dtrevc3,DTREVC3)
+void LAPACK_dtrevc3(
+    char const* side, char const* howmny,
+    lapack_logical* select,
+    lapack_int const* n,
+    double const* T, lapack_int const* ldt,
+    double* VL, lapack_int const* ldvl,
+    double* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_strevc3 LAPACK_GLOBAL(strevc3,STREVC3)
+void LAPACK_strevc3(
+    char const* side, char const* howmny,
+    lapack_logical* select,
+    lapack_int const* n,
+    float const* T, lapack_int const* ldt,
+    float* VL, lapack_int const* ldvl,
+    float* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_ztrevc3 LAPACK_GLOBAL(ztrevc3,ZTREVC3)
+void LAPACK_ztrevc3(
+    char const* side, char const* howmny,
+    lapack_logical const* select,
+    lapack_int const* n,
+    lapack_complex_double* T, lapack_int const* ldt,
+    lapack_complex_double* VL, lapack_int const* ldvl,
+    lapack_complex_double* VR, lapack_int const* ldvr, lapack_int const* mm, lapack_int* m,
+    lapack_complex_double* work, lapack_int const* lwork,
+    double* rwork, lapack_int const* lrwork,
+    lapack_int* info );
+
+#define LAPACK_ctrexc LAPACK_GLOBAL(ctrexc,CTREXC)
+void LAPACK_ctrexc(
+    char const* compq,
+    lapack_int const* n,
+    lapack_complex_float* T, lapack_int const* ldt,
+    lapack_complex_float* Q, lapack_int const* ldq, lapack_int const* ifst, lapack_int const* ilst,
+    lapack_int* info );
+
+#define LAPACK_dtrexc LAPACK_GLOBAL(dtrexc,DTREXC)
+void LAPACK_dtrexc(
+    char const* compq,
+    lapack_int const* n,
+    double* T, lapack_int const* ldt,
+    double* Q, lapack_int const* ldq, lapack_int* ifst, lapack_int* ilst,
+    double* work,
+    lapack_int* info );
+
+#define LAPACK_strexc LAPACK_GLOBAL(strexc,STREXC)
+void LAPACK_strexc(
+    char const* compq,
+    lapack_int const* n,
+    float* T, lapack_int const* ldt,
+    float* Q, lapack_int const* ldq, lapack_int* ifst, lapack_int* ilst,
+    float* work,
+    lapack_int* info );
+
+#define LAPACK_ztrexc LAPACK_GLOBAL(ztrexc,ZTREXC)
+void LAPACK_ztrexc(
+    char const* compq,
+    lapack_int const* n,
+    lapack_complex_double* T, lapack_int const* ldt,
+    lapack_complex_double* Q, lapack_int const* ldq, lapack_int const* ifst, lapack_int const* ilst,
+    lapack_int* info );
+
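+/* Editorial sketch, not part of the upstream header: using dtrexc above to
+ * move the eigenvalue (or 2x2 block) starting at 1-based row ifst of a real
+ * Schur form T to position ilst, updating the Schur vectors in Q ("V").
+ * Assumes T and Q already hold an n-by-n Schur factorization; work needs
+ * n entries, and ifst/ilst are updated in place:
+ *
+ *     lapack_int n = 4, ldt = 4, ldq = 4, ifst = 3, ilst = 1, info;
+ *     double work[4];
+ *     LAPACK_dtrexc("V", &n, T, &ldt, Q, &ldq, &ifst, &ilst, work, &info);
+ */
+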
char const* uplo, char const* trans, char const* diag, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float const* B, lapack_int const* ldb, + float const* X, lapack_int const* ldx, + float* ferr, + float* berr, + float* work, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ztrrfs LAPACK_GLOBAL(ztrrfs,ZTRRFS) +void LAPACK_ztrrfs( + char const* uplo, char const* trans, char const* diag, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double const* X, lapack_int const* ldx, + double* ferr, + double* berr, + lapack_complex_double* work, + double* rwork, + lapack_int* info ); + +#define LAPACK_ctrsen LAPACK_GLOBAL(ctrsen,CTRSEN) +void LAPACK_ctrsen( + char const* job, char const* compq, + lapack_logical const* select, + lapack_int const* n, + lapack_complex_float* T, lapack_int const* ldt, + lapack_complex_float* Q, lapack_int const* ldq, + lapack_complex_float* W, lapack_int* m, + float* s, + float* sep, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dtrsen LAPACK_GLOBAL(dtrsen,DTRSEN) +void LAPACK_dtrsen( + char const* job, char const* compq, + lapack_logical const* select, + lapack_int const* n, + double* T, lapack_int const* ldt, + double* Q, lapack_int const* ldq, + double* WR, + double* WI, lapack_int* m, + double* s, + double* sep, + double* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_strsen LAPACK_GLOBAL(strsen,STRSEN) +void LAPACK_strsen( + char const* job, char const* compq, + lapack_logical const* select, + lapack_int const* n, + float* T, lapack_int const* ldt, + float* Q, lapack_int const* ldq, + float* WR, + float* WI, lapack_int* m, + float* s, + float* sep, + float* work, lapack_int const* lwork, + lapack_int* iwork, lapack_int const* liwork, + lapack_int* info ); + +#define LAPACK_ztrsen LAPACK_GLOBAL(ztrsen,ZTRSEN) +void LAPACK_ztrsen( + char const* job, char const* compq, + lapack_logical const* select, + lapack_int const* n, + lapack_complex_double* T, lapack_int const* ldt, + lapack_complex_double* Q, lapack_int const* ldq, + lapack_complex_double* W, lapack_int* m, + double* s, + double* sep, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ctrsna LAPACK_GLOBAL(ctrsna,CTRSNA) +void LAPACK_ctrsna( + char const* job, char const* howmny, + lapack_logical const* select, + lapack_int const* n, + lapack_complex_float const* T, lapack_int const* ldt, + lapack_complex_float const* VL, lapack_int const* ldvl, + lapack_complex_float const* VR, lapack_int const* ldvr, + float* S, + float* SEP, lapack_int const* mm, lapack_int* m, + lapack_complex_float* work, lapack_int const* ldwork, + float* rwork, + lapack_int* info ); + +#define LAPACK_dtrsna LAPACK_GLOBAL(dtrsna,DTRSNA) +void LAPACK_dtrsna( + char const* job, char const* howmny, + lapack_logical const* select, + lapack_int const* n, + double const* T, lapack_int const* ldt, + double const* VL, lapack_int const* ldvl, + double const* VR, lapack_int const* ldvr, + double* S, + double* SEP, lapack_int const* mm, lapack_int* m, + double* work, lapack_int const* ldwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_strsna LAPACK_GLOBAL(strsna,STRSNA) +void LAPACK_strsna( + char const* job, char const* howmny, + lapack_logical const* select, + lapack_int const* n, + 
float const* T, lapack_int const* ldt, + float const* VL, lapack_int const* ldvl, + float const* VR, lapack_int const* ldvr, + float* S, + float* SEP, lapack_int const* mm, lapack_int* m, + float* work, lapack_int const* ldwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_ztrsna LAPACK_GLOBAL(ztrsna,ZTRSNA) +void LAPACK_ztrsna( + char const* job, char const* howmny, + lapack_logical const* select, + lapack_int const* n, + lapack_complex_double const* T, lapack_int const* ldt, + lapack_complex_double const* VL, lapack_int const* ldvl, + lapack_complex_double const* VR, lapack_int const* ldvr, + double* S, + double* SEP, lapack_int const* mm, lapack_int* m, + lapack_complex_double* work, lapack_int const* ldwork, + double* rwork, + lapack_int* info ); + +#define LAPACK_ctrsyl LAPACK_GLOBAL(ctrsyl,CTRSYL) +void LAPACK_ctrsyl( + char const* trana, char const* tranb, + lapack_int const* isgn, lapack_int const* m, lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* B, lapack_int const* ldb, + lapack_complex_float* C, lapack_int const* ldc, + float* scale, + lapack_int* info ); + +#define LAPACK_dtrsyl LAPACK_GLOBAL(dtrsyl,DTRSYL) +void LAPACK_dtrsyl( + char const* trana, char const* tranb, + lapack_int const* isgn, lapack_int const* m, lapack_int const* n, + double const* A, lapack_int const* lda, + double const* B, lapack_int const* ldb, + double* C, lapack_int const* ldc, + double* scale, + lapack_int* info ); + +#define LAPACK_strsyl LAPACK_GLOBAL(strsyl,STRSYL) +void LAPACK_strsyl( + char const* trana, char const* tranb, + lapack_int const* isgn, lapack_int const* m, lapack_int const* n, + float const* A, lapack_int const* lda, + float const* B, lapack_int const* ldb, + float* C, lapack_int const* ldc, + float* scale, + lapack_int* info ); + +#define LAPACK_ztrsyl LAPACK_GLOBAL(ztrsyl,ZTRSYL) +void LAPACK_ztrsyl( + char const* trana, char const* tranb, + lapack_int const* isgn, lapack_int const* m, lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* B, lapack_int const* ldb, + lapack_complex_double* C, lapack_int const* ldc, + double* scale, + lapack_int* info ); + +#define LAPACK_ctrtri LAPACK_GLOBAL(ctrtri,CTRTRI) +void LAPACK_ctrtri( + char const* uplo, char const* diag, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_dtrtri LAPACK_GLOBAL(dtrtri,DTRTRI) +void LAPACK_dtrtri( + char const* uplo, char const* diag, + lapack_int const* n, + double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_strtri LAPACK_GLOBAL(strtri,STRTRI) +void LAPACK_strtri( + char const* uplo, char const* diag, + lapack_int const* n, + float* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_ztrtri LAPACK_GLOBAL(ztrtri,ZTRTRI) +void LAPACK_ztrtri( + char const* uplo, char const* diag, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_int* info ); + +#define LAPACK_ctrtrs LAPACK_GLOBAL(ctrtrs,CTRTRS) +void LAPACK_ctrtrs( + char const* uplo, char const* trans, char const* diag, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_dtrtrs LAPACK_GLOBAL(dtrtrs,DTRTRS) +void LAPACK_dtrtrs( + char const* uplo, char const* trans, char const* diag, + lapack_int const* n, lapack_int const* nrhs, + double const* A, lapack_int const* 
lda, + double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_strtrs LAPACK_GLOBAL(strtrs,STRTRS) +void LAPACK_strtrs( + char const* uplo, char const* trans, char const* diag, + lapack_int const* n, lapack_int const* nrhs, + float const* A, lapack_int const* lda, + float* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_ztrtrs LAPACK_GLOBAL(ztrtrs,ZTRTRS) +void LAPACK_ztrtrs( + char const* uplo, char const* trans, char const* diag, + lapack_int const* n, lapack_int const* nrhs, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double* B, lapack_int const* ldb, + lapack_int* info ); + +#define LAPACK_ctrttf LAPACK_GLOBAL(ctrttf,CTRTTF) +void LAPACK_ctrttf( + char const* transr, char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float* ARF, + lapack_int* info ); + +#define LAPACK_dtrttf LAPACK_GLOBAL(dtrttf,DTRTTF) +void LAPACK_dtrttf( + char const* transr, char const* uplo, + lapack_int const* n, + double const* A, lapack_int const* lda, + double* ARF, + lapack_int* info ); + +#define LAPACK_strttf LAPACK_GLOBAL(strttf,STRTTF) +void LAPACK_strttf( + char const* transr, char const* uplo, + lapack_int const* n, + float const* A, lapack_int const* lda, + float* ARF, + lapack_int* info ); + +#define LAPACK_ztrttf LAPACK_GLOBAL(ztrttf,ZTRTTF) +void LAPACK_ztrttf( + char const* transr, char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double* ARF, + lapack_int* info ); + +#define LAPACK_ctrttp LAPACK_GLOBAL(ctrttp,CTRTTP) +void LAPACK_ctrttp( + char const* uplo, + lapack_int const* n, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float* AP, + lapack_int* info ); + +#define LAPACK_dtrttp LAPACK_GLOBAL(dtrttp,DTRTTP) +void LAPACK_dtrttp( + char const* uplo, + lapack_int const* n, + double const* A, lapack_int const* lda, + double* AP, + lapack_int* info ); + +#define LAPACK_strttp LAPACK_GLOBAL(strttp,STRTTP) +void LAPACK_strttp( + char const* uplo, + lapack_int const* n, + float const* A, lapack_int const* lda, + float* AP, + lapack_int* info ); + +#define LAPACK_ztrttp LAPACK_GLOBAL(ztrttp,ZTRTTP) +void LAPACK_ztrttp( + char const* uplo, + lapack_int const* n, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double* AP, + lapack_int* info ); + +#define LAPACK_ctzrzf LAPACK_GLOBAL(ctzrzf,CTZRZF) +void LAPACK_ctzrzf( + lapack_int const* m, lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_dtzrzf LAPACK_GLOBAL(dtzrzf,DTZRZF) +void LAPACK_dtzrzf( + lapack_int const* m, lapack_int const* n, + double* A, lapack_int const* lda, + double* tau, + double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_stzrzf LAPACK_GLOBAL(stzrzf,STZRZF) +void LAPACK_stzrzf( + lapack_int const* m, lapack_int const* n, + float* A, lapack_int const* lda, + float* tau, + float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_ztzrzf LAPACK_GLOBAL(ztzrzf,ZTZRZF) +void LAPACK_ztzrzf( + lapack_int const* m, lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunbdb LAPACK_GLOBAL(cunbdb,CUNBDB) +void LAPACK_cunbdb( + char const* trans, char const* signs, + 
lapack_int const* m, lapack_int const* p, lapack_int const* q, + lapack_complex_float* X11, lapack_int const* ldx11, + lapack_complex_float* X12, lapack_int const* ldx12, + lapack_complex_float* X21, lapack_int const* ldx21, + lapack_complex_float* X22, lapack_int const* ldx22, + float* theta, + float* phi, + lapack_complex_float* TAUP1, + lapack_complex_float* TAUP2, + lapack_complex_float* TAUQ1, + lapack_complex_float* TAUQ2, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunbdb LAPACK_GLOBAL(zunbdb,ZUNBDB) +void LAPACK_zunbdb( + char const* trans, char const* signs, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + lapack_complex_double* X11, lapack_int const* ldx11, + lapack_complex_double* X12, lapack_int const* ldx12, + lapack_complex_double* X21, lapack_int const* ldx21, + lapack_complex_double* X22, lapack_int const* ldx22, + double* theta, + double* phi, + lapack_complex_double* TAUP1, + lapack_complex_double* TAUP2, + lapack_complex_double* TAUQ1, + lapack_complex_double* TAUQ2, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cuncsd LAPACK_GLOBAL(cuncsd,CUNCSD) +void LAPACK_cuncsd( + char const* jobu1, char const* jobu2, char const* jobv1t, char const* jobv2t, char const* trans, char const* signs, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + lapack_complex_float* X11, lapack_int const* ldx11, + lapack_complex_float* X12, lapack_int const* ldx12, + lapack_complex_float* X21, lapack_int const* ldx21, + lapack_complex_float* X22, lapack_int const* ldx22, + float* theta, + lapack_complex_float* U1, lapack_int const* ldu1, + lapack_complex_float* U2, lapack_int const* ldu2, + lapack_complex_float* V1T, lapack_int const* ldv1t, + lapack_complex_float* V2T, lapack_int const* ldv2t, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zuncsd LAPACK_GLOBAL(zuncsd,ZUNCSD) +void LAPACK_zuncsd( + char const* jobu1, char const* jobu2, char const* jobv1t, char const* jobv2t, char const* trans, char const* signs, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + lapack_complex_double* X11, lapack_int const* ldx11, + lapack_complex_double* X12, lapack_int const* ldx12, + lapack_complex_double* X21, lapack_int const* ldx21, + lapack_complex_double* X22, lapack_int const* ldx22, + double* theta, + lapack_complex_double* U1, lapack_int const* ldu1, + lapack_complex_double* U2, lapack_int const* ldu2, + lapack_complex_double* V1T, lapack_int const* ldv1t, + lapack_complex_double* V2T, lapack_int const* ldv2t, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_cuncsd2by1 LAPACK_GLOBAL(cuncsd2by1,CUNCSD2BY1) +void LAPACK_cuncsd2by1( + char const* jobu1, char const* jobu2, char const* jobv1t, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + lapack_complex_float* X11, lapack_int const* ldx11, + lapack_complex_float* X21, lapack_int const* ldx21, + float* theta, + lapack_complex_float* U1, lapack_int const* ldu1, + lapack_complex_float* U2, lapack_int const* ldu2, + lapack_complex_float* V1T, lapack_int const* ldv1t, + lapack_complex_float* work, lapack_int const* lwork, + float* rwork, lapack_int const* lrwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_zuncsd2by1 LAPACK_GLOBAL(zuncsd2by1,ZUNCSD2BY1) +void 
LAPACK_zuncsd2by1( + char const* jobu1, char const* jobu2, char const* jobv1t, + lapack_int const* m, lapack_int const* p, lapack_int const* q, + lapack_complex_double* X11, lapack_int const* ldx11, + lapack_complex_double* X21, lapack_int const* ldx21, + double* theta, + lapack_complex_double* U1, lapack_int const* ldu1, + lapack_complex_double* U2, lapack_int const* ldu2, + lapack_complex_double* V1T, lapack_int const* ldv1t, + lapack_complex_double* work, lapack_int const* lwork, + double* rwork, lapack_int const* lrwork, + lapack_int* iwork, + lapack_int* info ); + +#define LAPACK_cungbr LAPACK_GLOBAL(cungbr,CUNGBR) +void LAPACK_cungbr( + char const* vect, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zungbr LAPACK_GLOBAL(zungbr,ZUNGBR) +void LAPACK_zungbr( + char const* vect, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunghr LAPACK_GLOBAL(cunghr,CUNGHR) +void LAPACK_cunghr( + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunghr LAPACK_GLOBAL(zunghr,ZUNGHR) +void LAPACK_zunghr( + lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunglq LAPACK_GLOBAL(cunglq,CUNGLQ) +void LAPACK_cunglq( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunglq LAPACK_GLOBAL(zunglq,ZUNGLQ) +void LAPACK_zunglq( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cungql LAPACK_GLOBAL(cungql,CUNGQL) +void LAPACK_cungql( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zungql LAPACK_GLOBAL(zungql,ZUNGQL) +void LAPACK_zungql( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cungqr LAPACK_GLOBAL(cungqr,CUNGQR) +void LAPACK_cungqr( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zungqr LAPACK_GLOBAL(zungqr,ZUNGQR) +void LAPACK_zungqr( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double* A, lapack_int const* lda, + 
lapack_complex_double const* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cungrq LAPACK_GLOBAL(cungrq,CUNGRQ) +void LAPACK_cungrq( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zungrq LAPACK_GLOBAL(zungrq,ZUNGRQ) +void LAPACK_zungrq( + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cungtr LAPACK_GLOBAL(cungtr,CUNGTR) +void LAPACK_cungtr( + char const* uplo, + lapack_int const* n, + lapack_complex_float* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zungtr LAPACK_GLOBAL(zungtr,ZUNGTR) +void LAPACK_zungtr( + char const* uplo, + lapack_int const* n, + lapack_complex_double* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunmbr LAPACK_GLOBAL(cunmbr,CUNMBR) +void LAPACK_cunmbr( + char const* vect, char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunmbr LAPACK_GLOBAL(zunmbr,ZUNMBR) +void LAPACK_zunmbr( + char const* vect, char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunmhr LAPACK_GLOBAL(cunmhr,CUNMHR) +void LAPACK_cunmhr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunmhr LAPACK_GLOBAL(zunmhr,ZUNMHR) +void LAPACK_zunmhr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* ilo, lapack_int const* ihi, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunmlq LAPACK_GLOBAL(cunmlq,CUNMLQ) +void LAPACK_cunmlq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunmlq LAPACK_GLOBAL(zunmlq,ZUNMLQ) +void LAPACK_zunmlq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + 
lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunmql LAPACK_GLOBAL(cunmql,CUNMQL) +void LAPACK_cunmql( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunmql LAPACK_GLOBAL(zunmql,ZUNMQL) +void LAPACK_zunmql( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunmqr LAPACK_GLOBAL(cunmqr,CUNMQR) +void LAPACK_cunmqr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunmqr LAPACK_GLOBAL(zunmqr,ZUNMQR) +void LAPACK_zunmqr( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunmrq LAPACK_GLOBAL(cunmrq,CUNMRQ) +void LAPACK_cunmrq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunmrq LAPACK_GLOBAL(zunmrq,ZUNMRQ) +void LAPACK_zunmrq( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunmrz LAPACK_GLOBAL(cunmrz,CUNMRZ) +void LAPACK_cunmrz( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, + lapack_complex_float const* A, lapack_int const* lda, + lapack_complex_float const* tau, + lapack_complex_float* C, lapack_int const* ldc, + lapack_complex_float* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_zunmrz LAPACK_GLOBAL(zunmrz,ZUNMRZ) +void LAPACK_zunmrz( + char const* side, char const* trans, + lapack_int const* m, lapack_int const* n, lapack_int const* k, lapack_int const* l, + lapack_complex_double const* A, lapack_int const* lda, + lapack_complex_double const* tau, + lapack_complex_double* C, lapack_int const* ldc, + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + +#define LAPACK_cunmtr LAPACK_GLOBAL(cunmtr,CUNMTR) +void LAPACK_cunmtr( + char const* side, char 
const* uplo, char const* trans,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* A, lapack_int const* lda,
+    lapack_complex_float const* tau,
+    lapack_complex_float* C, lapack_int const* ldc,
+    lapack_complex_float* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_zunmtr LAPACK_GLOBAL(zunmtr,ZUNMTR)
+void LAPACK_zunmtr(
+    char const* side, char const* uplo, char const* trans,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* A, lapack_int const* lda,
+    lapack_complex_double const* tau,
+    lapack_complex_double* C, lapack_int const* ldc,
+    lapack_complex_double* work, lapack_int const* lwork,
+    lapack_int* info );
+
+#define LAPACK_cupgtr LAPACK_GLOBAL(cupgtr,CUPGTR)
+void LAPACK_cupgtr(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_float const* AP,
+    lapack_complex_float const* tau,
+    lapack_complex_float* Q, lapack_int const* ldq,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_zupgtr LAPACK_GLOBAL(zupgtr,ZUPGTR)
+void LAPACK_zupgtr(
+    char const* uplo,
+    lapack_int const* n,
+    lapack_complex_double const* AP,
+    lapack_complex_double const* tau,
+    lapack_complex_double* Q, lapack_int const* ldq,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#define LAPACK_cupmtr LAPACK_GLOBAL(cupmtr,CUPMTR)
+void LAPACK_cupmtr(
+    char const* side, char const* uplo, char const* trans,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_float const* AP,
+    lapack_complex_float const* tau,
+    lapack_complex_float* C, lapack_int const* ldc,
+    lapack_complex_float* work,
+    lapack_int* info );
+
+#define LAPACK_zupmtr LAPACK_GLOBAL(zupmtr,ZUPMTR)
+void LAPACK_zupmtr(
+    char const* side, char const* uplo, char const* trans,
+    lapack_int const* m, lapack_int const* n,
+    lapack_complex_double const* AP,
+    lapack_complex_double const* tau,
+    lapack_complex_double* C, lapack_int const* ldc,
+    lapack_complex_double* work,
+    lapack_int* info );
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* LAPACK_H */
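All of the lapack.h declarations above follow the raw Fortran calling convention: every argument, scalars included, is passed by address, character options are passed as single-character strings, and errors are reported through info rather than a return value. As a minimal sketch of what a call site looks like (not part of the patch; it assumes lapack.h is on the include path and the program links against the built library), inverting a small upper-triangular matrix with LAPACK_dtrtri, whose prototype appears above:

/* Minimal sketch: calling the Fortran-style LAPACK_dtrtri binding. */
#include <stdio.h>
#include "lapack.h"

int main(void) {
    /* 2x2 upper-triangular matrix, column-major order */
    double A[4] = { 2.0, 0.0,    /* column 0 */
                    1.0, 4.0 };  /* column 1 */
    lapack_int n = 2, lda = 2, info = 0;
    /* every argument, including the scalars n and lda, goes by pointer */
    LAPACK_dtrtri( "U", "N", &n, A, &lda, &info );
    if( info == 0 )
        printf( "inv(A)[0][0] = %g, inv(A)[1][1] = %g\n", A[0], A[3] );
    return (int) info;
}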
diff --git a/lapack-netlib/LAPACKE/include/lapacke.h b/lapack-netlib/LAPACKE/include/lapacke.h
index c5ea465e0..012c104bb 100644
--- a/lapack-netlib/LAPACKE/include/lapacke.h
+++ b/lapack-netlib/LAPACKE/include/lapacke.h
@@ -34,81 +34,7 @@
 #ifndef _LAPACKE_H_
 #define _LAPACKE_H_
 
-/*
-* Turn on HAVE_LAPACK_CONFIG_H to redefine C-LAPACK datatypes
-*/
-#ifdef HAVE_LAPACK_CONFIG_H
-#include "lapacke_config.h"
-#endif
-
-#include <stdlib.h>
-
-#ifndef lapack_int
-#define lapack_int int
-#endif
-
-#ifndef lapack_logical
-#define lapack_logical lapack_int
-#endif
-
-/* Complex types are structures equivalent to the
-* Fortran complex types COMPLEX(4) and COMPLEX(8).
-*
-* One can also redefine the types with his own types
-* for example by including in the code definitions like
-*
-* #define lapack_complex_float std::complex<float>
-* #define lapack_complex_double std::complex<double>
-*
-* or define these types in the command line:
-*
-* -Dlapack_complex_float="std::complex<float>"
-* -Dlapack_complex_double="std::complex<double>"
-*/
-
-#ifndef LAPACK_COMPLEX_CUSTOM
-
-/* Complex type (single precision) */
-#ifndef lapack_complex_float
-#ifndef __cplusplus
-#include <complex.h>
-#else
-#include <complex>
-#endif
-#define lapack_complex_float float _Complex
-#endif
-
-#ifndef lapack_complex_float_real
-#define lapack_complex_float_real(z) (creal(z))
-#endif
-
-#ifndef lapack_complex_float_imag
-#define lapack_complex_float_imag(z) (cimag(z))
-#endif
-
-lapack_complex_float lapack_make_complex_float( float re, float im );
-
-/* Complex type (double precision) */
-#ifndef lapack_complex_double
-#ifndef __cplusplus
-#include <complex.h>
-#else
-#include <complex>
-#endif
-#define lapack_complex_double double _Complex
-#endif
-
-#ifndef lapack_complex_double_real
-#define lapack_complex_double_real(z) (creal(z))
-#endif
-
-#ifndef lapack_complex_double_imag
-#define lapack_complex_double_imag(z) (cimag(z))
-#endif
-
-lapack_complex_double lapack_make_complex_double( double re, double im );
-
-#endif
+#include "lapack.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -130,29 +56,8 @@ extern "C" {
 #define LAPACK_WORK_MEMORY_ERROR       -1010
 #define LAPACK_TRANSPOSE_MEMORY_ERROR  -1011
 
-/* Callback logical functions of one, two, or three arguments are used
-*  to select eigenvalues to sort to the top left of the Schur form.
-*  The value is selected if function returns TRUE (non-zero). */
-
-typedef lapack_logical (*LAPACK_S_SELECT2) ( const float*, const float* );
-typedef lapack_logical (*LAPACK_S_SELECT3)
-    ( const float*, const float*, const float* );
-typedef lapack_logical (*LAPACK_D_SELECT2) ( const double*, const double* );
-typedef lapack_logical (*LAPACK_D_SELECT3)
-    ( const double*, const double*, const double* );
-
-typedef lapack_logical (*LAPACK_C_SELECT1) ( const lapack_complex_float* );
-typedef lapack_logical (*LAPACK_C_SELECT2)
-    ( const lapack_complex_float*, const lapack_complex_float* );
-typedef lapack_logical (*LAPACK_Z_SELECT1) ( const lapack_complex_double* );
-typedef lapack_logical (*LAPACK_Z_SELECT2)
-    ( const lapack_complex_double*, const lapack_complex_double* );
-
-#include "lapacke_mangling.h"
-
-#define LAPACK_lsame LAPACK_GLOBAL(lsame,LSAME)
-lapack_logical LAPACK_lsame( char* ca,  char* cb,
-                             lapack_int lca, lapack_int lcb );
+lapack_complex_float lapack_make_complex_float( float re, float im );
+lapack_complex_double lapack_make_complex_double( double re, double im );
 
 /* C-LAPACK function prototypes */
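The hunk above drops lapacke.h's local complex-type setup in favor of the new lapack.h, keeping only the lapack_make_complex_float/lapack_make_complex_double prototypes. Those helpers matter because lapack_complex_float and lapack_complex_double can be overridden (as the removed comment describes), so portable code should build values through them rather than C99 literal syntax. A minimal sketch (not from the patch; it assumes the default C build, where lapack_complex_double is double _Complex, and that the _real/_imag accessor macros move to lapack.h along with the type definitions):

/* Minimal sketch: constructing complex scalars through the declared helpers. */
#include <stdio.h>
#include "lapacke.h"

int main(void) {
    lapack_complex_double z = lapack_make_complex_double( 3.0, -4.0 );
    printf( "re = %g, im = %g\n",
            lapack_complex_double_real( z ),
            lapack_complex_double_imag( z ) );
    return 0;
}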
@@ -1034,6 +939,25 @@ lapack_int LAPACKE_zgesvdx( int matrix_layout, char jobu, char jobvt, char range
                             lapack_complex_double* vt, lapack_int ldvt,
                             lapack_int* superb );
 
+lapack_int LAPACKE_sgesvdq( int matrix_layout, char joba, char jobp, char jobr, char jobu, char jobv,
+                            lapack_int m, lapack_int n, float* a, lapack_int lda,
+                            float* s, float* u, lapack_int ldu, float* v,
+                            lapack_int ldv, lapack_int* numrank );
+lapack_int LAPACKE_dgesvdq( int matrix_layout, char joba, char jobp, char jobr, char jobu, char jobv,
+                            lapack_int m, lapack_int n, double* a,
+                            lapack_int lda, double* s, double* u, lapack_int ldu,
+                            double* v, lapack_int ldv, lapack_int* numrank);
+lapack_int LAPACKE_cgesvdq( int matrix_layout, char joba, char jobp, char jobr, char jobu, char jobv,
+                            lapack_int m, lapack_int n, lapack_complex_float* a,
+                            lapack_int lda, float* s, lapack_complex_float* u,
+                            lapack_int ldu, lapack_complex_float* v,
+                            lapack_int ldv, lapack_int* numrank );
+lapack_int LAPACKE_zgesvdq( int matrix_layout, char joba, char jobp, char jobr, char jobu, char jobv,
+                            lapack_int m, lapack_int n, lapack_complex_double* a,
+                            lapack_int lda, double* s, lapack_complex_double* u,
+                            lapack_int ldu, lapack_complex_double* v,
+                            lapack_int ldv, lapack_int* numrank );
+
 lapack_int LAPACKE_sgesvj( int matrix_layout, char joba, char jobu, char jobv,
                            lapack_int m, lapack_int n, float* a, lapack_int lda,
                            float* sva, lapack_int mv, float* v, lapack_int ldv,
@@ -1943,11 +1867,11 @@ lapack_int LAPACKE_zheevx( int matrix_layout, char jobz, char range, char uplo,
 
 lapack_int LAPACKE_chegst( int matrix_layout, lapack_int itype, char uplo,
                            lapack_int n, lapack_complex_float* a,
-                           lapack_int lda, const lapack_complex_float* b,
+                           lapack_int lda, lapack_complex_float* b,
                            lapack_int ldb );
 lapack_int LAPACKE_zhegst( int matrix_layout, lapack_int itype, char uplo,
                            lapack_int n, lapack_complex_double* a,
-                           lapack_int lda, const lapack_complex_double* b,
+                           lapack_int lda, lapack_complex_double* b,
                            lapack_int ldb );
 
 lapack_int LAPACKE_chegv( int matrix_layout, lapack_int itype, char jobz,
@@ -5824,6 +5748,45 @@ lapack_int LAPACKE_zgesvdx_work( int matrix_layout, char jobu, char jobvt, char
                                  lapack_complex_double* work, lapack_int lwork,
                                  double* rwork, lapack_int* iwork );
 
+lapack_int LAPACKE_sgesvdq_work( int matrix_layout, char joba, char jobp,
+                                 char jobr, char jobu, char jobv,
+                                 lapack_int m, lapack_int n, float* a,
+                                 lapack_int lda, float* s, float* u,
+                                 lapack_int ldu, float* v, lapack_int ldv,
+                                 lapack_int* numrank,
+                                 lapack_int* iwork, lapack_int liwork,
+                                 float* work, lapack_int lwork,
+                                 float* rwork, lapack_int lrwork);
+lapack_int LAPACKE_dgesvdq_work( int matrix_layout, char joba, char jobp,
+                                 char jobr, char jobu, char jobv,
+                                 lapack_int m, lapack_int n, double* a,
+                                 lapack_int lda, double* s, double* u,
+                                 lapack_int ldu, double* v, lapack_int ldv,
+                                 lapack_int* numrank,
+                                 lapack_int* iwork, lapack_int liwork,
+                                 double* work, lapack_int lwork,
+                                 double* rwork, lapack_int lrwork);
+lapack_int LAPACKE_cgesvdq_work( int matrix_layout, char joba, char jobp,
+                                 char jobr, char jobu, char jobv,
+                                 lapack_int m, lapack_int n,
+                                 lapack_complex_float* a, lapack_int lda,
+                                 float* s, lapack_complex_float* u,
+                                 lapack_int ldu, lapack_complex_float* v,
+                                 lapack_int ldv, lapack_int* numrank,
+                                 lapack_int* iwork, lapack_int liwork,
+                                 lapack_complex_float* cwork, lapack_int lcwork,
+                                 float* rwork, lapack_int lrwork);
+lapack_int LAPACKE_zgesvdq_work( int matrix_layout, char joba, char jobp,
+                                 char jobr, char jobu, char jobv,
+                                 lapack_int m, lapack_int n,
+                                 lapack_complex_double* a, lapack_int lda,
+                                 double* s, lapack_complex_double* u,
+                                 lapack_int ldu, lapack_complex_double* v,
+                                 lapack_int ldv, lapack_int* numrank,
+                                 lapack_int* iwork, lapack_int liwork,
+                                 lapack_complex_double* cwork, lapack_int lcwork,
+                                 double* rwork, lapack_int lrwork);
+
 lapack_int LAPACKE_sgesvj_work( int matrix_layout, char joba, char jobu,
                                 char jobv, lapack_int m, lapack_int n, float* a,
                                 lapack_int lda, float* sva, lapack_int mv,
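The LAPACKE_?gesvdq and LAPACKE_?gesvdq_work prototypes added above expose the ?gesvdq driver introduced in LAPACK 3.9.0: an SVD preconditioned by a column-pivoted QR factorization that also reports the numerical rank through numrank. A minimal usage sketch (not from the patch; the job flags 'H', 'P', 'T', 'A', 'A' are illustrative choices for high accuracy, row pivoting, the conditional-transpose heuristic, and full U and V, so check the dgesvdq documentation before relying on them):

/* Minimal sketch: numerical rank and singular values via LAPACKE_dgesvdq. */
#include <stdio.h>
#include "lapacke.h"

int main(void) {
    /* 4x3 column-major matrix whose second column doubles the first (rank 2) */
    double a[12] = { 1.0, 2.0, 3.0, 4.0,
                     2.0, 4.0, 6.0, 8.0,
                     1.0, 0.0, 1.0, 0.0 };
    double s[3], u[16], v[9];
    lapack_int m = 4, n = 3, numrank = 0;
    lapack_int info = LAPACKE_dgesvdq( LAPACK_COL_MAJOR, 'H', 'P', 'T', 'A', 'A',
                                       m, n, a, m, s, u, m, v, n, &numrank );
    if( info == 0 )
        printf( "numerical rank = %d, largest singular value = %g\n",
                (int) numrank, s[0] );
    return (int) info;
}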
@@ -6969,11 +6932,11 @@ lapack_int LAPACKE_zheevx_work( int matrix_layout, char jobz, char range,
 
 lapack_int LAPACKE_chegst_work( int matrix_layout, lapack_int itype, char uplo,
                                 lapack_int n, lapack_complex_float* a,
-                                lapack_int lda, const lapack_complex_float* b,
+                                lapack_int lda, lapack_complex_float* b,
                                 lapack_int ldb );
 lapack_int LAPACKE_zhegst_work( int matrix_layout, lapack_int itype, char uplo,
                                 lapack_int n, lapack_complex_double* a,
-                                lapack_int lda, const lapack_complex_double* b,
+                                lapack_int lda, lapack_complex_double* b,
                                 lapack_int ldb );
 
 lapack_int LAPACKE_chegv_work( int matrix_layout, lapack_int itype, char jobz,
@@ -10590,11 +10553,11 @@ lapack_int LAPACKE_csytri2x_work( int matrix_layout, char uplo, lapack_int n,
                                   const lapack_int* ipiv,
                                   lapack_complex_float* work, lapack_int nb );
 lapack_int LAPACKE_csytrs2( int matrix_layout, char uplo, lapack_int n,
-                            lapack_int nrhs, const lapack_complex_float* a,
+                            lapack_int nrhs, lapack_complex_float* a,
                             lapack_int lda, const lapack_int* ipiv,
                             lapack_complex_float* b, lapack_int ldb );
 lapack_int LAPACKE_csytrs2_work( int matrix_layout, char uplo, lapack_int n,
-                                 lapack_int nrhs, const lapack_complex_float* a,
+                                 lapack_int nrhs, lapack_complex_float* a,
                                  lapack_int lda, const lapack_int* ipiv,
                                  lapack_complex_float* b, lapack_int ldb,
                                  lapack_complex_float* work );
@@ -10755,10 +10718,10 @@ lapack_int LAPACKE_dsytri2x_work( int matrix_layout, char uplo, lapack_int n,
                                   const lapack_int* ipiv,
                                   double* work, lapack_int nb );
 lapack_int LAPACKE_dsytrs2( int matrix_layout, char uplo, lapack_int n,
-                            lapack_int nrhs, const double* a, lapack_int lda,
+                            lapack_int nrhs, double* a, lapack_int lda,
                             const lapack_int* ipiv, double* b, lapack_int ldb );
 lapack_int LAPACKE_dsytrs2_work( int matrix_layout, char uplo, lapack_int n,
-                                 lapack_int nrhs, const double* a,
+                                 lapack_int nrhs, double* a,
                                  lapack_int lda, const lapack_int* ipiv,
                                  double* b, lapack_int ldb, double* work );
 lapack_int LAPACKE_sbbcsd( int matrix_layout, char jobu1, char jobu2,
@@ -10850,10 +10813,10 @@ lapack_int LAPACKE_ssytri2x_work( int matrix_layout, char uplo, lapack_int n,
                                   const lapack_int* ipiv,
                                   float* work, lapack_int nb );
 lapack_int LAPACKE_ssytrs2( int matrix_layout, char uplo, lapack_int n,
-                            lapack_int nrhs, const float* a, lapack_int lda,
+                            lapack_int nrhs, float* a, lapack_int lda,
                             const lapack_int* ipiv, float* b, lapack_int ldb );
 lapack_int LAPACKE_ssytrs2_work( int matrix_layout, char uplo, lapack_int n,
-                                 lapack_int nrhs, const float* a,
+                                 lapack_int nrhs, float* a,
                                  lapack_int lda, const lapack_int* ipiv,
                                  float* b, lapack_int ldb, float* work );
 lapack_int LAPACKE_zbbcsd( int matrix_layout, char jobu1, char jobu2,
@@ -10935,11 +10898,11 @@ lapack_int LAPACKE_zsytri2x_work( int matrix_layout, char uplo, lapack_int n,
                                   const lapack_int* ipiv,
                                   lapack_complex_double* work, lapack_int nb );
 lapack_int LAPACKE_zsytrs2( int matrix_layout, char uplo, lapack_int n,
-                            lapack_int nrhs, const lapack_complex_double* a,
+                            lapack_int nrhs, lapack_complex_double* a,
                             lapack_int lda, const lapack_int* ipiv,
                             lapack_complex_double* b, lapack_int ldb );
 lapack_int LAPACKE_zsytrs2_work( int matrix_layout, char uplo, lapack_int n,
-                                 lapack_int nrhs, const lapack_complex_double* a,
+                                 lapack_int nrhs, lapack_complex_double* a,
                                  lapack_int lda, const lapack_int* ipiv,
                                  lapack_complex_double* b, lapack_int ldb,
                                  lapack_complex_double* work );
@@ -12609,6852 +12572,10 @@ lapack_int LAPACKE_zhetrs_aa_2stage_work( int matrix_layout, char uplo, lapack_i
                                           lapack_int lda, lapack_complex_double* tb,
                                           lapack_int ltb, lapack_int* ipiv,
                                           lapack_int* ipiv2, lapack_complex_double* b,
                                           lapack_int ldb );
-
-#define LAPACK_sgetrf LAPACK_GLOBAL(sgetrf,SGETRF)
-#define LAPACK_dgetrf LAPACK_GLOBAL(dgetrf,DGETRF)
-#define LAPACK_cgetrf LAPACK_GLOBAL(cgetrf,CGETRF)
-#define LAPACK_zgetrf LAPACK_GLOBAL(zgetrf,ZGETRF)
-#define LAPACK_sgetrf2 
LAPACK_GLOBAL(sgetrf2,SGETRF2) -#define LAPACK_dgetrf2 LAPACK_GLOBAL(dgetrf2,DGETRF2) -#define LAPACK_cgetrf2 LAPACK_GLOBAL(cgetrf2,CGETRF2) -#define LAPACK_zgetrf2 LAPACK_GLOBAL(zgetrf2,ZGETRF2) -#define LAPACK_sgbtrf LAPACK_GLOBAL(sgbtrf,SGBTRF) -#define LAPACK_dgbtrf LAPACK_GLOBAL(dgbtrf,DGBTRF) -#define LAPACK_cgbtrf LAPACK_GLOBAL(cgbtrf,CGBTRF) -#define LAPACK_zgbtrf LAPACK_GLOBAL(zgbtrf,ZGBTRF) -#define LAPACK_sgttrf LAPACK_GLOBAL(sgttrf,SGTTRF) -#define LAPACK_dgttrf LAPACK_GLOBAL(dgttrf,DGTTRF) -#define LAPACK_cgttrf LAPACK_GLOBAL(cgttrf,CGTTRF) -#define LAPACK_zgttrf LAPACK_GLOBAL(zgttrf,ZGTTRF) -#define LAPACK_spotrf LAPACK_GLOBAL(spotrf,SPOTRF) -#define LAPACK_dpotrf LAPACK_GLOBAL(dpotrf,DPOTRF) -#define LAPACK_cpotrf LAPACK_GLOBAL(cpotrf,CPOTRF) -#define LAPACK_zpotrf LAPACK_GLOBAL(zpotrf,ZPOTRF) -#define LAPACK_spotrf2 LAPACK_GLOBAL(spotrf2,SPOTRF2) -#define LAPACK_dpotrf2 LAPACK_GLOBAL(dpotrf2,DPOTRF2) -#define LAPACK_cpotrf2 LAPACK_GLOBAL(cpotrf2,CPOTRF2) -#define LAPACK_zpotrf2 LAPACK_GLOBAL(zpotrf2,ZPOTRF2) -#define LAPACK_dpstrf LAPACK_GLOBAL(dpstrf,DPSTRF) -#define LAPACK_spstrf LAPACK_GLOBAL(spstrf,SPSTRF) -#define LAPACK_zpstrf LAPACK_GLOBAL(zpstrf,ZPSTRF) -#define LAPACK_cpstrf LAPACK_GLOBAL(cpstrf,CPSTRF) -#define LAPACK_dpftrf LAPACK_GLOBAL(dpftrf,DPFTRF) -#define LAPACK_spftrf LAPACK_GLOBAL(spftrf,SPFTRF) -#define LAPACK_zpftrf LAPACK_GLOBAL(zpftrf,ZPFTRF) -#define LAPACK_cpftrf LAPACK_GLOBAL(cpftrf,CPFTRF) -#define LAPACK_spptrf LAPACK_GLOBAL(spptrf,SPPTRF) -#define LAPACK_dpptrf LAPACK_GLOBAL(dpptrf,DPPTRF) -#define LAPACK_cpptrf LAPACK_GLOBAL(cpptrf,CPPTRF) -#define LAPACK_zpptrf LAPACK_GLOBAL(zpptrf,ZPPTRF) -#define LAPACK_spbtrf LAPACK_GLOBAL(spbtrf,SPBTRF) -#define LAPACK_dpbtrf LAPACK_GLOBAL(dpbtrf,DPBTRF) -#define LAPACK_cpbtrf LAPACK_GLOBAL(cpbtrf,CPBTRF) -#define LAPACK_zpbtrf LAPACK_GLOBAL(zpbtrf,ZPBTRF) -#define LAPACK_spttrf LAPACK_GLOBAL(spttrf,SPTTRF) -#define LAPACK_dpttrf LAPACK_GLOBAL(dpttrf,DPTTRF) -#define LAPACK_cpttrf LAPACK_GLOBAL(cpttrf,CPTTRF) -#define LAPACK_zpttrf LAPACK_GLOBAL(zpttrf,ZPTTRF) -#define LAPACK_ssytrf LAPACK_GLOBAL(ssytrf,SSYTRF) -#define LAPACK_ssytrf_rook LAPACK_GLOBAL(ssytrf_rook,SSYTRF_ROOK) -#define LAPACK_dsytrf LAPACK_GLOBAL(dsytrf,DSYTRF) -#define LAPACK_dsytrf_rook LAPACK_GLOBAL(dsytrf_rook,DSYTRF_ROOK) -#define LAPACK_csytrf LAPACK_GLOBAL(csytrf,CSYTRF) -#define LAPACK_csytrf_rook LAPACK_GLOBAL(csytrf_rook,CSYTRF_ROOK) -#define LAPACK_zsytrf LAPACK_GLOBAL(zsytrf,ZSYTRF) -#define LAPACK_zsytrf_rook LAPACK_GLOBAL(zsytrf_rook,ZSYTRF_ROOK) -#define LAPACK_chetrf LAPACK_GLOBAL(chetrf,CHETRF) -#define LAPACK_chetrf_rook LAPACK_GLOBAL(chetrf_rook,CHETRF_ROOK) -#define LAPACK_zhetrf LAPACK_GLOBAL(zhetrf,ZHETRF) -#define LAPACK_zhetrf_rook LAPACK_GLOBAL(zhetrf_rook,ZHETRF_ROOK) -#define LAPACK_ssptrf LAPACK_GLOBAL(ssptrf,SSPTRF) -#define LAPACK_dsptrf LAPACK_GLOBAL(dsptrf,DSPTRF) -#define LAPACK_csptrf LAPACK_GLOBAL(csptrf,CSPTRF) -#define LAPACK_zsptrf LAPACK_GLOBAL(zsptrf,ZSPTRF) -#define LAPACK_chptrf LAPACK_GLOBAL(chptrf,CHPTRF) -#define LAPACK_zhptrf LAPACK_GLOBAL(zhptrf,ZHPTRF) -#define LAPACK_sgetrs LAPACK_GLOBAL(sgetrs,SGETRS) -#define LAPACK_dgetrs LAPACK_GLOBAL(dgetrs,DGETRS) -#define LAPACK_cgetrs LAPACK_GLOBAL(cgetrs,CGETRS) -#define LAPACK_zgetrs LAPACK_GLOBAL(zgetrs,ZGETRS) -#define LAPACK_sgbtrs LAPACK_GLOBAL(sgbtrs,SGBTRS) -#define LAPACK_dgbtrs LAPACK_GLOBAL(dgbtrs,DGBTRS) -#define LAPACK_cgbtrs LAPACK_GLOBAL(cgbtrs,CGBTRS) -#define LAPACK_zgbtrs LAPACK_GLOBAL(zgbtrs,ZGBTRS) -#define LAPACK_sgttrs 
LAPACK_GLOBAL(sgttrs,SGTTRS) -#define LAPACK_dgttrs LAPACK_GLOBAL(dgttrs,DGTTRS) -#define LAPACK_cgttrs LAPACK_GLOBAL(cgttrs,CGTTRS) -#define LAPACK_zgttrs LAPACK_GLOBAL(zgttrs,ZGTTRS) -#define LAPACK_spotrs LAPACK_GLOBAL(spotrs,SPOTRS) -#define LAPACK_dpotrs LAPACK_GLOBAL(dpotrs,DPOTRS) -#define LAPACK_cpotrs LAPACK_GLOBAL(cpotrs,CPOTRS) -#define LAPACK_zpotrs LAPACK_GLOBAL(zpotrs,ZPOTRS) -#define LAPACK_dpftrs LAPACK_GLOBAL(dpftrs,DPFTRS) -#define LAPACK_spftrs LAPACK_GLOBAL(spftrs,SPFTRS) -#define LAPACK_zpftrs LAPACK_GLOBAL(zpftrs,ZPFTRS) -#define LAPACK_cpftrs LAPACK_GLOBAL(cpftrs,CPFTRS) -#define LAPACK_spptrs LAPACK_GLOBAL(spptrs,SPPTRS) -#define LAPACK_dpptrs LAPACK_GLOBAL(dpptrs,DPPTRS) -#define LAPACK_cpptrs LAPACK_GLOBAL(cpptrs,CPPTRS) -#define LAPACK_zpptrs LAPACK_GLOBAL(zpptrs,ZPPTRS) -#define LAPACK_spbtrs LAPACK_GLOBAL(spbtrs,SPBTRS) -#define LAPACK_dpbtrs LAPACK_GLOBAL(dpbtrs,DPBTRS) -#define LAPACK_cpbtrs LAPACK_GLOBAL(cpbtrs,CPBTRS) -#define LAPACK_zpbtrs LAPACK_GLOBAL(zpbtrs,ZPBTRS) -#define LAPACK_spttrs LAPACK_GLOBAL(spttrs,SPTTRS) -#define LAPACK_dpttrs LAPACK_GLOBAL(dpttrs,DPTTRS) -#define LAPACK_cpttrs LAPACK_GLOBAL(cpttrs,CPTTRS) -#define LAPACK_zpttrs LAPACK_GLOBAL(zpttrs,ZPTTRS) -#define LAPACK_ssytrs LAPACK_GLOBAL(ssytrs,SSYTRS) -#define LAPACK_ssytrs_rook LAPACK_GLOBAL(ssytrs_rook,SSYTRS_ROOK) -#define LAPACK_dsytrs LAPACK_GLOBAL(dsytrs,DSYTRS) -#define LAPACK_dsytrs_rook LAPACK_GLOBAL(dsytrs_rook,DSYTRS_ROOK) -#define LAPACK_csytrs LAPACK_GLOBAL(csytrs,CSYTRS) -#define LAPACK_csytrs_rook LAPACK_GLOBAL(csytrs_rook,CSYTRS_ROOK) -#define LAPACK_zsytrs LAPACK_GLOBAL(zsytrs,ZSYTRS) -#define LAPACK_zsytrs_rook LAPACK_GLOBAL(zsytrs_rook,ZSYTRS_ROOK) -#define LAPACK_chetrs LAPACK_GLOBAL(chetrs,CHETRS) -#define LAPACK_chetrs_rook LAPACK_GLOBAL(chetrs_rook,CHETRS_ROOK) -#define LAPACK_zhetrs LAPACK_GLOBAL(zhetrs,ZHETRS) -#define LAPACK_zhetrs_rook LAPACK_GLOBAL(zhetrs_rook,ZHETRS_ROOK) -#define LAPACK_ssptrs LAPACK_GLOBAL(ssptrs,SSPTRS) -#define LAPACK_dsptrs LAPACK_GLOBAL(dsptrs,DSPTRS) -#define LAPACK_csptrs LAPACK_GLOBAL(csptrs,CSPTRS) -#define LAPACK_zsptrs LAPACK_GLOBAL(zsptrs,ZSPTRS) -#define LAPACK_chptrs LAPACK_GLOBAL(chptrs,CHPTRS) -#define LAPACK_zhptrs LAPACK_GLOBAL(zhptrs,ZHPTRS) -#define LAPACK_strtrs LAPACK_GLOBAL(strtrs,STRTRS) -#define LAPACK_dtrtrs LAPACK_GLOBAL(dtrtrs,DTRTRS) -#define LAPACK_ctrtrs LAPACK_GLOBAL(ctrtrs,CTRTRS) -#define LAPACK_ztrtrs LAPACK_GLOBAL(ztrtrs,ZTRTRS) -#define LAPACK_stptrs LAPACK_GLOBAL(stptrs,STPTRS) -#define LAPACK_dtptrs LAPACK_GLOBAL(dtptrs,DTPTRS) -#define LAPACK_ctptrs LAPACK_GLOBAL(ctptrs,CTPTRS) -#define LAPACK_ztptrs LAPACK_GLOBAL(ztptrs,ZTPTRS) -#define LAPACK_stbtrs LAPACK_GLOBAL(stbtrs,STBTRS) -#define LAPACK_dtbtrs LAPACK_GLOBAL(dtbtrs,DTBTRS) -#define LAPACK_ctbtrs LAPACK_GLOBAL(ctbtrs,CTBTRS) -#define LAPACK_ztbtrs LAPACK_GLOBAL(ztbtrs,ZTBTRS) -#define LAPACK_sgecon LAPACK_GLOBAL(sgecon,SGECON) -#define LAPACK_dgecon LAPACK_GLOBAL(dgecon,DGECON) -#define LAPACK_cgecon LAPACK_GLOBAL(cgecon,CGECON) -#define LAPACK_zgecon LAPACK_GLOBAL(zgecon,ZGECON) -#define LAPACK_sgbcon LAPACK_GLOBAL(sgbcon,SGBCON) -#define LAPACK_dgbcon LAPACK_GLOBAL(dgbcon,DGBCON) -#define LAPACK_cgbcon LAPACK_GLOBAL(cgbcon,CGBCON) -#define LAPACK_zgbcon LAPACK_GLOBAL(zgbcon,ZGBCON) -#define LAPACK_sgtcon LAPACK_GLOBAL(sgtcon,SGTCON) -#define LAPACK_dgtcon LAPACK_GLOBAL(dgtcon,DGTCON) -#define LAPACK_cgtcon LAPACK_GLOBAL(cgtcon,CGTCON) -#define LAPACK_zgtcon LAPACK_GLOBAL(zgtcon,ZGTCON) -#define LAPACK_spocon LAPACK_GLOBAL(spocon,SPOCON) 
-#define LAPACK_dpocon LAPACK_GLOBAL(dpocon,DPOCON) -#define LAPACK_cpocon LAPACK_GLOBAL(cpocon,CPOCON) -#define LAPACK_zpocon LAPACK_GLOBAL(zpocon,ZPOCON) -#define LAPACK_sppcon LAPACK_GLOBAL(sppcon,SPPCON) -#define LAPACK_dppcon LAPACK_GLOBAL(dppcon,DPPCON) -#define LAPACK_cppcon LAPACK_GLOBAL(cppcon,CPPCON) -#define LAPACK_zppcon LAPACK_GLOBAL(zppcon,ZPPCON) -#define LAPACK_spbcon LAPACK_GLOBAL(spbcon,SPBCON) -#define LAPACK_dpbcon LAPACK_GLOBAL(dpbcon,DPBCON) -#define LAPACK_cpbcon LAPACK_GLOBAL(cpbcon,CPBCON) -#define LAPACK_zpbcon LAPACK_GLOBAL(zpbcon,ZPBCON) -#define LAPACK_sptcon LAPACK_GLOBAL(sptcon,SPTCON) -#define LAPACK_dptcon LAPACK_GLOBAL(dptcon,DPTCON) -#define LAPACK_cptcon LAPACK_GLOBAL(cptcon,CPTCON) -#define LAPACK_zptcon LAPACK_GLOBAL(zptcon,ZPTCON) -#define LAPACK_ssycon LAPACK_GLOBAL(ssycon,SSYCON) -#define LAPACK_dsycon LAPACK_GLOBAL(dsycon,DSYCON) -#define LAPACK_csycon LAPACK_GLOBAL(csycon,CSYCON) -#define LAPACK_zsycon LAPACK_GLOBAL(zsycon,ZSYCON) -#define LAPACK_checon LAPACK_GLOBAL(checon,CHECON) -#define LAPACK_zhecon LAPACK_GLOBAL(zhecon,ZHECON) -#define LAPACK_sspcon LAPACK_GLOBAL(sspcon,SSPCON) -#define LAPACK_dspcon LAPACK_GLOBAL(dspcon,DSPCON) -#define LAPACK_cspcon LAPACK_GLOBAL(cspcon,CSPCON) -#define LAPACK_zspcon LAPACK_GLOBAL(zspcon,ZSPCON) -#define LAPACK_chpcon LAPACK_GLOBAL(chpcon,CHPCON) -#define LAPACK_zhpcon LAPACK_GLOBAL(zhpcon,ZHPCON) -#define LAPACK_strcon LAPACK_GLOBAL(strcon,STRCON) -#define LAPACK_dtrcon LAPACK_GLOBAL(dtrcon,DTRCON) -#define LAPACK_ctrcon LAPACK_GLOBAL(ctrcon,CTRCON) -#define LAPACK_ztrcon LAPACK_GLOBAL(ztrcon,ZTRCON) -#define LAPACK_stpcon LAPACK_GLOBAL(stpcon,STPCON) -#define LAPACK_dtpcon LAPACK_GLOBAL(dtpcon,DTPCON) -#define LAPACK_ctpcon LAPACK_GLOBAL(ctpcon,CTPCON) -#define LAPACK_ztpcon LAPACK_GLOBAL(ztpcon,ZTPCON) -#define LAPACK_stbcon LAPACK_GLOBAL(stbcon,STBCON) -#define LAPACK_dtbcon LAPACK_GLOBAL(dtbcon,DTBCON) -#define LAPACK_ctbcon LAPACK_GLOBAL(ctbcon,CTBCON) -#define LAPACK_ztbcon LAPACK_GLOBAL(ztbcon,ZTBCON) -#define LAPACK_sgerfs LAPACK_GLOBAL(sgerfs,SGERFS) -#define LAPACK_dgerfs LAPACK_GLOBAL(dgerfs,DGERFS) -#define LAPACK_cgerfs LAPACK_GLOBAL(cgerfs,CGERFS) -#define LAPACK_zgerfs LAPACK_GLOBAL(zgerfs,ZGERFS) -#define LAPACK_dgerfsx LAPACK_GLOBAL(dgerfsx,DGERFSX) -#define LAPACK_sgerfsx LAPACK_GLOBAL(sgerfsx,SGERFSX) -#define LAPACK_zgerfsx LAPACK_GLOBAL(zgerfsx,ZGERFSX) -#define LAPACK_cgerfsx LAPACK_GLOBAL(cgerfsx,CGERFSX) -#define LAPACK_sgbrfs LAPACK_GLOBAL(sgbrfs,SGBRFS) -#define LAPACK_dgbrfs LAPACK_GLOBAL(dgbrfs,DGBRFS) -#define LAPACK_cgbrfs LAPACK_GLOBAL(cgbrfs,CGBRFS) -#define LAPACK_zgbrfs LAPACK_GLOBAL(zgbrfs,ZGBRFS) -#define LAPACK_dgbrfsx LAPACK_GLOBAL(dgbrfsx,DGBRFSX) -#define LAPACK_sgbrfsx LAPACK_GLOBAL(sgbrfsx,SGBRFSX) -#define LAPACK_zgbrfsx LAPACK_GLOBAL(zgbrfsx,ZGBRFSX) -#define LAPACK_cgbrfsx LAPACK_GLOBAL(cgbrfsx,CGBRFSX) -#define LAPACK_sgtrfs LAPACK_GLOBAL(sgtrfs,SGTRFS) -#define LAPACK_dgtrfs LAPACK_GLOBAL(dgtrfs,DGTRFS) -#define LAPACK_cgtrfs LAPACK_GLOBAL(cgtrfs,CGTRFS) -#define LAPACK_zgtrfs LAPACK_GLOBAL(zgtrfs,ZGTRFS) -#define LAPACK_sporfs LAPACK_GLOBAL(sporfs,SPORFS) -#define LAPACK_dporfs LAPACK_GLOBAL(dporfs,DPORFS) -#define LAPACK_cporfs LAPACK_GLOBAL(cporfs,CPORFS) -#define LAPACK_zporfs LAPACK_GLOBAL(zporfs,ZPORFS) -#define LAPACK_dporfsx LAPACK_GLOBAL(dporfsx,DPORFSX) -#define LAPACK_sporfsx LAPACK_GLOBAL(sporfsx,SPORFSX) -#define LAPACK_zporfsx LAPACK_GLOBAL(zporfsx,ZPORFSX) -#define LAPACK_cporfsx LAPACK_GLOBAL(cporfsx,CPORFSX) -#define LAPACK_spprfs 
LAPACK_GLOBAL(spprfs,SPPRFS) -#define LAPACK_dpprfs LAPACK_GLOBAL(dpprfs,DPPRFS) -#define LAPACK_cpprfs LAPACK_GLOBAL(cpprfs,CPPRFS) -#define LAPACK_zpprfs LAPACK_GLOBAL(zpprfs,ZPPRFS) -#define LAPACK_spbrfs LAPACK_GLOBAL(spbrfs,SPBRFS) -#define LAPACK_dpbrfs LAPACK_GLOBAL(dpbrfs,DPBRFS) -#define LAPACK_cpbrfs LAPACK_GLOBAL(cpbrfs,CPBRFS) -#define LAPACK_zpbrfs LAPACK_GLOBAL(zpbrfs,ZPBRFS) -#define LAPACK_sptrfs LAPACK_GLOBAL(sptrfs,SPTRFS) -#define LAPACK_dptrfs LAPACK_GLOBAL(dptrfs,DPTRFS) -#define LAPACK_cptrfs LAPACK_GLOBAL(cptrfs,CPTRFS) -#define LAPACK_zptrfs LAPACK_GLOBAL(zptrfs,ZPTRFS) -#define LAPACK_ssyrfs LAPACK_GLOBAL(ssyrfs,SSYRFS) -#define LAPACK_dsyrfs LAPACK_GLOBAL(dsyrfs,DSYRFS) -#define LAPACK_csyrfs LAPACK_GLOBAL(csyrfs,CSYRFS) -#define LAPACK_zsyrfs LAPACK_GLOBAL(zsyrfs,ZSYRFS) -#define LAPACK_dsyrfsx LAPACK_GLOBAL(dsyrfsx,DSYRFSX) -#define LAPACK_ssyrfsx LAPACK_GLOBAL(ssyrfsx,SSYRFSX) -#define LAPACK_zsyrfsx LAPACK_GLOBAL(zsyrfsx,ZSYRFSX) -#define LAPACK_csyrfsx LAPACK_GLOBAL(csyrfsx,CSYRFSX) -#define LAPACK_cherfs LAPACK_GLOBAL(cherfs,CHERFS) -#define LAPACK_zherfs LAPACK_GLOBAL(zherfs,ZHERFS) -#define LAPACK_zherfsx LAPACK_GLOBAL(zherfsx,ZHERFSX) -#define LAPACK_cherfsx LAPACK_GLOBAL(cherfsx,CHERFSX) -#define LAPACK_ssprfs LAPACK_GLOBAL(ssprfs,SSPRFS) -#define LAPACK_dsprfs LAPACK_GLOBAL(dsprfs,DSPRFS) -#define LAPACK_csprfs LAPACK_GLOBAL(csprfs,CSPRFS) -#define LAPACK_zsprfs LAPACK_GLOBAL(zsprfs,ZSPRFS) -#define LAPACK_chprfs LAPACK_GLOBAL(chprfs,CHPRFS) -#define LAPACK_zhprfs LAPACK_GLOBAL(zhprfs,ZHPRFS) -#define LAPACK_strrfs LAPACK_GLOBAL(strrfs,STRRFS) -#define LAPACK_dtrrfs LAPACK_GLOBAL(dtrrfs,DTRRFS) -#define LAPACK_ctrrfs LAPACK_GLOBAL(ctrrfs,CTRRFS) -#define LAPACK_ztrrfs LAPACK_GLOBAL(ztrrfs,ZTRRFS) -#define LAPACK_stprfs LAPACK_GLOBAL(stprfs,STPRFS) -#define LAPACK_dtprfs LAPACK_GLOBAL(dtprfs,DTPRFS) -#define LAPACK_ctprfs LAPACK_GLOBAL(ctprfs,CTPRFS) -#define LAPACK_ztprfs LAPACK_GLOBAL(ztprfs,ZTPRFS) -#define LAPACK_stbrfs LAPACK_GLOBAL(stbrfs,STBRFS) -#define LAPACK_dtbrfs LAPACK_GLOBAL(dtbrfs,DTBRFS) -#define LAPACK_ctbrfs LAPACK_GLOBAL(ctbrfs,CTBRFS) -#define LAPACK_ztbrfs LAPACK_GLOBAL(ztbrfs,ZTBRFS) -#define LAPACK_sgetri LAPACK_GLOBAL(sgetri,SGETRI) -#define LAPACK_dgetri LAPACK_GLOBAL(dgetri,DGETRI) -#define LAPACK_cgetri LAPACK_GLOBAL(cgetri,CGETRI) -#define LAPACK_zgetri LAPACK_GLOBAL(zgetri,ZGETRI) -#define LAPACK_spotri LAPACK_GLOBAL(spotri,SPOTRI) -#define LAPACK_dpotri LAPACK_GLOBAL(dpotri,DPOTRI) -#define LAPACK_cpotri LAPACK_GLOBAL(cpotri,CPOTRI) -#define LAPACK_zpotri LAPACK_GLOBAL(zpotri,ZPOTRI) -#define LAPACK_dpftri LAPACK_GLOBAL(dpftri,DPFTRI) -#define LAPACK_spftri LAPACK_GLOBAL(spftri,SPFTRI) -#define LAPACK_zpftri LAPACK_GLOBAL(zpftri,ZPFTRI) -#define LAPACK_cpftri LAPACK_GLOBAL(cpftri,CPFTRI) -#define LAPACK_spptri LAPACK_GLOBAL(spptri,SPPTRI) -#define LAPACK_dpptri LAPACK_GLOBAL(dpptri,DPPTRI) -#define LAPACK_cpptri LAPACK_GLOBAL(cpptri,CPPTRI) -#define LAPACK_zpptri LAPACK_GLOBAL(zpptri,ZPPTRI) -#define LAPACK_ssytri LAPACK_GLOBAL(ssytri,SSYTRI) -#define LAPACK_dsytri LAPACK_GLOBAL(dsytri,DSYTRI) -#define LAPACK_csytri LAPACK_GLOBAL(csytri,CSYTRI) -#define LAPACK_zsytri LAPACK_GLOBAL(zsytri,ZSYTRI) -#define LAPACK_chetri LAPACK_GLOBAL(chetri,CHETRI) -#define LAPACK_zhetri LAPACK_GLOBAL(zhetri,ZHETRI) -#define LAPACK_ssptri LAPACK_GLOBAL(ssptri,SSPTRI) -#define LAPACK_dsptri LAPACK_GLOBAL(dsptri,DSPTRI) -#define LAPACK_csptri LAPACK_GLOBAL(csptri,CSPTRI) -#define LAPACK_zsptri LAPACK_GLOBAL(zsptri,ZSPTRI) -#define LAPACK_chptri 
LAPACK_GLOBAL(chptri,CHPTRI) -#define LAPACK_zhptri LAPACK_GLOBAL(zhptri,ZHPTRI) -#define LAPACK_strtri LAPACK_GLOBAL(strtri,STRTRI) -#define LAPACK_dtrtri LAPACK_GLOBAL(dtrtri,DTRTRI) -#define LAPACK_ctrtri LAPACK_GLOBAL(ctrtri,CTRTRI) -#define LAPACK_ztrtri LAPACK_GLOBAL(ztrtri,ZTRTRI) -#define LAPACK_dtftri LAPACK_GLOBAL(dtftri,DTFTRI) -#define LAPACK_stftri LAPACK_GLOBAL(stftri,STFTRI) -#define LAPACK_ztftri LAPACK_GLOBAL(ztftri,ZTFTRI) -#define LAPACK_ctftri LAPACK_GLOBAL(ctftri,CTFTRI) -#define LAPACK_stptri LAPACK_GLOBAL(stptri,STPTRI) -#define LAPACK_dtptri LAPACK_GLOBAL(dtptri,DTPTRI) -#define LAPACK_ctptri LAPACK_GLOBAL(ctptri,CTPTRI) -#define LAPACK_ztptri LAPACK_GLOBAL(ztptri,ZTPTRI) -#define LAPACK_sgeequ LAPACK_GLOBAL(sgeequ,SGEEQU) -#define LAPACK_dgeequ LAPACK_GLOBAL(dgeequ,DGEEQU) -#define LAPACK_cgeequ LAPACK_GLOBAL(cgeequ,CGEEQU) -#define LAPACK_zgeequ LAPACK_GLOBAL(zgeequ,ZGEEQU) -#define LAPACK_dgeequb LAPACK_GLOBAL(dgeequb,DGEEQUB) -#define LAPACK_sgeequb LAPACK_GLOBAL(sgeequb,SGEEQUB) -#define LAPACK_zgeequb LAPACK_GLOBAL(zgeequb,ZGEEQUB) -#define LAPACK_cgeequb LAPACK_GLOBAL(cgeequb,CGEEQUB) -#define LAPACK_sgbequ LAPACK_GLOBAL(sgbequ,SGBEQU) -#define LAPACK_dgbequ LAPACK_GLOBAL(dgbequ,DGBEQU) -#define LAPACK_cgbequ LAPACK_GLOBAL(cgbequ,CGBEQU) -#define LAPACK_zgbequ LAPACK_GLOBAL(zgbequ,ZGBEQU) -#define LAPACK_dgbequb LAPACK_GLOBAL(dgbequb,DGBEQUB) -#define LAPACK_sgbequb LAPACK_GLOBAL(sgbequb,SGBEQUB) -#define LAPACK_zgbequb LAPACK_GLOBAL(zgbequb,ZGBEQUB) -#define LAPACK_cgbequb LAPACK_GLOBAL(cgbequb,CGBEQUB) -#define LAPACK_spoequ LAPACK_GLOBAL(spoequ,SPOEQU) -#define LAPACK_dpoequ LAPACK_GLOBAL(dpoequ,DPOEQU) -#define LAPACK_cpoequ LAPACK_GLOBAL(cpoequ,CPOEQU) -#define LAPACK_zpoequ LAPACK_GLOBAL(zpoequ,ZPOEQU) -#define LAPACK_dpoequb LAPACK_GLOBAL(dpoequb,DPOEQUB) -#define LAPACK_spoequb LAPACK_GLOBAL(spoequb,SPOEQUB) -#define LAPACK_zpoequb LAPACK_GLOBAL(zpoequb,ZPOEQUB) -#define LAPACK_cpoequb LAPACK_GLOBAL(cpoequb,CPOEQUB) -#define LAPACK_sppequ LAPACK_GLOBAL(sppequ,SPPEQU) -#define LAPACK_dppequ LAPACK_GLOBAL(dppequ,DPPEQU) -#define LAPACK_cppequ LAPACK_GLOBAL(cppequ,CPPEQU) -#define LAPACK_zppequ LAPACK_GLOBAL(zppequ,ZPPEQU) -#define LAPACK_spbequ LAPACK_GLOBAL(spbequ,SPBEQU) -#define LAPACK_dpbequ LAPACK_GLOBAL(dpbequ,DPBEQU) -#define LAPACK_cpbequ LAPACK_GLOBAL(cpbequ,CPBEQU) -#define LAPACK_zpbequ LAPACK_GLOBAL(zpbequ,ZPBEQU) -#define LAPACK_dsyequb LAPACK_GLOBAL(dsyequb,DSYEQUB) -#define LAPACK_ssyequb LAPACK_GLOBAL(ssyequb,SSYEQUB) -#define LAPACK_zsyequb LAPACK_GLOBAL(zsyequb,ZSYEQUB) -#define LAPACK_csyequb LAPACK_GLOBAL(csyequb,CSYEQUB) -#define LAPACK_zheequb LAPACK_GLOBAL(zheequb,ZHEEQUB) -#define LAPACK_cheequb LAPACK_GLOBAL(cheequb,CHEEQUB) -#define LAPACK_sgesv LAPACK_GLOBAL(sgesv,SGESV) -#define LAPACK_dgesv LAPACK_GLOBAL(dgesv,DGESV) -#define LAPACK_cgesv LAPACK_GLOBAL(cgesv,CGESV) -#define LAPACK_zgesv LAPACK_GLOBAL(zgesv,ZGESV) -#define LAPACK_dsgesv LAPACK_GLOBAL(dsgesv,DSGESV) -#define LAPACK_zcgesv LAPACK_GLOBAL(zcgesv,ZCGESV) -#define LAPACK_sgesvx LAPACK_GLOBAL(sgesvx,SGESVX) -#define LAPACK_dgesvx LAPACK_GLOBAL(dgesvx,DGESVX) -#define LAPACK_cgesvx LAPACK_GLOBAL(cgesvx,CGESVX) -#define LAPACK_zgesvx LAPACK_GLOBAL(zgesvx,ZGESVX) -#define LAPACK_dgesvxx LAPACK_GLOBAL(dgesvxx,DGESVXX) -#define LAPACK_sgesvxx LAPACK_GLOBAL(sgesvxx,SGESVXX) -#define LAPACK_zgesvxx LAPACK_GLOBAL(zgesvxx,ZGESVXX) -#define LAPACK_cgesvxx LAPACK_GLOBAL(cgesvxx,CGESVXX) -#define LAPACK_sgbsv LAPACK_GLOBAL(sgbsv,SGBSV) -#define LAPACK_dgbsv 
LAPACK_GLOBAL(dgbsv,DGBSV) -#define LAPACK_cgbsv LAPACK_GLOBAL(cgbsv,CGBSV) -#define LAPACK_zgbsv LAPACK_GLOBAL(zgbsv,ZGBSV) -#define LAPACK_sgbsvx LAPACK_GLOBAL(sgbsvx,SGBSVX) -#define LAPACK_dgbsvx LAPACK_GLOBAL(dgbsvx,DGBSVX) -#define LAPACK_cgbsvx LAPACK_GLOBAL(cgbsvx,CGBSVX) -#define LAPACK_zgbsvx LAPACK_GLOBAL(zgbsvx,ZGBSVX) -#define LAPACK_dgbsvxx LAPACK_GLOBAL(dgbsvxx,DGBSVXX) -#define LAPACK_sgbsvxx LAPACK_GLOBAL(sgbsvxx,SGBSVXX) -#define LAPACK_zgbsvxx LAPACK_GLOBAL(zgbsvxx,ZGBSVXX) -#define LAPACK_cgbsvxx LAPACK_GLOBAL(cgbsvxx,CGBSVXX) -#define LAPACK_sgtsv LAPACK_GLOBAL(sgtsv,SGTSV) -#define LAPACK_dgtsv LAPACK_GLOBAL(dgtsv,DGTSV) -#define LAPACK_cgtsv LAPACK_GLOBAL(cgtsv,CGTSV) -#define LAPACK_zgtsv LAPACK_GLOBAL(zgtsv,ZGTSV) -#define LAPACK_sgtsvx LAPACK_GLOBAL(sgtsvx,SGTSVX) -#define LAPACK_dgtsvx LAPACK_GLOBAL(dgtsvx,DGTSVX) -#define LAPACK_cgtsvx LAPACK_GLOBAL(cgtsvx,CGTSVX) -#define LAPACK_zgtsvx LAPACK_GLOBAL(zgtsvx,ZGTSVX) -#define LAPACK_sposv LAPACK_GLOBAL(sposv,SPOSV) -#define LAPACK_dposv LAPACK_GLOBAL(dposv,DPOSV) -#define LAPACK_cposv LAPACK_GLOBAL(cposv,CPOSV) -#define LAPACK_zposv LAPACK_GLOBAL(zposv,ZPOSV) -#define LAPACK_dsposv LAPACK_GLOBAL(dsposv,DSPOSV) -#define LAPACK_zcposv LAPACK_GLOBAL(zcposv,ZCPOSV) -#define LAPACK_sposvx LAPACK_GLOBAL(sposvx,SPOSVX) -#define LAPACK_dposvx LAPACK_GLOBAL(dposvx,DPOSVX) -#define LAPACK_cposvx LAPACK_GLOBAL(cposvx,CPOSVX) -#define LAPACK_zposvx LAPACK_GLOBAL(zposvx,ZPOSVX) -#define LAPACK_dposvxx LAPACK_GLOBAL(dposvxx,DPOSVXX) -#define LAPACK_sposvxx LAPACK_GLOBAL(sposvxx,SPOSVXX) -#define LAPACK_zposvxx LAPACK_GLOBAL(zposvxx,ZPOSVXX) -#define LAPACK_cposvxx LAPACK_GLOBAL(cposvxx,CPOSVXX) -#define LAPACK_sppsv LAPACK_GLOBAL(sppsv,SPPSV) -#define LAPACK_dppsv LAPACK_GLOBAL(dppsv,DPPSV) -#define LAPACK_cppsv LAPACK_GLOBAL(cppsv,CPPSV) -#define LAPACK_zppsv LAPACK_GLOBAL(zppsv,ZPPSV) -#define LAPACK_sppsvx LAPACK_GLOBAL(sppsvx,SPPSVX) -#define LAPACK_dppsvx LAPACK_GLOBAL(dppsvx,DPPSVX) -#define LAPACK_cppsvx LAPACK_GLOBAL(cppsvx,CPPSVX) -#define LAPACK_zppsvx LAPACK_GLOBAL(zppsvx,ZPPSVX) -#define LAPACK_spbsv LAPACK_GLOBAL(spbsv,SPBSV) -#define LAPACK_dpbsv LAPACK_GLOBAL(dpbsv,DPBSV) -#define LAPACK_cpbsv LAPACK_GLOBAL(cpbsv,CPBSV) -#define LAPACK_zpbsv LAPACK_GLOBAL(zpbsv,ZPBSV) -#define LAPACK_spbsvx LAPACK_GLOBAL(spbsvx,SPBSVX) -#define LAPACK_dpbsvx LAPACK_GLOBAL(dpbsvx,DPBSVX) -#define LAPACK_cpbsvx LAPACK_GLOBAL(cpbsvx,CPBSVX) -#define LAPACK_zpbsvx LAPACK_GLOBAL(zpbsvx,ZPBSVX) -#define LAPACK_sptsv LAPACK_GLOBAL(sptsv,SPTSV) -#define LAPACK_dptsv LAPACK_GLOBAL(dptsv,DPTSV) -#define LAPACK_cptsv LAPACK_GLOBAL(cptsv,CPTSV) -#define LAPACK_zptsv LAPACK_GLOBAL(zptsv,ZPTSV) -#define LAPACK_sptsvx LAPACK_GLOBAL(sptsvx,SPTSVX) -#define LAPACK_dptsvx LAPACK_GLOBAL(dptsvx,DPTSVX) -#define LAPACK_cptsvx LAPACK_GLOBAL(cptsvx,CPTSVX) -#define LAPACK_zptsvx LAPACK_GLOBAL(zptsvx,ZPTSVX) -#define LAPACK_ssysv LAPACK_GLOBAL(ssysv,SSYSV) -#define LAPACK_dsysv LAPACK_GLOBAL(dsysv,DSYSV) -#define LAPACK_csysv LAPACK_GLOBAL(csysv,CSYSV) -#define LAPACK_zsysv LAPACK_GLOBAL(zsysv,ZSYSV) -#define LAPACK_ssysvx LAPACK_GLOBAL(ssysvx,SSYSVX) -#define LAPACK_dsysvx LAPACK_GLOBAL(dsysvx,DSYSVX) -#define LAPACK_csysvx LAPACK_GLOBAL(csysvx,CSYSVX) -#define LAPACK_zsysvx LAPACK_GLOBAL(zsysvx,ZSYSVX) -#define LAPACK_dsysvxx LAPACK_GLOBAL(dsysvxx,DSYSVXX) -#define LAPACK_ssysvxx LAPACK_GLOBAL(ssysvxx,SSYSVXX) -#define LAPACK_zsysvxx LAPACK_GLOBAL(zsysvxx,ZSYSVXX) -#define LAPACK_csysvxx LAPACK_GLOBAL(csysvxx,CSYSVXX) -#define LAPACK_chesv 
-#define LAPACK_zhesv LAPACK_GLOBAL(zhesv,ZHESV)
-#define LAPACK_chesvx LAPACK_GLOBAL(chesvx,CHESVX)
-#define LAPACK_zhesvx LAPACK_GLOBAL(zhesvx,ZHESVX)
-#define LAPACK_zhesvxx LAPACK_GLOBAL(zhesvxx,ZHESVXX)
-#define LAPACK_chesvxx LAPACK_GLOBAL(chesvxx,CHESVXX)
-#define LAPACK_sspsv LAPACK_GLOBAL(sspsv,SSPSV)
-#define LAPACK_dspsv LAPACK_GLOBAL(dspsv,DSPSV)
-#define LAPACK_cspsv LAPACK_GLOBAL(cspsv,CSPSV)
-#define LAPACK_zspsv LAPACK_GLOBAL(zspsv,ZSPSV)
-#define LAPACK_sspsvx LAPACK_GLOBAL(sspsvx,SSPSVX)
-#define LAPACK_dspsvx LAPACK_GLOBAL(dspsvx,DSPSVX)
-#define LAPACK_cspsvx LAPACK_GLOBAL(cspsvx,CSPSVX)
-#define LAPACK_zspsvx LAPACK_GLOBAL(zspsvx,ZSPSVX)
-#define LAPACK_chpsv LAPACK_GLOBAL(chpsv,CHPSV)
-#define LAPACK_zhpsv LAPACK_GLOBAL(zhpsv,ZHPSV)
-#define LAPACK_chpsvx LAPACK_GLOBAL(chpsvx,CHPSVX)
-#define LAPACK_zhpsvx LAPACK_GLOBAL(zhpsvx,ZHPSVX)
-#define LAPACK_sgeqrf LAPACK_GLOBAL(sgeqrf,SGEQRF)
-#define LAPACK_dgeqrf LAPACK_GLOBAL(dgeqrf,DGEQRF)
-#define LAPACK_cgeqrf LAPACK_GLOBAL(cgeqrf,CGEQRF)
-#define LAPACK_zgeqrf LAPACK_GLOBAL(zgeqrf,ZGEQRF)
-#define LAPACK_sgeqpf LAPACK_GLOBAL(sgeqpf,SGEQPF)
-#define LAPACK_dgeqpf LAPACK_GLOBAL(dgeqpf,DGEQPF)
-#define LAPACK_cgeqpf LAPACK_GLOBAL(cgeqpf,CGEQPF)
-#define LAPACK_zgeqpf LAPACK_GLOBAL(zgeqpf,ZGEQPF)
-#define LAPACK_sgeqp3 LAPACK_GLOBAL(sgeqp3,SGEQP3)
-#define LAPACK_dgeqp3 LAPACK_GLOBAL(dgeqp3,DGEQP3)
-#define LAPACK_cgeqp3 LAPACK_GLOBAL(cgeqp3,CGEQP3)
-#define LAPACK_zgeqp3 LAPACK_GLOBAL(zgeqp3,ZGEQP3)
-#define LAPACK_sorgqr LAPACK_GLOBAL(sorgqr,SORGQR)
-#define LAPACK_dorgqr LAPACK_GLOBAL(dorgqr,DORGQR)
-#define LAPACK_sormqr LAPACK_GLOBAL(sormqr,SORMQR)
-#define LAPACK_dormqr LAPACK_GLOBAL(dormqr,DORMQR)
-#define LAPACK_cungqr LAPACK_GLOBAL(cungqr,CUNGQR)
-#define LAPACK_zungqr LAPACK_GLOBAL(zungqr,ZUNGQR)
-#define LAPACK_cunmqr LAPACK_GLOBAL(cunmqr,CUNMQR)
-#define LAPACK_zunmqr LAPACK_GLOBAL(zunmqr,ZUNMQR)
-#define LAPACK_sgelqf LAPACK_GLOBAL(sgelqf,SGELQF)
-#define LAPACK_dgelqf LAPACK_GLOBAL(dgelqf,DGELQF)
-#define LAPACK_cgelqf LAPACK_GLOBAL(cgelqf,CGELQF)
-#define LAPACK_zgelqf LAPACK_GLOBAL(zgelqf,ZGELQF)
-#define LAPACK_sorglq LAPACK_GLOBAL(sorglq,SORGLQ)
-#define LAPACK_dorglq LAPACK_GLOBAL(dorglq,DORGLQ)
-#define LAPACK_sormlq LAPACK_GLOBAL(sormlq,SORMLQ)
-#define LAPACK_dormlq LAPACK_GLOBAL(dormlq,DORMLQ)
-#define LAPACK_cunglq LAPACK_GLOBAL(cunglq,CUNGLQ)
-#define LAPACK_zunglq LAPACK_GLOBAL(zunglq,ZUNGLQ)
-#define LAPACK_cunmlq LAPACK_GLOBAL(cunmlq,CUNMLQ)
-#define LAPACK_zunmlq LAPACK_GLOBAL(zunmlq,ZUNMLQ)
-#define LAPACK_sgeqlf LAPACK_GLOBAL(sgeqlf,SGEQLF)
-#define LAPACK_dgeqlf LAPACK_GLOBAL(dgeqlf,DGEQLF)
-#define LAPACK_cgeqlf LAPACK_GLOBAL(cgeqlf,CGEQLF)
-#define LAPACK_zgeqlf LAPACK_GLOBAL(zgeqlf,ZGEQLF)
-#define LAPACK_sorgql LAPACK_GLOBAL(sorgql,SORGQL)
-#define LAPACK_dorgql LAPACK_GLOBAL(dorgql,DORGQL)
-#define LAPACK_cungql LAPACK_GLOBAL(cungql,CUNGQL)
-#define LAPACK_zungql LAPACK_GLOBAL(zungql,ZUNGQL)
-#define LAPACK_sormql LAPACK_GLOBAL(sormql,SORMQL)
-#define LAPACK_dormql LAPACK_GLOBAL(dormql,DORMQL)
-#define LAPACK_cunmql LAPACK_GLOBAL(cunmql,CUNMQL)
-#define LAPACK_zunmql LAPACK_GLOBAL(zunmql,ZUNMQL)
-#define LAPACK_sgerqf LAPACK_GLOBAL(sgerqf,SGERQF)
-#define LAPACK_dgerqf LAPACK_GLOBAL(dgerqf,DGERQF)
-#define LAPACK_cgerqf LAPACK_GLOBAL(cgerqf,CGERQF)
-#define LAPACK_zgerqf LAPACK_GLOBAL(zgerqf,ZGERQF)
-#define LAPACK_sorgrq LAPACK_GLOBAL(sorgrq,SORGRQ)
-#define LAPACK_dorgrq LAPACK_GLOBAL(dorgrq,DORGRQ)
-#define LAPACK_cungrq LAPACK_GLOBAL(cungrq,CUNGRQ)
-#define LAPACK_zungrq LAPACK_GLOBAL(zungrq,ZUNGRQ)
-#define LAPACK_sormrq LAPACK_GLOBAL(sormrq,SORMRQ)
-#define LAPACK_dormrq LAPACK_GLOBAL(dormrq,DORMRQ)
-#define LAPACK_cunmrq LAPACK_GLOBAL(cunmrq,CUNMRQ)
-#define LAPACK_zunmrq LAPACK_GLOBAL(zunmrq,ZUNMRQ)
-#define LAPACK_stzrzf LAPACK_GLOBAL(stzrzf,STZRZF)
-#define LAPACK_dtzrzf LAPACK_GLOBAL(dtzrzf,DTZRZF)
-#define LAPACK_ctzrzf LAPACK_GLOBAL(ctzrzf,CTZRZF)
-#define LAPACK_ztzrzf LAPACK_GLOBAL(ztzrzf,ZTZRZF)
-#define LAPACK_sormrz LAPACK_GLOBAL(sormrz,SORMRZ)
-#define LAPACK_dormrz LAPACK_GLOBAL(dormrz,DORMRZ)
-#define LAPACK_cunmrz LAPACK_GLOBAL(cunmrz,CUNMRZ)
-#define LAPACK_zunmrz LAPACK_GLOBAL(zunmrz,ZUNMRZ)
-#define LAPACK_sggqrf LAPACK_GLOBAL(sggqrf,SGGQRF)
-#define LAPACK_dggqrf LAPACK_GLOBAL(dggqrf,DGGQRF)
-#define LAPACK_cggqrf LAPACK_GLOBAL(cggqrf,CGGQRF)
-#define LAPACK_zggqrf LAPACK_GLOBAL(zggqrf,ZGGQRF)
-#define LAPACK_sggrqf LAPACK_GLOBAL(sggrqf,SGGRQF)
-#define LAPACK_dggrqf LAPACK_GLOBAL(dggrqf,DGGRQF)
-#define LAPACK_cggrqf LAPACK_GLOBAL(cggrqf,CGGRQF)
-#define LAPACK_zggrqf LAPACK_GLOBAL(zggrqf,ZGGRQF)
-#define LAPACK_sgebrd LAPACK_GLOBAL(sgebrd,SGEBRD)
-#define LAPACK_dgebrd LAPACK_GLOBAL(dgebrd,DGEBRD)
-#define LAPACK_cgebrd LAPACK_GLOBAL(cgebrd,CGEBRD)
-#define LAPACK_zgebrd LAPACK_GLOBAL(zgebrd,ZGEBRD)
-#define LAPACK_sgbbrd LAPACK_GLOBAL(sgbbrd,SGBBRD)
-#define LAPACK_dgbbrd LAPACK_GLOBAL(dgbbrd,DGBBRD)
-#define LAPACK_cgbbrd LAPACK_GLOBAL(cgbbrd,CGBBRD)
-#define LAPACK_zgbbrd LAPACK_GLOBAL(zgbbrd,ZGBBRD)
-#define LAPACK_sorgbr LAPACK_GLOBAL(sorgbr,SORGBR)
-#define LAPACK_dorgbr LAPACK_GLOBAL(dorgbr,DORGBR)
-#define LAPACK_sormbr LAPACK_GLOBAL(sormbr,SORMBR)
-#define LAPACK_dormbr LAPACK_GLOBAL(dormbr,DORMBR)
-#define LAPACK_cungbr LAPACK_GLOBAL(cungbr,CUNGBR)
-#define LAPACK_zungbr LAPACK_GLOBAL(zungbr,ZUNGBR)
-#define LAPACK_cunmbr LAPACK_GLOBAL(cunmbr,CUNMBR)
-#define LAPACK_zunmbr LAPACK_GLOBAL(zunmbr,ZUNMBR)
-#define LAPACK_sbdsqr LAPACK_GLOBAL(sbdsqr,SBDSQR)
-#define LAPACK_dbdsqr LAPACK_GLOBAL(dbdsqr,DBDSQR)
-#define LAPACK_cbdsqr LAPACK_GLOBAL(cbdsqr,CBDSQR)
-#define LAPACK_zbdsqr LAPACK_GLOBAL(zbdsqr,ZBDSQR)
-#define LAPACK_sbdsdc LAPACK_GLOBAL(sbdsdc,SBDSDC)
-#define LAPACK_dbdsdc LAPACK_GLOBAL(dbdsdc,DBDSDC)
-#define LAPACK_sbdsvdx LAPACK_GLOBAL(sbdsvdx,SBDSVDX)
-#define LAPACK_dbdsvdx LAPACK_GLOBAL(dbdsvdx,DBDSVDX)
-#define LAPACK_ssytrd LAPACK_GLOBAL(ssytrd,SSYTRD)
-#define LAPACK_dsytrd LAPACK_GLOBAL(dsytrd,DSYTRD)
-#define LAPACK_sorgtr LAPACK_GLOBAL(sorgtr,SORGTR)
-#define LAPACK_dorgtr LAPACK_GLOBAL(dorgtr,DORGTR)
-#define LAPACK_sormtr LAPACK_GLOBAL(sormtr,SORMTR)
-#define LAPACK_dormtr LAPACK_GLOBAL(dormtr,DORMTR)
-#define LAPACK_chetrd LAPACK_GLOBAL(chetrd,CHETRD)
-#define LAPACK_zhetrd LAPACK_GLOBAL(zhetrd,ZHETRD)
-#define LAPACK_cungtr LAPACK_GLOBAL(cungtr,CUNGTR)
-#define LAPACK_zungtr LAPACK_GLOBAL(zungtr,ZUNGTR)
-#define LAPACK_cunmtr LAPACK_GLOBAL(cunmtr,CUNMTR)
-#define LAPACK_zunmtr LAPACK_GLOBAL(zunmtr,ZUNMTR)
-#define LAPACK_ssptrd LAPACK_GLOBAL(ssptrd,SSPTRD)
-#define LAPACK_dsptrd LAPACK_GLOBAL(dsptrd,DSPTRD)
-#define LAPACK_sopgtr LAPACK_GLOBAL(sopgtr,SOPGTR)
-#define LAPACK_dopgtr LAPACK_GLOBAL(dopgtr,DOPGTR)
-#define LAPACK_sopmtr LAPACK_GLOBAL(sopmtr,SOPMTR)
-#define LAPACK_dopmtr LAPACK_GLOBAL(dopmtr,DOPMTR)
-#define LAPACK_chptrd LAPACK_GLOBAL(chptrd,CHPTRD)
-#define LAPACK_zhptrd LAPACK_GLOBAL(zhptrd,ZHPTRD)
-#define LAPACK_cupgtr LAPACK_GLOBAL(cupgtr,CUPGTR)
-#define LAPACK_zupgtr LAPACK_GLOBAL(zupgtr,ZUPGTR)
-#define LAPACK_cupmtr LAPACK_GLOBAL(cupmtr,CUPMTR)
-#define LAPACK_zupmtr LAPACK_GLOBAL(zupmtr,ZUPMTR)
-#define LAPACK_ssbtrd LAPACK_GLOBAL(ssbtrd,SSBTRD)
-#define LAPACK_dsbtrd LAPACK_GLOBAL(dsbtrd,DSBTRD)
-#define LAPACK_chbtrd LAPACK_GLOBAL(chbtrd,CHBTRD)
-#define LAPACK_zhbtrd LAPACK_GLOBAL(zhbtrd,ZHBTRD)
-#define LAPACK_ssterf LAPACK_GLOBAL(ssterf,SSTERF)
-#define LAPACK_dsterf LAPACK_GLOBAL(dsterf,DSTERF)
-#define LAPACK_ssteqr LAPACK_GLOBAL(ssteqr,SSTEQR)
-#define LAPACK_dsteqr LAPACK_GLOBAL(dsteqr,DSTEQR)
-#define LAPACK_csteqr LAPACK_GLOBAL(csteqr,CSTEQR)
-#define LAPACK_zsteqr LAPACK_GLOBAL(zsteqr,ZSTEQR)
-#define LAPACK_sstemr LAPACK_GLOBAL(sstemr,SSTEMR)
-#define LAPACK_dstemr LAPACK_GLOBAL(dstemr,DSTEMR)
-#define LAPACK_cstemr LAPACK_GLOBAL(cstemr,CSTEMR)
-#define LAPACK_zstemr LAPACK_GLOBAL(zstemr,ZSTEMR)
-#define LAPACK_sstedc LAPACK_GLOBAL(sstedc,SSTEDC)
-#define LAPACK_dstedc LAPACK_GLOBAL(dstedc,DSTEDC)
-#define LAPACK_cstedc LAPACK_GLOBAL(cstedc,CSTEDC)
-#define LAPACK_zstedc LAPACK_GLOBAL(zstedc,ZSTEDC)
-#define LAPACK_sstegr LAPACK_GLOBAL(sstegr,SSTEGR)
-#define LAPACK_dstegr LAPACK_GLOBAL(dstegr,DSTEGR)
-#define LAPACK_cstegr LAPACK_GLOBAL(cstegr,CSTEGR)
-#define LAPACK_zstegr LAPACK_GLOBAL(zstegr,ZSTEGR)
-#define LAPACK_spteqr LAPACK_GLOBAL(spteqr,SPTEQR)
-#define LAPACK_dpteqr LAPACK_GLOBAL(dpteqr,DPTEQR)
-#define LAPACK_cpteqr LAPACK_GLOBAL(cpteqr,CPTEQR)
-#define LAPACK_zpteqr LAPACK_GLOBAL(zpteqr,ZPTEQR)
-#define LAPACK_sstebz LAPACK_GLOBAL(sstebz,SSTEBZ)
-#define LAPACK_dstebz LAPACK_GLOBAL(dstebz,DSTEBZ)
-#define LAPACK_sstein LAPACK_GLOBAL(sstein,SSTEIN)
-#define LAPACK_dstein LAPACK_GLOBAL(dstein,DSTEIN)
-#define LAPACK_cstein LAPACK_GLOBAL(cstein,CSTEIN)
-#define LAPACK_zstein LAPACK_GLOBAL(zstein,ZSTEIN)
-#define LAPACK_sdisna LAPACK_GLOBAL(sdisna,SDISNA)
-#define LAPACK_ddisna LAPACK_GLOBAL(ddisna,DDISNA)
-#define LAPACK_ssygst LAPACK_GLOBAL(ssygst,SSYGST)
-#define LAPACK_dsygst LAPACK_GLOBAL(dsygst,DSYGST)
-#define LAPACK_chegst LAPACK_GLOBAL(chegst,CHEGST)
-#define LAPACK_zhegst LAPACK_GLOBAL(zhegst,ZHEGST)
-#define LAPACK_sspgst LAPACK_GLOBAL(sspgst,SSPGST)
-#define LAPACK_dspgst LAPACK_GLOBAL(dspgst,DSPGST)
-#define LAPACK_chpgst LAPACK_GLOBAL(chpgst,CHPGST)
-#define LAPACK_zhpgst LAPACK_GLOBAL(zhpgst,ZHPGST)
-#define LAPACK_ssbgst LAPACK_GLOBAL(ssbgst,SSBGST)
-#define LAPACK_dsbgst LAPACK_GLOBAL(dsbgst,DSBGST)
-#define LAPACK_chbgst LAPACK_GLOBAL(chbgst,CHBGST)
-#define LAPACK_zhbgst LAPACK_GLOBAL(zhbgst,ZHBGST)
-#define LAPACK_spbstf LAPACK_GLOBAL(spbstf,SPBSTF)
-#define LAPACK_dpbstf LAPACK_GLOBAL(dpbstf,DPBSTF)
-#define LAPACK_cpbstf LAPACK_GLOBAL(cpbstf,CPBSTF)
-#define LAPACK_zpbstf LAPACK_GLOBAL(zpbstf,ZPBSTF)
-#define LAPACK_sgehrd LAPACK_GLOBAL(sgehrd,SGEHRD)
-#define LAPACK_dgehrd LAPACK_GLOBAL(dgehrd,DGEHRD)
-#define LAPACK_cgehrd LAPACK_GLOBAL(cgehrd,CGEHRD)
-#define LAPACK_zgehrd LAPACK_GLOBAL(zgehrd,ZGEHRD)
-#define LAPACK_sorghr LAPACK_GLOBAL(sorghr,SORGHR)
-#define LAPACK_dorghr LAPACK_GLOBAL(dorghr,DORGHR)
-#define LAPACK_sormhr LAPACK_GLOBAL(sormhr,SORMHR)
-#define LAPACK_dormhr LAPACK_GLOBAL(dormhr,DORMHR)
-#define LAPACK_cunghr LAPACK_GLOBAL(cunghr,CUNGHR)
-#define LAPACK_zunghr LAPACK_GLOBAL(zunghr,ZUNGHR)
-#define LAPACK_cunmhr LAPACK_GLOBAL(cunmhr,CUNMHR)
-#define LAPACK_zunmhr LAPACK_GLOBAL(zunmhr,ZUNMHR)
-#define LAPACK_sgebal LAPACK_GLOBAL(sgebal,SGEBAL)
-#define LAPACK_dgebal LAPACK_GLOBAL(dgebal,DGEBAL)
-#define LAPACK_cgebal LAPACK_GLOBAL(cgebal,CGEBAL)
-#define LAPACK_zgebal LAPACK_GLOBAL(zgebal,ZGEBAL)
-#define LAPACK_sgebak LAPACK_GLOBAL(sgebak,SGEBAK)
-#define LAPACK_dgebak LAPACK_GLOBAL(dgebak,DGEBAK)
-#define LAPACK_cgebak LAPACK_GLOBAL(cgebak,CGEBAK)
-#define LAPACK_zgebak LAPACK_GLOBAL(zgebak,ZGEBAK)
-#define LAPACK_shseqr LAPACK_GLOBAL(shseqr,SHSEQR)
-#define LAPACK_dhseqr LAPACK_GLOBAL(dhseqr,DHSEQR)
-#define LAPACK_chseqr LAPACK_GLOBAL(chseqr,CHSEQR)
-#define LAPACK_zhseqr LAPACK_GLOBAL(zhseqr,ZHSEQR)
-#define LAPACK_shsein LAPACK_GLOBAL(shsein,SHSEIN)
-#define LAPACK_dhsein LAPACK_GLOBAL(dhsein,DHSEIN)
-#define LAPACK_chsein LAPACK_GLOBAL(chsein,CHSEIN)
-#define LAPACK_zhsein LAPACK_GLOBAL(zhsein,ZHSEIN)
-#define LAPACK_strevc LAPACK_GLOBAL(strevc,STREVC)
-#define LAPACK_dtrevc LAPACK_GLOBAL(dtrevc,DTREVC)
-#define LAPACK_ctrevc LAPACK_GLOBAL(ctrevc,CTREVC)
-#define LAPACK_ztrevc LAPACK_GLOBAL(ztrevc,ZTREVC)
-#define LAPACK_strsna LAPACK_GLOBAL(strsna,STRSNA)
-#define LAPACK_dtrsna LAPACK_GLOBAL(dtrsna,DTRSNA)
-#define LAPACK_ctrsna LAPACK_GLOBAL(ctrsna,CTRSNA)
-#define LAPACK_ztrsna LAPACK_GLOBAL(ztrsna,ZTRSNA)
-#define LAPACK_strexc LAPACK_GLOBAL(strexc,STREXC)
-#define LAPACK_dtrexc LAPACK_GLOBAL(dtrexc,DTREXC)
-#define LAPACK_ctrexc LAPACK_GLOBAL(ctrexc,CTREXC)
-#define LAPACK_ztrexc LAPACK_GLOBAL(ztrexc,ZTREXC)
-#define LAPACK_strsen LAPACK_GLOBAL(strsen,STRSEN)
-#define LAPACK_dtrsen LAPACK_GLOBAL(dtrsen,DTRSEN)
-#define LAPACK_ctrsen LAPACK_GLOBAL(ctrsen,CTRSEN)
-#define LAPACK_ztrsen LAPACK_GLOBAL(ztrsen,ZTRSEN)
-#define LAPACK_strsyl LAPACK_GLOBAL(strsyl,STRSYL)
-#define LAPACK_dtrsyl LAPACK_GLOBAL(dtrsyl,DTRSYL)
-#define LAPACK_ctrsyl LAPACK_GLOBAL(ctrsyl,CTRSYL)
-#define LAPACK_ztrsyl LAPACK_GLOBAL(ztrsyl,ZTRSYL)
-#define LAPACK_sgghrd LAPACK_GLOBAL(sgghrd,SGGHRD)
-#define LAPACK_dgghrd LAPACK_GLOBAL(dgghrd,DGGHRD)
-#define LAPACK_cgghrd LAPACK_GLOBAL(cgghrd,CGGHRD)
-#define LAPACK_zgghrd LAPACK_GLOBAL(zgghrd,ZGGHRD)
-#define LAPACK_sgghd3 LAPACK_GLOBAL(sgghd3,SGGHD3)
-#define LAPACK_dgghd3 LAPACK_GLOBAL(dgghd3,DGGHD3)
-#define LAPACK_cgghd3 LAPACK_GLOBAL(cgghd3,CGGHD3)
-#define LAPACK_zgghd3 LAPACK_GLOBAL(zgghd3,ZGGHD3)
-#define LAPACK_sggbal LAPACK_GLOBAL(sggbal,SGGBAL)
-#define LAPACK_dggbal LAPACK_GLOBAL(dggbal,DGGBAL)
-#define LAPACK_cggbal LAPACK_GLOBAL(cggbal,CGGBAL)
-#define LAPACK_zggbal LAPACK_GLOBAL(zggbal,ZGGBAL)
-#define LAPACK_sggbak LAPACK_GLOBAL(sggbak,SGGBAK)
-#define LAPACK_dggbak LAPACK_GLOBAL(dggbak,DGGBAK)
-#define LAPACK_cggbak LAPACK_GLOBAL(cggbak,CGGBAK)
-#define LAPACK_zggbak LAPACK_GLOBAL(zggbak,ZGGBAK)
-#define LAPACK_shgeqz LAPACK_GLOBAL(shgeqz,SHGEQZ)
-#define LAPACK_dhgeqz LAPACK_GLOBAL(dhgeqz,DHGEQZ)
-#define LAPACK_chgeqz LAPACK_GLOBAL(chgeqz,CHGEQZ)
-#define LAPACK_zhgeqz LAPACK_GLOBAL(zhgeqz,ZHGEQZ)
-#define LAPACK_stgevc LAPACK_GLOBAL(stgevc,STGEVC)
-#define LAPACK_dtgevc LAPACK_GLOBAL(dtgevc,DTGEVC)
-#define LAPACK_ctgevc LAPACK_GLOBAL(ctgevc,CTGEVC)
-#define LAPACK_ztgevc LAPACK_GLOBAL(ztgevc,ZTGEVC)
-#define LAPACK_stgexc LAPACK_GLOBAL(stgexc,STGEXC)
-#define LAPACK_dtgexc LAPACK_GLOBAL(dtgexc,DTGEXC)
-#define LAPACK_ctgexc LAPACK_GLOBAL(ctgexc,CTGEXC)
-#define LAPACK_ztgexc LAPACK_GLOBAL(ztgexc,ZTGEXC)
-#define LAPACK_stgsen LAPACK_GLOBAL(stgsen,STGSEN)
-#define LAPACK_dtgsen LAPACK_GLOBAL(dtgsen,DTGSEN)
-#define LAPACK_ctgsen LAPACK_GLOBAL(ctgsen,CTGSEN)
-#define LAPACK_ztgsen LAPACK_GLOBAL(ztgsen,ZTGSEN)
-#define LAPACK_stgsyl LAPACK_GLOBAL(stgsyl,STGSYL)
-#define LAPACK_dtgsyl LAPACK_GLOBAL(dtgsyl,DTGSYL)
-#define LAPACK_ctgsyl LAPACK_GLOBAL(ctgsyl,CTGSYL)
-#define LAPACK_ztgsyl LAPACK_GLOBAL(ztgsyl,ZTGSYL)
-#define LAPACK_stgsna LAPACK_GLOBAL(stgsna,STGSNA)
-#define LAPACK_dtgsna LAPACK_GLOBAL(dtgsna,DTGSNA)
-#define LAPACK_ctgsna LAPACK_GLOBAL(ctgsna,CTGSNA)
-#define LAPACK_ztgsna LAPACK_GLOBAL(ztgsna,ZTGSNA)
-#define LAPACK_sggsvp LAPACK_GLOBAL(sggsvp,SGGSVP)
-#define LAPACK_dggsvp LAPACK_GLOBAL(dggsvp,DGGSVP)
-#define LAPACK_cggsvp LAPACK_GLOBAL(cggsvp,CGGSVP)
-#define LAPACK_zggsvp LAPACK_GLOBAL(zggsvp,ZGGSVP)
-#define LAPACK_sggsvp3 LAPACK_GLOBAL(sggsvp3,SGGSVP3)
-#define LAPACK_dggsvp3 LAPACK_GLOBAL(dggsvp3,DGGSVP3)
-#define LAPACK_cggsvp3 LAPACK_GLOBAL(cggsvp3,CGGSVP3)
-#define LAPACK_zggsvp3 LAPACK_GLOBAL(zggsvp3,ZGGSVP3)
-#define LAPACK_stgsja LAPACK_GLOBAL(stgsja,STGSJA)
-#define LAPACK_dtgsja LAPACK_GLOBAL(dtgsja,DTGSJA)
-#define LAPACK_ctgsja LAPACK_GLOBAL(ctgsja,CTGSJA)
-#define LAPACK_ztgsja LAPACK_GLOBAL(ztgsja,ZTGSJA)
-#define LAPACK_sgels LAPACK_GLOBAL(sgels,SGELS)
-#define LAPACK_dgels LAPACK_GLOBAL(dgels,DGELS)
-#define LAPACK_cgels LAPACK_GLOBAL(cgels,CGELS)
-#define LAPACK_zgels LAPACK_GLOBAL(zgels,ZGELS)
-#define LAPACK_sgelsy LAPACK_GLOBAL(sgelsy,SGELSY)
-#define LAPACK_dgelsy LAPACK_GLOBAL(dgelsy,DGELSY)
-#define LAPACK_cgelsy LAPACK_GLOBAL(cgelsy,CGELSY)
-#define LAPACK_zgelsy LAPACK_GLOBAL(zgelsy,ZGELSY)
-#define LAPACK_sgelss LAPACK_GLOBAL(sgelss,SGELSS)
-#define LAPACK_dgelss LAPACK_GLOBAL(dgelss,DGELSS)
-#define LAPACK_cgelss LAPACK_GLOBAL(cgelss,CGELSS)
-#define LAPACK_zgelss LAPACK_GLOBAL(zgelss,ZGELSS)
-#define LAPACK_sgelsd LAPACK_GLOBAL(sgelsd,SGELSD)
-#define LAPACK_dgelsd LAPACK_GLOBAL(dgelsd,DGELSD)
-#define LAPACK_cgelsd LAPACK_GLOBAL(cgelsd,CGELSD)
-#define LAPACK_zgelsd LAPACK_GLOBAL(zgelsd,ZGELSD)
-#define LAPACK_sgglse LAPACK_GLOBAL(sgglse,SGGLSE)
-#define LAPACK_dgglse LAPACK_GLOBAL(dgglse,DGGLSE)
-#define LAPACK_cgglse LAPACK_GLOBAL(cgglse,CGGLSE)
-#define LAPACK_zgglse LAPACK_GLOBAL(zgglse,ZGGLSE)
-#define LAPACK_sggglm LAPACK_GLOBAL(sggglm,SGGGLM)
-#define LAPACK_dggglm LAPACK_GLOBAL(dggglm,DGGGLM)
-#define LAPACK_cggglm LAPACK_GLOBAL(cggglm,CGGGLM)
-#define LAPACK_zggglm LAPACK_GLOBAL(zggglm,ZGGGLM)
-#define LAPACK_ssyev LAPACK_GLOBAL(ssyev,SSYEV)
-#define LAPACK_dsyev LAPACK_GLOBAL(dsyev,DSYEV)
-#define LAPACK_cheev LAPACK_GLOBAL(cheev,CHEEV)
-#define LAPACK_zheev LAPACK_GLOBAL(zheev,ZHEEV)
-#define LAPACK_ssyev_2stage LAPACK_GLOBAL(ssyev_2stage,SSYEV_2STAGE)
-#define LAPACK_dsyev_2stage LAPACK_GLOBAL(dsyev_2stage,DSYEV_2STAGE)
-#define LAPACK_cheev_2stage LAPACK_GLOBAL(cheev_2stage,CHEEV_2STAGE)
-#define LAPACK_zheev_2stage LAPACK_GLOBAL(zheev_2stage,ZHEEV_2STAGE)
-#define LAPACK_ssyevd LAPACK_GLOBAL(ssyevd,SSYEVD)
-#define LAPACK_dsyevd LAPACK_GLOBAL(dsyevd,DSYEVD)
-#define LAPACK_cheevd LAPACK_GLOBAL(cheevd,CHEEVD)
-#define LAPACK_zheevd LAPACK_GLOBAL(zheevd,ZHEEVD)
-#define LAPACK_ssyevd_2stage LAPACK_GLOBAL(ssyevd_2stage,SSYEVD_2STAGE)
-#define LAPACK_dsyevd_2stage LAPACK_GLOBAL(dsyevd_2stage,DSYEVD_2STAGE)
-#define LAPACK_cheevd_2stage LAPACK_GLOBAL(cheevd_2stage,CHEEVD_2STAGE)
-#define LAPACK_zheevd_2stage LAPACK_GLOBAL(zheevd_2stage,ZHEEVD_2STAGE)
-#define LAPACK_ssyevx LAPACK_GLOBAL(ssyevx,SSYEVX)
-#define LAPACK_dsyevx LAPACK_GLOBAL(dsyevx,DSYEVX)
-#define LAPACK_cheevx LAPACK_GLOBAL(cheevx,CHEEVX)
-#define LAPACK_zheevx LAPACK_GLOBAL(zheevx,ZHEEVX)
-#define LAPACK_ssyevx_2stage LAPACK_GLOBAL(ssyevx_2stage,SSYEVX_2STAGE)
-#define LAPACK_dsyevx_2stage LAPACK_GLOBAL(dsyevx_2stage,DSYEVX_2STAGE)
-#define LAPACK_cheevx_2stage LAPACK_GLOBAL(cheevx_2stage,CHEEVX_2STAGE)
-#define LAPACK_zheevx_2stage LAPACK_GLOBAL(zheevx_2stage,ZHEEVX_2STAGE)
-#define LAPACK_ssyevr LAPACK_GLOBAL(ssyevr,SSYEVR)
-#define LAPACK_dsyevr LAPACK_GLOBAL(dsyevr,DSYEVR)
-#define LAPACK_cheevr LAPACK_GLOBAL(cheevr,CHEEVR)
-#define LAPACK_zheevr LAPACK_GLOBAL(zheevr,ZHEEVR)
-#define LAPACK_ssyevr_2stage LAPACK_GLOBAL(ssyevr_2stage,SSYEVR_2STAGE)
-#define LAPACK_dsyevr_2stage LAPACK_GLOBAL(dsyevr_2stage,DSYEVR_2STAGE)
-#define LAPACK_cheevr_2stage LAPACK_GLOBAL(cheevr_2stage,CHEEVR_2STAGE)
-#define LAPACK_zheevr_2stage LAPACK_GLOBAL(zheevr_2stage,ZHEEVR_2STAGE)
-#define LAPACK_sspev LAPACK_GLOBAL(sspev,SSPEV)
-#define LAPACK_dspev LAPACK_GLOBAL(dspev,DSPEV)
-#define LAPACK_chpev LAPACK_GLOBAL(chpev,CHPEV)
-#define LAPACK_zhpev LAPACK_GLOBAL(zhpev,ZHPEV)
-#define LAPACK_sspevd LAPACK_GLOBAL(sspevd,SSPEVD)
-#define LAPACK_dspevd LAPACK_GLOBAL(dspevd,DSPEVD)
-#define LAPACK_chpevd LAPACK_GLOBAL(chpevd,CHPEVD)
-#define LAPACK_zhpevd LAPACK_GLOBAL(zhpevd,ZHPEVD)
-#define LAPACK_sspevx LAPACK_GLOBAL(sspevx,SSPEVX)
-#define LAPACK_dspevx LAPACK_GLOBAL(dspevx,DSPEVX)
-#define LAPACK_chpevx LAPACK_GLOBAL(chpevx,CHPEVX)
-#define LAPACK_zhpevx LAPACK_GLOBAL(zhpevx,ZHPEVX)
-#define LAPACK_ssbev LAPACK_GLOBAL(ssbev,SSBEV)
-#define LAPACK_dsbev LAPACK_GLOBAL(dsbev,DSBEV)
-#define LAPACK_chbev LAPACK_GLOBAL(chbev,CHBEV)
-#define LAPACK_zhbev LAPACK_GLOBAL(zhbev,ZHBEV)
-#define LAPACK_ssbev_2stage LAPACK_GLOBAL(ssbev_2stage,SSBEV_2STAGE)
-#define LAPACK_dsbev_2stage LAPACK_GLOBAL(dsbev_2stage,DSBEV_2STAGE)
-#define LAPACK_chbev_2stage LAPACK_GLOBAL(chbev_2stage,CHBEV_2STAGE)
-#define LAPACK_zhbev_2stage LAPACK_GLOBAL(zhbev_2stage,ZHBEV_2STAGE)
-#define LAPACK_ssbevd LAPACK_GLOBAL(ssbevd,SSBEVD)
-#define LAPACK_dsbevd LAPACK_GLOBAL(dsbevd,DSBEVD)
-#define LAPACK_chbevd LAPACK_GLOBAL(chbevd,CHBEVD)
-#define LAPACK_zhbevd LAPACK_GLOBAL(zhbevd,ZHBEVD)
-#define LAPACK_ssbevd_2stage LAPACK_GLOBAL(ssbevd_2stage,SSBEVD_2STAGE)
-#define LAPACK_dsbevd_2stage LAPACK_GLOBAL(dsbevd_2stage,DSBEVD_2STAGE)
-#define LAPACK_chbevd_2stage LAPACK_GLOBAL(chbevd_2stage,CHBEVD_2STAGE)
-#define LAPACK_zhbevd_2stage LAPACK_GLOBAL(zhbevd_2stage,ZHBEVD_2STAGE)
-#define LAPACK_ssbevx LAPACK_GLOBAL(ssbevx,SSBEVX)
-#define LAPACK_dsbevx LAPACK_GLOBAL(dsbevx,DSBEVX)
-#define LAPACK_chbevx LAPACK_GLOBAL(chbevx,CHBEVX)
-#define LAPACK_zhbevx LAPACK_GLOBAL(zhbevx,ZHBEVX)
-#define LAPACK_ssbevx_2stage LAPACK_GLOBAL(ssbevx_2stage,SSBEVX_2STAGE)
-#define LAPACK_dsbevx_2stage LAPACK_GLOBAL(dsbevx_2stage,DSBEVX_2STAGE)
-#define LAPACK_chbevx_2stage LAPACK_GLOBAL(chbevx_2stage,CHBEVX_2STAGE)
-#define LAPACK_zhbevx_2stage LAPACK_GLOBAL(zhbevx_2stage,ZHBEVX_2STAGE)
-#define LAPACK_sstev LAPACK_GLOBAL(sstev,SSTEV)
-#define LAPACK_dstev LAPACK_GLOBAL(dstev,DSTEV)
-#define LAPACK_sstevd LAPACK_GLOBAL(sstevd,SSTEVD)
-#define LAPACK_dstevd LAPACK_GLOBAL(dstevd,DSTEVD)
-#define LAPACK_sstevx LAPACK_GLOBAL(sstevx,SSTEVX)
-#define LAPACK_dstevx LAPACK_GLOBAL(dstevx,DSTEVX)
-#define LAPACK_sstevr LAPACK_GLOBAL(sstevr,SSTEVR)
-#define LAPACK_dstevr LAPACK_GLOBAL(dstevr,DSTEVR)
-#define LAPACK_sgees LAPACK_GLOBAL(sgees,SGEES)
-#define LAPACK_dgees LAPACK_GLOBAL(dgees,DGEES)
-#define LAPACK_cgees LAPACK_GLOBAL(cgees,CGEES)
-#define LAPACK_zgees LAPACK_GLOBAL(zgees,ZGEES)
-#define LAPACK_sgeesx LAPACK_GLOBAL(sgeesx,SGEESX)
-#define LAPACK_dgeesx LAPACK_GLOBAL(dgeesx,DGEESX)
-#define LAPACK_cgeesx LAPACK_GLOBAL(cgeesx,CGEESX)
-#define LAPACK_zgeesx LAPACK_GLOBAL(zgeesx,ZGEESX)
-#define LAPACK_sgeev LAPACK_GLOBAL(sgeev,SGEEV)
-#define LAPACK_dgeev LAPACK_GLOBAL(dgeev,DGEEV)
-#define LAPACK_cgeev LAPACK_GLOBAL(cgeev,CGEEV)
-#define LAPACK_zgeev LAPACK_GLOBAL(zgeev,ZGEEV)
-#define LAPACK_sgeevx LAPACK_GLOBAL(sgeevx,SGEEVX)
-#define LAPACK_dgeevx LAPACK_GLOBAL(dgeevx,DGEEVX)
-#define LAPACK_cgeevx LAPACK_GLOBAL(cgeevx,CGEEVX)
-#define LAPACK_zgeevx LAPACK_GLOBAL(zgeevx,ZGEEVX)
-#define LAPACK_sgesvd LAPACK_GLOBAL(sgesvd,SGESVD)
-#define LAPACK_dgesvd LAPACK_GLOBAL(dgesvd,DGESVD)
-#define LAPACK_cgesvd LAPACK_GLOBAL(cgesvd,CGESVD)
-#define LAPACK_zgesvd LAPACK_GLOBAL(zgesvd,ZGESVD)
-#define LAPACK_sgesvdx LAPACK_GLOBAL(sgesvdx,SGESVDX)
-#define LAPACK_dgesvdx LAPACK_GLOBAL(dgesvdx,DGESVDX)
-#define LAPACK_cgesvdx LAPACK_GLOBAL(cgesvdx,CGESVDX)
-#define LAPACK_zgesvdx LAPACK_GLOBAL(zgesvdx,ZGESVDX)
-#define LAPACK_sgesdd LAPACK_GLOBAL(sgesdd,SGESDD)
-#define LAPACK_dgesdd LAPACK_GLOBAL(dgesdd,DGESDD)
-#define LAPACK_cgesdd LAPACK_GLOBAL(cgesdd,CGESDD)
-#define LAPACK_zgesdd LAPACK_GLOBAL(zgesdd,ZGESDD)
-#define LAPACK_sgejsv LAPACK_GLOBAL(sgejsv,SGEJSV)
-#define LAPACK_dgejsv LAPACK_GLOBAL(dgejsv,DGEJSV)
-#define LAPACK_cgejsv LAPACK_GLOBAL(cgejsv,CGEJSV)
-#define LAPACK_zgejsv LAPACK_GLOBAL(zgejsv,ZGEJSV)
-#define LAPACK_sgesvj LAPACK_GLOBAL(sgesvj,SGESVJ)
-#define LAPACK_dgesvj LAPACK_GLOBAL(dgesvj,DGESVJ)
-#define LAPACK_cgesvj LAPACK_GLOBAL(cgesvj,CGESVJ)
-#define LAPACK_zgesvj LAPACK_GLOBAL(zgesvj,ZGESVJ)
-#define LAPACK_sggsvd LAPACK_GLOBAL(sggsvd,SGGSVD)
-#define LAPACK_dggsvd LAPACK_GLOBAL(dggsvd,DGGSVD)
-#define LAPACK_cggsvd LAPACK_GLOBAL(cggsvd,CGGSVD)
-#define LAPACK_zggsvd LAPACK_GLOBAL(zggsvd,ZGGSVD)
-#define LAPACK_ssygv LAPACK_GLOBAL(ssygv,SSYGV)
-#define LAPACK_dsygv LAPACK_GLOBAL(dsygv,DSYGV)
-#define LAPACK_chegv LAPACK_GLOBAL(chegv,CHEGV)
-#define LAPACK_zhegv LAPACK_GLOBAL(zhegv,ZHEGV)
-#define LAPACK_ssygv_2stage LAPACK_GLOBAL(ssygv_2stage,SSYGV_2STAGE)
-#define LAPACK_dsygv_2stage LAPACK_GLOBAL(dsygv_2stage,DSYGV_2STAGE)
-#define LAPACK_chegv_2stage LAPACK_GLOBAL(chegv_2stage,CHEGV_2STAGE)
-#define LAPACK_zhegv_2stage LAPACK_GLOBAL(zhegv_2stage,ZHEGV_2STAGE)
-#define LAPACK_ssygvd LAPACK_GLOBAL(ssygvd,SSYGVD)
-#define LAPACK_dsygvd LAPACK_GLOBAL(dsygvd,DSYGVD)
-#define LAPACK_chegvd LAPACK_GLOBAL(chegvd,CHEGVD)
-#define LAPACK_zhegvd LAPACK_GLOBAL(zhegvd,ZHEGVD)
-#define LAPACK_ssygvx LAPACK_GLOBAL(ssygvx,SSYGVX)
-#define LAPACK_dsygvx LAPACK_GLOBAL(dsygvx,DSYGVX)
-#define LAPACK_chegvx LAPACK_GLOBAL(chegvx,CHEGVX)
-#define LAPACK_zhegvx LAPACK_GLOBAL(zhegvx,ZHEGVX)
-#define LAPACK_sspgv LAPACK_GLOBAL(sspgv,SSPGV)
-#define LAPACK_dspgv LAPACK_GLOBAL(dspgv,DSPGV)
-#define LAPACK_chpgv LAPACK_GLOBAL(chpgv,CHPGV)
-#define LAPACK_zhpgv LAPACK_GLOBAL(zhpgv,ZHPGV)
-#define LAPACK_sspgvd LAPACK_GLOBAL(sspgvd,SSPGVD)
-#define LAPACK_dspgvd LAPACK_GLOBAL(dspgvd,DSPGVD)
-#define LAPACK_chpgvd LAPACK_GLOBAL(chpgvd,CHPGVD)
-#define LAPACK_zhpgvd LAPACK_GLOBAL(zhpgvd,ZHPGVD)
-#define LAPACK_sspgvx LAPACK_GLOBAL(sspgvx,SSPGVX)
-#define LAPACK_dspgvx LAPACK_GLOBAL(dspgvx,DSPGVX)
-#define LAPACK_chpgvx LAPACK_GLOBAL(chpgvx,CHPGVX)
-#define LAPACK_zhpgvx LAPACK_GLOBAL(zhpgvx,ZHPGVX)
-#define LAPACK_ssbgv LAPACK_GLOBAL(ssbgv,SSBGV)
-#define LAPACK_dsbgv LAPACK_GLOBAL(dsbgv,DSBGV)
-#define LAPACK_chbgv LAPACK_GLOBAL(chbgv,CHBGV)
-#define LAPACK_zhbgv LAPACK_GLOBAL(zhbgv,ZHBGV)
-#define LAPACK_ssbgvd LAPACK_GLOBAL(ssbgvd,SSBGVD)
-#define LAPACK_dsbgvd LAPACK_GLOBAL(dsbgvd,DSBGVD)
-#define LAPACK_chbgvd LAPACK_GLOBAL(chbgvd,CHBGVD)
-#define LAPACK_zhbgvd LAPACK_GLOBAL(zhbgvd,ZHBGVD)
-#define LAPACK_ssbgvx LAPACK_GLOBAL(ssbgvx,SSBGVX)
-#define LAPACK_dsbgvx LAPACK_GLOBAL(dsbgvx,DSBGVX)
-#define LAPACK_chbgvx LAPACK_GLOBAL(chbgvx,CHBGVX)
-#define LAPACK_zhbgvx LAPACK_GLOBAL(zhbgvx,ZHBGVX)
-#define LAPACK_sgges LAPACK_GLOBAL(sgges,SGGES)
-#define LAPACK_dgges LAPACK_GLOBAL(dgges,DGGES)
-#define LAPACK_cgges LAPACK_GLOBAL(cgges,CGGES)
-#define LAPACK_zgges LAPACK_GLOBAL(zgges,ZGGES)
-#define LAPACK_sgges3 LAPACK_GLOBAL(sgges3,SGGES3)
-#define LAPACK_dgges3 LAPACK_GLOBAL(dgges3,DGGES3)
-#define LAPACK_cgges3 LAPACK_GLOBAL(cgges3,CGGES3)
-#define LAPACK_zgges3 LAPACK_GLOBAL(zgges3,ZGGES3)
-#define LAPACK_sggesx LAPACK_GLOBAL(sggesx,SGGESX)
-#define LAPACK_dggesx LAPACK_GLOBAL(dggesx,DGGESX)
-#define LAPACK_cggesx LAPACK_GLOBAL(cggesx,CGGESX)
-#define LAPACK_zggesx LAPACK_GLOBAL(zggesx,ZGGESX)
-#define LAPACK_sggev LAPACK_GLOBAL(sggev,SGGEV)
-#define LAPACK_dggev LAPACK_GLOBAL(dggev,DGGEV)
-#define LAPACK_cggev LAPACK_GLOBAL(cggev,CGGEV)
-#define LAPACK_zggev LAPACK_GLOBAL(zggev,ZGGEV)
-#define LAPACK_sggev3 LAPACK_GLOBAL(sggev3,SGGEV3)
-#define LAPACK_dggev3 LAPACK_GLOBAL(dggev3,DGGEV3)
-#define LAPACK_cggev3 LAPACK_GLOBAL(cggev3,CGGEV3)
-#define LAPACK_zggev3 LAPACK_GLOBAL(zggev3,ZGGEV3)
-#define LAPACK_sggevx LAPACK_GLOBAL(sggevx,SGGEVX)
-#define LAPACK_dggevx LAPACK_GLOBAL(dggevx,DGGEVX)
-#define LAPACK_cggevx LAPACK_GLOBAL(cggevx,CGGEVX)
-#define LAPACK_zggevx LAPACK_GLOBAL(zggevx,ZGGEVX)
-#define LAPACK_dsfrk LAPACK_GLOBAL(dsfrk,DSFRK)
-#define LAPACK_ssfrk LAPACK_GLOBAL(ssfrk,SSFRK)
-#define LAPACK_zhfrk LAPACK_GLOBAL(zhfrk,ZHFRK)
-#define LAPACK_chfrk LAPACK_GLOBAL(chfrk,CHFRK)
-#define LAPACK_dtfsm LAPACK_GLOBAL(dtfsm,DTFSM)
-#define LAPACK_stfsm LAPACK_GLOBAL(stfsm,STFSM)
-#define LAPACK_ztfsm LAPACK_GLOBAL(ztfsm,ZTFSM)
-#define LAPACK_ctfsm LAPACK_GLOBAL(ctfsm,CTFSM)
-#define LAPACK_dtfttp LAPACK_GLOBAL(dtfttp,DTFTTP)
-#define LAPACK_stfttp LAPACK_GLOBAL(stfttp,STFTTP)
-#define LAPACK_ztfttp LAPACK_GLOBAL(ztfttp,ZTFTTP)
-#define LAPACK_ctfttp LAPACK_GLOBAL(ctfttp,CTFTTP)
-#define LAPACK_dtfttr LAPACK_GLOBAL(dtfttr,DTFTTR)
-#define LAPACK_stfttr LAPACK_GLOBAL(stfttr,STFTTR)
-#define LAPACK_ztfttr LAPACK_GLOBAL(ztfttr,ZTFTTR)
-#define LAPACK_ctfttr LAPACK_GLOBAL(ctfttr,CTFTTR)
-#define LAPACK_dtpttf LAPACK_GLOBAL(dtpttf,DTPTTF)
-#define LAPACK_stpttf LAPACK_GLOBAL(stpttf,STPTTF)
-#define LAPACK_ztpttf LAPACK_GLOBAL(ztpttf,ZTPTTF)
-#define LAPACK_ctpttf LAPACK_GLOBAL(ctpttf,CTPTTF)
-#define LAPACK_dtpttr LAPACK_GLOBAL(dtpttr,DTPTTR)
-#define LAPACK_stpttr LAPACK_GLOBAL(stpttr,STPTTR)
-#define LAPACK_ztpttr LAPACK_GLOBAL(ztpttr,ZTPTTR)
-#define LAPACK_ctpttr LAPACK_GLOBAL(ctpttr,CTPTTR)
-#define LAPACK_dtrttf LAPACK_GLOBAL(dtrttf,DTRTTF)
-#define LAPACK_strttf LAPACK_GLOBAL(strttf,STRTTF)
-#define LAPACK_ztrttf LAPACK_GLOBAL(ztrttf,ZTRTTF)
-#define LAPACK_ctrttf LAPACK_GLOBAL(ctrttf,CTRTTF)
-#define LAPACK_dtrttp LAPACK_GLOBAL(dtrttp,DTRTTP)
-#define LAPACK_strttp LAPACK_GLOBAL(strttp,STRTTP)
-#define LAPACK_ztrttp LAPACK_GLOBAL(ztrttp,ZTRTTP)
-#define LAPACK_ctrttp LAPACK_GLOBAL(ctrttp,CTRTTP)
-#define LAPACK_sgeqrfp LAPACK_GLOBAL(sgeqrfp,SGEQRFP)
-#define LAPACK_dgeqrfp LAPACK_GLOBAL(dgeqrfp,DGEQRFP)
-#define LAPACK_cgeqrfp LAPACK_GLOBAL(cgeqrfp,CGEQRFP)
-#define LAPACK_zgeqrfp LAPACK_GLOBAL(zgeqrfp,ZGEQRFP)
-#define LAPACK_clacgv LAPACK_GLOBAL(clacgv,CLACGV)
-#define LAPACK_zlacgv LAPACK_GLOBAL(zlacgv,ZLACGV)
-#define LAPACK_slarnv LAPACK_GLOBAL(slarnv,SLARNV)
-#define LAPACK_dlarnv LAPACK_GLOBAL(dlarnv,DLARNV)
-#define LAPACK_clarnv LAPACK_GLOBAL(clarnv,CLARNV)
-#define LAPACK_zlarnv LAPACK_GLOBAL(zlarnv,ZLARNV)
-#define LAPACK_sgeqr2 LAPACK_GLOBAL(sgeqr2,SGEQR2)
-#define LAPACK_dgeqr2 LAPACK_GLOBAL(dgeqr2,DGEQR2)
-#define LAPACK_cgeqr2 LAPACK_GLOBAL(cgeqr2,CGEQR2)
-#define LAPACK_zgeqr2 LAPACK_GLOBAL(zgeqr2,ZGEQR2)
-#define LAPACK_slacn2 LAPACK_GLOBAL(slacn2,SLACN2)
-#define LAPACK_dlacn2 LAPACK_GLOBAL(dlacn2,DLACN2)
-#define LAPACK_clacn2 LAPACK_GLOBAL(clacn2,CLACN2)
-#define LAPACK_zlacn2 LAPACK_GLOBAL(zlacn2,ZLACN2)
-#define LAPACK_slacpy LAPACK_GLOBAL(slacpy,SLACPY)
-#define LAPACK_dlacpy LAPACK_GLOBAL(dlacpy,DLACPY)
-#define LAPACK_clacpy LAPACK_GLOBAL(clacpy,CLACPY)
-#define LAPACK_zlacpy LAPACK_GLOBAL(zlacpy,ZLACPY)
-#define LAPACK_clacp2 LAPACK_GLOBAL(clacp2,CLACP2)
-#define LAPACK_zlacp2 LAPACK_GLOBAL(zlacp2,ZLACP2)
-#define LAPACK_sgetf2 LAPACK_GLOBAL(sgetf2,SGETF2)
-#define LAPACK_dgetf2 LAPACK_GLOBAL(dgetf2,DGETF2)
-#define LAPACK_cgetf2 LAPACK_GLOBAL(cgetf2,CGETF2)
-#define LAPACK_zgetf2 LAPACK_GLOBAL(zgetf2,ZGETF2)
-#define LAPACK_slaswp LAPACK_GLOBAL(slaswp,SLASWP)
-#define LAPACK_dlaswp LAPACK_GLOBAL(dlaswp,DLASWP)
-#define LAPACK_claswp LAPACK_GLOBAL(claswp,CLASWP)
-#define LAPACK_zlaswp LAPACK_GLOBAL(zlaswp,ZLASWP)
-#define LAPACK_slange LAPACK_GLOBAL(slange,SLANGE)
-#define LAPACK_dlange LAPACK_GLOBAL(dlange,DLANGE)
-#define LAPACK_clange LAPACK_GLOBAL(clange,CLANGE)
-#define LAPACK_zlange LAPACK_GLOBAL(zlange,ZLANGE)
-#define LAPACK_clanhe LAPACK_GLOBAL(clanhe,CLANHE)
-#define LAPACK_zlanhe LAPACK_GLOBAL(zlanhe,ZLANHE)
-#define LAPACK_clarcm LAPACK_GLOBAL(clarcm,CLARCM)
-#define LAPACK_zlarcm LAPACK_GLOBAL(zlarcm,ZLARCM)
-#define LAPACK_clacrm LAPACK_GLOBAL(clacrm,CLACRM)
-#define LAPACK_zlacrm LAPACK_GLOBAL(zlacrm,ZLACRM)
-#define LAPACK_slansy LAPACK_GLOBAL(slansy,SLANSY)
-#define LAPACK_dlansy LAPACK_GLOBAL(dlansy,DLANSY)
-#define LAPACK_clansy LAPACK_GLOBAL(clansy,CLANSY)
-#define LAPACK_zlansy LAPACK_GLOBAL(zlansy,ZLANSY)
-#define LAPACK_slantr LAPACK_GLOBAL(slantr,SLANTR)
-#define LAPACK_dlantr LAPACK_GLOBAL(dlantr,DLANTR)
-#define LAPACK_clantr LAPACK_GLOBAL(clantr,CLANTR)
-#define LAPACK_zlantr LAPACK_GLOBAL(zlantr,ZLANTR)
-#define LAPACK_slamch LAPACK_GLOBAL(slamch,SLAMCH)
-#define LAPACK_dlamch LAPACK_GLOBAL(dlamch,DLAMCH)
-#define LAPACK_sgelq2 LAPACK_GLOBAL(sgelq2,SGELQ2)
-#define LAPACK_dgelq2 LAPACK_GLOBAL(dgelq2,DGELQ2)
-#define LAPACK_cgelq2 LAPACK_GLOBAL(cgelq2,CGELQ2)
-#define LAPACK_zgelq2 LAPACK_GLOBAL(zgelq2,ZGELQ2)
-#define LAPACK_slarfb LAPACK_GLOBAL(slarfb,SLARFB)
-#define LAPACK_dlarfb LAPACK_GLOBAL(dlarfb,DLARFB)
-#define LAPACK_clarfb LAPACK_GLOBAL(clarfb,CLARFB)
-#define LAPACK_zlarfb LAPACK_GLOBAL(zlarfb,ZLARFB)
-#define LAPACK_slarfg LAPACK_GLOBAL(slarfg,SLARFG)
-#define LAPACK_dlarfg LAPACK_GLOBAL(dlarfg,DLARFG)
-#define LAPACK_clarfg LAPACK_GLOBAL(clarfg,CLARFG)
-#define LAPACK_zlarfg LAPACK_GLOBAL(zlarfg,ZLARFG)
-#define LAPACK_slassq LAPACK_GLOBAL(slassq,SLASSQ)
-#define LAPACK_dlassq LAPACK_GLOBAL(dlassq,DLASSQ)
-#define LAPACK_classq LAPACK_GLOBAL(classq,CLASSQ)
-#define LAPACK_zlassq LAPACK_GLOBAL(zlassq,ZLASSQ)
-#define LAPACK_slarft LAPACK_GLOBAL(slarft,SLARFT)
-#define LAPACK_dlarft LAPACK_GLOBAL(dlarft,DLARFT)
-#define LAPACK_clarft LAPACK_GLOBAL(clarft,CLARFT)
-#define LAPACK_zlarft LAPACK_GLOBAL(zlarft,ZLARFT)
-#define LAPACK_slarfx LAPACK_GLOBAL(slarfx,SLARFX)
-#define LAPACK_dlarfx LAPACK_GLOBAL(dlarfx,DLARFX)
-#define LAPACK_clarfx LAPACK_GLOBAL(clarfx,CLARFX)
-#define LAPACK_zlarfx LAPACK_GLOBAL(zlarfx,ZLARFX)
-#define LAPACK_slatms LAPACK_GLOBAL(slatms,SLATMS)
-#define LAPACK_dlatms LAPACK_GLOBAL(dlatms,DLATMS)
-#define LAPACK_clatms LAPACK_GLOBAL(clatms,CLATMS)
-#define LAPACK_zlatms LAPACK_GLOBAL(zlatms,ZLATMS)
-#define LAPACK_slag2d LAPACK_GLOBAL(slag2d,SLAG2D)
-#define LAPACK_dlag2s LAPACK_GLOBAL(dlag2s,DLAG2S)
-#define LAPACK_clag2z LAPACK_GLOBAL(clag2z,CLAG2Z)
-#define LAPACK_zlag2c LAPACK_GLOBAL(zlag2c,ZLAG2C)
-#define LAPACK_slauum LAPACK_GLOBAL(slauum,SLAUUM)
-#define LAPACK_dlauum LAPACK_GLOBAL(dlauum,DLAUUM)
-#define LAPACK_clauum LAPACK_GLOBAL(clauum,CLAUUM)
-#define LAPACK_zlauum LAPACK_GLOBAL(zlauum,ZLAUUM)
-#define LAPACK_slagge LAPACK_GLOBAL(slagge,SLAGGE)
-#define LAPACK_dlagge LAPACK_GLOBAL(dlagge,DLAGGE)
-#define LAPACK_clagge LAPACK_GLOBAL(clagge,CLAGGE)
-#define LAPACK_zlagge LAPACK_GLOBAL(zlagge,ZLAGGE)
-#define LAPACK_slascl LAPACK_GLOBAL(slascl,SLASCL)
-#define LAPACK_dlascl LAPACK_GLOBAL(dlascl,DLASCL)
-#define LAPACK_clascl LAPACK_GLOBAL(clascl,CLASCL)
-#define LAPACK_zlascl LAPACK_GLOBAL(zlascl,ZLASCL)
-#define LAPACK_slaset LAPACK_GLOBAL(slaset,SLASET)
-#define LAPACK_dlaset LAPACK_GLOBAL(dlaset,DLASET)
-#define LAPACK_claset LAPACK_GLOBAL(claset,CLASET)
-#define LAPACK_zlaset LAPACK_GLOBAL(zlaset,ZLASET)
-#define LAPACK_slasrt LAPACK_GLOBAL(slasrt,SLASRT)
-#define LAPACK_dlasrt LAPACK_GLOBAL(dlasrt,DLASRT)
-#define LAPACK_slagsy LAPACK_GLOBAL(slagsy,SLAGSY)
-#define LAPACK_dlagsy LAPACK_GLOBAL(dlagsy,DLAGSY)
-#define LAPACK_clagsy LAPACK_GLOBAL(clagsy,CLAGSY)
-#define LAPACK_zlagsy LAPACK_GLOBAL(zlagsy,ZLAGSY)
-#define LAPACK_claghe LAPACK_GLOBAL(claghe,CLAGHE)
-#define LAPACK_zlaghe LAPACK_GLOBAL(zlaghe,ZLAGHE)
-#define LAPACK_slapmr LAPACK_GLOBAL(slapmr,SLAPMR)
-#define LAPACK_dlapmr LAPACK_GLOBAL(dlapmr,DLAPMR)
-#define LAPACK_clapmr LAPACK_GLOBAL(clapmr,CLAPMR)
-#define LAPACK_zlapmr LAPACK_GLOBAL(zlapmr,ZLAPMR)
-#define LAPACK_slapmt LAPACK_GLOBAL(slapmt,SLAPMT)
-#define LAPACK_dlapmt LAPACK_GLOBAL(dlapmt,DLAPMT)
-#define LAPACK_clapmt LAPACK_GLOBAL(clapmt,CLAPMT)
-#define LAPACK_zlapmt LAPACK_GLOBAL(zlapmt,ZLAPMT)
-#define LAPACK_slapy2 LAPACK_GLOBAL(slapy2,SLAPY2)
-#define LAPACK_dlapy2 LAPACK_GLOBAL(dlapy2,DLAPY2)
-#define LAPACK_slapy3 LAPACK_GLOBAL(slapy3,SLAPY3)
-#define LAPACK_dlapy3 LAPACK_GLOBAL(dlapy3,DLAPY3)
-#define LAPACK_slartgp LAPACK_GLOBAL(slartgp,SLARTGP)
-#define LAPACK_dlartgp LAPACK_GLOBAL(dlartgp,DLARTGP)
-#define LAPACK_slartgs LAPACK_GLOBAL(slartgs,SLARTGS)
-#define LAPACK_dlartgs LAPACK_GLOBAL(dlartgs,DLARTGS)
-// LAPACK 3.3.0
-#define LAPACK_cbbcsd LAPACK_GLOBAL(cbbcsd,CBBCSD)
-#define LAPACK_cheswapr LAPACK_GLOBAL(cheswapr,CHESWAPR)
-#define LAPACK_chetri2 LAPACK_GLOBAL(chetri2,CHETRI2)
-#define LAPACK_chetri2x LAPACK_GLOBAL(chetri2x,CHETRI2X)
-#define LAPACK_chetrs2 LAPACK_GLOBAL(chetrs2,CHETRS2)
-#define LAPACK_csyconv LAPACK_GLOBAL(csyconv,CSYCONV)
-#define LAPACK_csyswapr LAPACK_GLOBAL(csyswapr,CSYSWAPR)
-#define LAPACK_csytri2 LAPACK_GLOBAL(csytri2,CSYTRI2)
-#define LAPACK_csytri2x LAPACK_GLOBAL(csytri2x,CSYTRI2X)
-#define LAPACK_csytrs2 LAPACK_GLOBAL(csytrs2,CSYTRS2)
-#define LAPACK_cunbdb LAPACK_GLOBAL(cunbdb,CUNBDB)
-#define LAPACK_cuncsd LAPACK_GLOBAL(cuncsd,CUNCSD)
-#define LAPACK_cuncsd2by1 LAPACK_GLOBAL(cuncsd2by1,CUNCSD2BY1)
-#define LAPACK_dbbcsd LAPACK_GLOBAL(dbbcsd,DBBCSD)
-#define LAPACK_dorbdb LAPACK_GLOBAL(dorbdb,DORBDB)
-#define LAPACK_dorcsd LAPACK_GLOBAL(dorcsd,DORCSD)
-#define LAPACK_dorcsd2by1 LAPACK_GLOBAL(dorcsd2by1,DORCSD2BY1)
-#define LAPACK_dsyconv LAPACK_GLOBAL(dsyconv,DSYCONV)
-#define LAPACK_dsyswapr LAPACK_GLOBAL(dsyswapr,DSYSWAPR)
-#define LAPACK_dsytri2 LAPACK_GLOBAL(dsytri2,DSYTRI2)
-#define LAPACK_dsytri2x LAPACK_GLOBAL(dsytri2x,DSYTRI2X)
-#define LAPACK_dsytrs2 LAPACK_GLOBAL(dsytrs2,DSYTRS2)
-#define LAPACK_sbbcsd LAPACK_GLOBAL(sbbcsd,SBBCSD)
-#define LAPACK_sorbdb LAPACK_GLOBAL(sorbdb,SORBDB)
-#define LAPACK_sorcsd LAPACK_GLOBAL(sorcsd,SORCSD)
-#define LAPACK_sorcsd2by1 LAPACK_GLOBAL(sorcsd2by1,SORCSD2BY1)
-#define LAPACK_ssyconv LAPACK_GLOBAL(ssyconv,SSYCONV)
-#define LAPACK_ssyswapr LAPACK_GLOBAL(ssyswapr,SSYSWAPR)
-#define LAPACK_ssytri2 LAPACK_GLOBAL(ssytri2,SSYTRI2)
-#define LAPACK_ssytri2x LAPACK_GLOBAL(ssytri2x,SSYTRI2X)
-#define LAPACK_ssytrs2 LAPACK_GLOBAL(ssytrs2,SSYTRS2)
-#define LAPACK_zbbcsd LAPACK_GLOBAL(zbbcsd,ZBBCSD)
-#define LAPACK_zheswapr LAPACK_GLOBAL(zheswapr,ZHESWAPR)
-#define LAPACK_zhetri2 LAPACK_GLOBAL(zhetri2,ZHETRI2)
-#define LAPACK_zhetri2x LAPACK_GLOBAL(zhetri2x,ZHETRI2X)
-#define LAPACK_zhetrs2 LAPACK_GLOBAL(zhetrs2,ZHETRS2)
-#define LAPACK_zsyconv LAPACK_GLOBAL(zsyconv,ZSYCONV)
-#define LAPACK_zsyswapr LAPACK_GLOBAL(zsyswapr,ZSYSWAPR)
-#define LAPACK_zsytri2 LAPACK_GLOBAL(zsytri2,ZSYTRI2)
-#define LAPACK_zsytri2x LAPACK_GLOBAL(zsytri2x,ZSYTRI2X)
-#define LAPACK_zsytrs2 LAPACK_GLOBAL(zsytrs2,ZSYTRS2)
-#define LAPACK_zunbdb LAPACK_GLOBAL(zunbdb,ZUNBDB)
-#define LAPACK_zuncsd LAPACK_GLOBAL(zuncsd,ZUNCSD)
-#define LAPACK_zuncsd2by1 LAPACK_GLOBAL(zuncsd2by1,ZUNCSD2BY1)
-// LAPACK 3.4.0
-#define LAPACK_sgemqrt LAPACK_GLOBAL(sgemqrt,SGEMQRT)
-#define LAPACK_dgemqrt LAPACK_GLOBAL(dgemqrt,DGEMQRT)
-#define LAPACK_cgemqrt LAPACK_GLOBAL(cgemqrt,CGEMQRT)
-#define LAPACK_zgemqrt LAPACK_GLOBAL(zgemqrt,ZGEMQRT)
-#define LAPACK_sgeqrt LAPACK_GLOBAL(sgeqrt,SGEQRT)
-#define LAPACK_dgeqrt LAPACK_GLOBAL(dgeqrt,DGEQRT)
-#define LAPACK_cgeqrt LAPACK_GLOBAL(cgeqrt,CGEQRT)
-#define LAPACK_zgeqrt LAPACK_GLOBAL(zgeqrt,ZGEQRT)
-#define LAPACK_sgeqrt2 LAPACK_GLOBAL(sgeqrt2,SGEQRT2)
-#define LAPACK_dgeqrt2 LAPACK_GLOBAL(dgeqrt2,DGEQRT2)
-#define LAPACK_cgeqrt2 LAPACK_GLOBAL(cgeqrt2,CGEQRT2)
-#define LAPACK_zgeqrt2 LAPACK_GLOBAL(zgeqrt2,ZGEQRT2)
-#define LAPACK_sgeqrt3 LAPACK_GLOBAL(sgeqrt3,SGEQRT3)
-#define LAPACK_dgeqrt3 LAPACK_GLOBAL(dgeqrt3,DGEQRT3)
-#define LAPACK_cgeqrt3 LAPACK_GLOBAL(cgeqrt3,CGEQRT3)
-#define LAPACK_zgeqrt3 LAPACK_GLOBAL(zgeqrt3,ZGEQRT3)
-#define LAPACK_stpmqrt LAPACK_GLOBAL(stpmqrt,STPMQRT)
-#define LAPACK_dtpmqrt LAPACK_GLOBAL(dtpmqrt,DTPMQRT)
-#define LAPACK_ctpmqrt LAPACK_GLOBAL(ctpmqrt,CTPMQRT)
-#define LAPACK_ztpmqrt LAPACK_GLOBAL(ztpmqrt,ZTPMQRT)
-#define LAPACK_stpqrt LAPACK_GLOBAL(stpqrt,STPQRT)
-#define LAPACK_dtpqrt LAPACK_GLOBAL(dtpqrt,DTPQRT)
-#define LAPACK_ctpqrt LAPACK_GLOBAL(ctpqrt,CTPQRT)
-#define LAPACK_ztpqrt LAPACK_GLOBAL(ztpqrt,ZTPQRT)
-#define LAPACK_stpqrt2 LAPACK_GLOBAL(stpqrt2,STPQRT2)
-#define LAPACK_dtpqrt2 LAPACK_GLOBAL(dtpqrt2,DTPQRT2)
-#define LAPACK_ctpqrt2 LAPACK_GLOBAL(ctpqrt2,CTPQRT2)
-#define LAPACK_ztpqrt2 LAPACK_GLOBAL(ztpqrt2,ZTPQRT2)
-#define LAPACK_stprfb LAPACK_GLOBAL(stprfb,STPRFB)
-#define LAPACK_dtprfb LAPACK_GLOBAL(dtprfb,DTPRFB)
-#define LAPACK_ctprfb LAPACK_GLOBAL(ctprfb,CTPRFB)
-#define LAPACK_ztprfb LAPACK_GLOBAL(ztprfb,ZTPRFB)
-// LAPACK 3.5.0
-#define LAPACK_ssysv_rook LAPACK_GLOBAL(ssysv_rook,SSYSV_ROOK)
-#define LAPACK_dsysv_rook LAPACK_GLOBAL(dsysv_rook,DSYSV_ROOK)
-#define LAPACK_csysv_rook LAPACK_GLOBAL(csysv_rook,CSYSV_ROOK)
-#define LAPACK_zsysv_rook LAPACK_GLOBAL(zsysv_rook,ZSYSV_ROOK)
-#define LAPACK_csyr LAPACK_GLOBAL(csyr,CSYR)
-#define LAPACK_zsyr LAPACK_GLOBAL(zsyr,ZSYR)
-#define LAPACK_ilaver LAPACK_GLOBAL(ilaver,ILAVER)
-// LAPACK 3.6.0
-#define LAPACK_sggsvd3 LAPACK_GLOBAL(sggsvd3,SGGSVD3)
-#define LAPACK_dggsvd3 LAPACK_GLOBAL(dggsvd3,DGGSVD3)
-#define LAPACK_cggsvd3 LAPACK_GLOBAL(cggsvd3,CGGSVD3)
-#define LAPACK_zggsvd3 LAPACK_GLOBAL(zggsvd3,ZGGSVD3)
-// LAPACK 3.7.0
-#define LAPACK_ssysv_aa LAPACK_GLOBAL(ssysv_aa,SSYSV_AA)
-#define LAPACK_dsysv_aa LAPACK_GLOBAL(dsysv_aa,DSYSV_AA)
-#define LAPACK_chesv_aa LAPACK_GLOBAL(chesv_aa,CHESV_AA)
-#define LAPACK_zsysv_aa LAPACK_GLOBAL(zsysv_aa,ZSYSV_AA)
-#define LAPACK_csysv_aa LAPACK_GLOBAL(csysv_aa,CSYSV_AA)
-#define LAPACK_zhesv_aa LAPACK_GLOBAL(zhesv_aa,ZHESV_AA)
-#define LAPACK_ssytrs_aa LAPACK_GLOBAL(ssytrs_aa,SSYTRS_AA)
-#define LAPACK_dsytrs_aa LAPACK_GLOBAL(dsytrs_aa,DSYTRS_AA)
-#define LAPACK_csytrs_aa LAPACK_GLOBAL(csytrs_aa,CSYTRS_AA)
-#define LAPACK_zsytrs_aa LAPACK_GLOBAL(zsytrs_aa,ZSYTRS_AA)
-#define LAPACK_chetrs_aa LAPACK_GLOBAL(chetrs_aa,CHETRS_AA)
-#define LAPACK_zhetrs_aa LAPACK_GLOBAL(zhetrs_aa,ZHETRS_AA)
-#define LAPACK_ssytrf_aa LAPACK_GLOBAL(ssytrf_aa,SSYTRF_AA)
-#define LAPACK_dsytrf_aa LAPACK_GLOBAL(dsytrf_aa,DSYTRF_AA)
-#define LAPACK_csytrf_aa LAPACK_GLOBAL(csytrf_aa,CSYTRF_AA)
-#define LAPACK_zsytrf_aa LAPACK_GLOBAL(zsytrf_aa,ZSYTRF_AA)
-#define LAPACK_chetrf_aa LAPACK_GLOBAL(chetrf_aa,CHETRF_AA)
-#define LAPACK_zhetrf_aa LAPACK_GLOBAL(zhetrf_aa,ZHETRF_AA)
-
-#define LAPACK_ssysv_rk LAPACK_GLOBAL(ssysv_rk,SSYSV_RK)
-#define LAPACK_dsysv_rk LAPACK_GLOBAL(dsysv_rk,DSYSV_RK)
-#define LAPACK_chesv_rk LAPACK_GLOBAL(chesv_rk,CHESV_RK)
-#define LAPACK_zsysv_rk LAPACK_GLOBAL(zsysv_rk,ZSYSV_RK)
-#define LAPACK_csysv_rk LAPACK_GLOBAL(csysv_rk,CSYSV_RK)
-#define LAPACK_zhesv_rk LAPACK_GLOBAL(zhesv_rk,ZHESV_RK)
-#define LAPACK_ssytrf_rk LAPACK_GLOBAL(ssytrf_rk,SSYTRF_RK)
-#define LAPACK_dsytrf_rk LAPACK_GLOBAL(dsytrf_rk,DSYTRF_RK)
-#define LAPACK_csytrf_rk LAPACK_GLOBAL(csytrf_rk,CSYTRF_RK)
-#define LAPACK_zsytrf_rk LAPACK_GLOBAL(zsytrf_rk,ZSYTRF_RK)
-#define LAPACK_chetrf_rk LAPACK_GLOBAL(chetrf_rk,CHETRF_RK)
-#define LAPACK_zhetrf_rk LAPACK_GLOBAL(zhetrf_rk,ZHETRF_RK)
-#define LAPACK_ssytrs_3 LAPACK_GLOBAL(ssytrs_3,SSYTRS_3)
-#define LAPACK_dsytrs_3 LAPACK_GLOBAL(dsytrs_3,DSYTRS_3)
-#define LAPACK_csytrs_3 LAPACK_GLOBAL(csytrs_3,CSYTRS_3)
-#define LAPACK_zsytrs_3 LAPACK_GLOBAL(zsytrs_3,ZSYTRS_3)
-#define LAPACK_chetrs_3 LAPACK_GLOBAL(chetrs_3,CHETRS_3)
-#define LAPACK_zhetrs_3 LAPACK_GLOBAL(zhetrs_3,ZHETRS_3)
-#define LAPACK_ssytri_3 LAPACK_GLOBAL(ssytri_3,SSYTRI_3)
-#define LAPACK_dsytri_3 LAPACK_GLOBAL(dsytri_3,DSYTRI_3)
-#define LAPACK_csytri_3 LAPACK_GLOBAL(csytri_3,CSYTRI_3)
-#define LAPACK_zsytri_3 LAPACK_GLOBAL(zsytri_3,ZSYTRI_3)
-#define LAPACK_chetri_3 LAPACK_GLOBAL(chetri_3,CHETRI_3)
-#define LAPACK_zhetri_3 LAPACK_GLOBAL(zhetri_3,ZHETRI_3)
-#define LAPACK_ssycon_3 LAPACK_GLOBAL(ssycon_3,SSYCON_3)
-#define LAPACK_dsycon_3 LAPACK_GLOBAL(dsycon_3,DSYCON_3)
-#define LAPACK_csycon_3 LAPACK_GLOBAL(csycon_3,CSYCON_3)
-#define LAPACK_zsycon_3 LAPACK_GLOBAL(zsycon_3,ZSYCON_3)
-#define LAPACK_checon_3 LAPACK_GLOBAL(checon_3,CHECON_3)
-#define LAPACK_zhecon_3 LAPACK_GLOBAL(zhecon_3,ZHECON_3)
-#define LAPACK_sgelq LAPACK_GLOBAL(sgelq,SGELQ)
-#define LAPACK_dgelq LAPACK_GLOBAL(dgelq,DGELQ)
-#define LAPACK_cgelq LAPACK_GLOBAL(cgelq,CGELQ)
-#define LAPACK_zgelq LAPACK_GLOBAL(zgelq,ZGELQ)
-#define LAPACK_sgemlq LAPACK_GLOBAL(sgemlq,SGEMLQ)
-#define LAPACK_dgemlq LAPACK_GLOBAL(dgemlq,DGEMLQ)
-#define LAPACK_cgemlq LAPACK_GLOBAL(cgemlq,CGEMLQ)
-#define LAPACK_zgemlq LAPACK_GLOBAL(zgemlq,ZGEMLQ)
-#define LAPACK_sgeqr LAPACK_GLOBAL(sgeqr,SGEQR)
-#define LAPACK_dgeqr LAPACK_GLOBAL(dgeqr,DGEQR)
-#define LAPACK_cgeqr LAPACK_GLOBAL(cgeqr,CGEQR)
-#define LAPACK_zgeqr LAPACK_GLOBAL(zgeqr,ZGEQR)
-#define LAPACK_sgemqr LAPACK_GLOBAL(sgemqr,SGEMQR)
-#define LAPACK_dgemqr LAPACK_GLOBAL(dgemqr,DGEMQR)
-#define LAPACK_cgemqr LAPACK_GLOBAL(cgemqr,CGEMQR)
-#define LAPACK_zgemqr LAPACK_GLOBAL(zgemqr,ZGEMQR)
-#define LAPACK_sgetsls LAPACK_GLOBAL(sgetsls,SGETSLS)
-#define LAPACK_dgetsls LAPACK_GLOBAL(dgetsls,DGETSLS)
-#define LAPACK_cgetsls LAPACK_GLOBAL(cgetsls,CGETSLS)
-#define LAPACK_zgetsls LAPACK_GLOBAL(zgetsls,ZGETSLS)
-
-// LAPACK 3.8.0
-#define LAPACK_ssysv_aa_2stage LAPACK_GLOBAL(ssysv_aa_2stage,SSYSV_AA_2STAGE)
-#define LAPACK_dsysv_aa_2stage LAPACK_GLOBAL(dsysv_aa_2stage,DSYSV_AA_2STAGE)
-#define LAPACK_chesv_aa_2stage LAPACK_GLOBAL(chesv_aa_2stage,CHESV_AA_2STAGE)
-#define LAPACK_zsysv_aa_2stage LAPACK_GLOBAL(zsysv_aa_2stage,ZSYSV_AA_2STAGE)
-#define LAPACK_csysv_aa_2stage LAPACK_GLOBAL(csysv_aa_2stage,CSYSV_AA_2STAGE)
-#define LAPACK_zhesv_aa_2stage LAPACK_GLOBAL(zhesv_aa_2stage,ZHESV_AA_2STAGE)
-#define LAPACK_ssytrs_aa_2stage LAPACK_GLOBAL(ssytrs_aa_2stage,SSYTRS_AA_2STAGE)
-#define LAPACK_dsytrs_aa_2stage LAPACK_GLOBAL(dsytrs_aa_2stage,DSYTRS_AA_2STAGE)
-#define LAPACK_csytrs_aa_2stage LAPACK_GLOBAL(csytrs_aa_2stage,CSYTRS_AA_2STAGE)
-#define LAPACK_zsytrs_aa_2stage LAPACK_GLOBAL(zsytrs_aa_2stage,ZSYTRS_AA_2STAGE)
-#define LAPACK_chetrs_aa_2stage LAPACK_GLOBAL(chetrs_aa_2stage,CHETRS_AA_2STAGE)
-#define LAPACK_zhetrs_aa_2stage LAPACK_GLOBAL(zhetrs_aa_2stage,ZHETRS_AA_2STAGE)
-#define LAPACK_ssytrf_aa_2stage LAPACK_GLOBAL(ssytrf_aa_2stage,SSYTRF_AA_2STAGE)
-#define LAPACK_dsytrf_aa_2stage LAPACK_GLOBAL(dsytrf_aa_2stage,DSYTRF_AA_2STAGE)
-#define LAPACK_csytrf_aa_2stage LAPACK_GLOBAL(csytrf_aa_2stage,CSYTRF_AA_2STAGE)
-#define LAPACK_zsytrf_aa_2stage LAPACK_GLOBAL(zsytrf_aa_2stage,ZSYTRF_AA_2STAGE)
-#define LAPACK_chetrf_aa_2stage LAPACK_GLOBAL(chetrf_aa_2stage,CHETRF_AA_2STAGE)
-#define LAPACK_zhetrf_aa_2stage LAPACK_GLOBAL(zhetrf_aa_2stage,ZHETRF_AA_2STAGE)
-
-
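Every define in the removed block above funnels through the LAPACK_GLOBAL name-mangling macro, which picks the Fortran symbol spelling at build time. As a minimal sketch only (the FORTRAN_* guard names below are hypothetical; the real header's configuration guards may differ):

/* Sketch of a LAPACK_GLOBAL-style mangling macro; the FORTRAN_* guards
 * are hypothetical. Most Fortran compilers (gfortran included) emit
 * lowercase symbols with a trailing underscore, so LAPACK_dgesv
 * typically resolves to dgesv_. */
#if defined(FORTRAN_SYMBOLS_UPPERCASE)
#define LAPACK_GLOBAL(lcname,UCNAME) UCNAME
#elif defined(FORTRAN_SYMBOLS_PLAIN_LOWERCASE)
#define LAPACK_GLOBAL(lcname,UCNAME) lcname
#else
#define LAPACK_GLOBAL(lcname,UCNAME) lcname##_
#endif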
-void LAPACK_sgetrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, lapack_int* ipiv, lapack_int *info );
-void LAPACK_dgetrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, lapack_int* ipiv, lapack_int *info );
-void LAPACK_cgetrf( lapack_int* m, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, lapack_int *info );
-void LAPACK_zgetrf( lapack_int* m, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_int *info );
-void LAPACK_sgetrf2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, lapack_int* ipiv, lapack_int *info );
-void LAPACK_dgetrf2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, lapack_int* ipiv, lapack_int *info );
-void LAPACK_cgetrf2( lapack_int* m, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, lapack_int *info );
-void LAPACK_zgetrf2( lapack_int* m, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_int *info );
-void LAPACK_sgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, float* ab, lapack_int* ldab, lapack_int* ipiv, lapack_int *info );
-void LAPACK_dgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, double* ab, lapack_int* ldab, lapack_int* ipiv, lapack_int *info );
-void LAPACK_cgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_complex_float* ab, lapack_int* ldab, lapack_int* ipiv, lapack_int *info );
-void LAPACK_zgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_complex_double* ab, lapack_int* ldab, lapack_int* ipiv, lapack_int *info );
-void LAPACK_sgttrf( lapack_int* n, float* dl, float* d, float* du, float* du2, lapack_int* ipiv, lapack_int *info );
-void LAPACK_dgttrf( lapack_int* n, double* dl, double* d, double* du, double* du2, lapack_int* ipiv, lapack_int *info );
-void LAPACK_cgttrf( lapack_int* n, lapack_complex_float* dl, lapack_complex_float* d, lapack_complex_float* du, lapack_complex_float* du2, lapack_int* ipiv, lapack_int *info );
-void LAPACK_zgttrf( lapack_int* n, lapack_complex_double* dl, lapack_complex_double* d, lapack_complex_double* du, lapack_complex_double* du2, lapack_int* ipiv, lapack_int *info );
-void LAPACK_spotrf2( char* uplo, lapack_int* n, float* a, lapack_int* lda, lapack_int *info );
-void LAPACK_dpotrf2( char* uplo, lapack_int* n, double* a, lapack_int* lda, lapack_int *info );
-void LAPACK_cpotrf2( char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int *info );
-void LAPACK_zpotrf2( char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int *info );
-void LAPACK_spotrf( char* uplo, lapack_int* n, float* a, lapack_int* lda, lapack_int *info );
-void LAPACK_dpotrf( char* uplo, lapack_int* n, double* a, lapack_int* lda, lapack_int *info );
-void LAPACK_cpotrf( char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int *info );
-void LAPACK_zpotrf( char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int *info );
-void LAPACK_dpstrf( char* uplo, lapack_int* n, double* a, lapack_int* lda, lapack_int* piv, lapack_int* rank, double* tol, double* work, lapack_int *info );
-void LAPACK_spstrf( char* uplo, lapack_int* n, float* a, lapack_int* lda, lapack_int* piv, lapack_int* rank, float* tol, float* work, lapack_int *info );
-void LAPACK_zpstrf( char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int* piv, lapack_int* rank, double* tol, double* work, lapack_int *info );
-void LAPACK_cpstrf( char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int* piv, lapack_int* rank, float* tol, float* work, lapack_int *info );
-void LAPACK_dpftrf( char* transr, char* uplo, lapack_int* n, double* a, lapack_int *info );
-void LAPACK_spftrf( char* transr, char* uplo, lapack_int* n, float* a, lapack_int *info );
-void LAPACK_zpftrf( char* transr, char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int *info );
-void LAPACK_cpftrf( char* transr, char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int *info );
-void LAPACK_spptrf( char* uplo, lapack_int* n, float* ap, lapack_int *info );
-void LAPACK_dpptrf( char* uplo, lapack_int* n, double* ap, lapack_int *info );
-void LAPACK_cpptrf( char* uplo, lapack_int* n, lapack_complex_float* ap, lapack_int *info );
-void LAPACK_zpptrf( char* uplo, lapack_int* n, lapack_complex_double* ap, lapack_int *info );
-void LAPACK_spbtrf( char* uplo, lapack_int* n, lapack_int* kd, float* ab, lapack_int* ldab, lapack_int *info );
-void LAPACK_dpbtrf( char* uplo, lapack_int* n, lapack_int* kd, double* ab, lapack_int* ldab, lapack_int *info );
-void LAPACK_cpbtrf( char* uplo, lapack_int* n, lapack_int* kd, lapack_complex_float* ab, lapack_int* ldab, lapack_int *info );
-void LAPACK_zpbtrf( char* uplo, lapack_int* n, lapack_int* kd, lapack_complex_double* ab, lapack_int* ldab, lapack_int *info );
-void LAPACK_spttrf( lapack_int* n, float* d, float* e, lapack_int *info );
-void LAPACK_dpttrf( lapack_int* n, double* d, double* e, lapack_int *info );
-void LAPACK_cpttrf( lapack_int* n, float* d, lapack_complex_float* e, lapack_int *info );
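These prototypes are raw Fortran-style bindings: every argument, scalar dimensions included, is passed by address, and matrices are column-major. A minimal call sketch for the dgetrf binding above, assuming lapack_int is a plain int (an LP64 build) and a LAPACK-providing library is linked; dgetrf_ is what LAPACK_dgetrf usually expands to under the default mangling:

#include <stdio.h>

/* Sketch: LU-factorize a 2x2 column-major matrix via the Fortran-style
 * binding. Assumes lapack_int == int and default lowercase_ mangling. */
extern void dgetrf_(int* m, int* n, double* a, int* lda, int* ipiv, int* info);

int main(void) {
    double a[4] = { 4.0, 2.0,   /* column 0 */
                    1.0, 3.0 }; /* column 1 */
    int m = 2, n = 2, lda = 2, ipiv[2], info;
    dgetrf_(&m, &n, a, &lda, ipiv, &info);
    printf("dgetrf info = %d\n", info); /* 0 on success */
    return 0;
}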
-void LAPACK_zpttrf( lapack_int* n, double* d, lapack_complex_double* e, lapack_int *info );
-void LAPACK_ssytrf( char* uplo, lapack_int* n, float* a, lapack_int* lda, lapack_int* ipiv, float* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_dsytrf( char* uplo, lapack_int* n, double* a, lapack_int* lda, lapack_int* ipiv, double* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_csytrf( char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_zsytrf( char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_double* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_chetrf( char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_zhetrf( char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_double* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_ssptrf( char* uplo, lapack_int* n, float* ap, lapack_int* ipiv, lapack_int *info );
-void LAPACK_dsptrf( char* uplo, lapack_int* n, double* ap, lapack_int* ipiv, lapack_int *info );
-void LAPACK_csptrf( char* uplo, lapack_int* n, lapack_complex_float* ap, lapack_int* ipiv, lapack_int *info );
-void LAPACK_zsptrf( char* uplo, lapack_int* n, lapack_complex_double* ap, lapack_int* ipiv, lapack_int *info );
-void LAPACK_chptrf( char* uplo, lapack_int* n, lapack_complex_float* ap, lapack_int* ipiv, lapack_int *info );
-void LAPACK_zhptrf( char* uplo, lapack_int* n, lapack_complex_double* ap, lapack_int* ipiv, lapack_int *info );
-void LAPACK_sgetrs( char* trans, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dgetrs( char* trans, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cgetrs( char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zgetrs( char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_sgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const float* ab, lapack_int* ldab, const lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const double* ab, lapack_int* ldab, const lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const lapack_complex_float* ab, lapack_int* ldab, const lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const lapack_complex_double* ab, lapack_int* ldab, const lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_sgttrs( char* trans, lapack_int* n, lapack_int* nrhs, const float* dl, const float* d, const float* du, const float* du2, const lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
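The *sytrf/*hetrf prototypes above carry LAPACK's usual work/lwork pair: calling with lwork = -1 performs a workspace query, returning the optimal size in work[0] instead of factorizing. A hedged sketch of that convention, again assuming lapack_int is int and the default dsytrf_ mangling:

#include <stdlib.h>

extern void dsytrf_(char* uplo, int* n, double* a, int* lda, int* ipiv,
                    double* work, int* lwork, int* info);

/* Query the optimal workspace size, then run the real factorization. */
static int factor_symmetric(double* a, int n, int* ipiv) {
    char uplo = 'U';
    int lda = n, lwork = -1, info;
    double wkopt;
    dsytrf_(&uplo, &n, a, &lda, ipiv, &wkopt, &lwork, &info); /* query */
    lwork = (int)wkopt;
    double* work = malloc((size_t)lwork * sizeof *work);
    if (!work) return -1;
    dsytrf_(&uplo, &n, a, &lda, ipiv, work, &lwork, &info);   /* factor */
    free(work);
    return info;
}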
-void LAPACK_dgttrs( char* trans, lapack_int* n, lapack_int* nrhs, const double* dl, const double* d, const double* du, const double* du2, const lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cgttrs( char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* dl, const lapack_complex_float* d, const lapack_complex_float* du, const lapack_complex_float* du2, const lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zgttrs( char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* dl, const lapack_complex_double* d, const lapack_complex_double* du, const lapack_complex_double* du2, const lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_spotrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dpotrs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cpotrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zpotrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs, const double* a, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_spftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_spptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* ap, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dpptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* ap, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cpptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zpptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_spbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const float* ab, lapack_int* ldab, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const double* ab, lapack_int* ldab, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const lapack_complex_float* ab, lapack_int* ldab, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const lapack_complex_double* ab, lapack_int* ldab, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_spttrs( lapack_int* n, lapack_int* nrhs, const float* d, const float* e, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dpttrs( lapack_int* n, lapack_int* nrhs, const double* d, const double* e, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cpttrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d, const lapack_complex_float* e, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zpttrs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* d, const lapack_complex_double* e, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_ssytrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dsytrs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_csytrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zsytrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_chetrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zhetrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_ssptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* ap, const lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dsptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* ap, const lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_csptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, const lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zsptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, const lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_chptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, const lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zhptrs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, const lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_strtrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dtrtrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_ctrtrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_ztrtrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_stptrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const float* ap, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dtptrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const double* ap, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_ctptrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_ztptrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_stbtrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const float* ab, lapack_int* ldab, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dtbtrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const double* ab, lapack_int* ldab, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_ctbtrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const lapack_complex_float* ab, lapack_int* ldab, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_ztbtrs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const lapack_complex_double* ab, lapack_int* ldab, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_sgecon( char* norm, lapack_int* n, const float* a, lapack_int* lda, float* anorm, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgecon( char* norm, lapack_int* n, const double* a, lapack_int* lda, double* anorm, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgecon( char* norm, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* anorm, float* rcond, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zgecon( char* norm, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* anorm, double* rcond, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_sgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku, const float* ab, lapack_int* ldab, const lapack_int* ipiv, float* anorm, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku, const double* ab, lapack_int* ldab, const lapack_int* ipiv, double* anorm, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_float* ab, lapack_int* ldab, const lapack_int* ipiv, float* anorm, float* rcond, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_double* ab, lapack_int* ldab, const lapack_int* ipiv, double* anorm, double* rcond, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_sgtcon( char* norm, lapack_int* n, const float* dl, const float* d, const float* du, const float* du2, const lapack_int* ipiv, float* anorm, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgtcon( char* norm, lapack_int* n, const double* dl, const double* d, const double* du, const double* du2, const lapack_int* ipiv, double* anorm, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgtcon( char* norm, lapack_int* n, const lapack_complex_float* dl, const lapack_complex_float* d, const lapack_complex_float* du, const lapack_complex_float* du2, const lapack_int* ipiv, float* anorm, float* rcond, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zgtcon( char* norm, lapack_int* n, const lapack_complex_double* dl, const lapack_complex_double* d, const lapack_complex_double* du, const lapack_complex_double* du2, const lapack_int* ipiv, double* anorm, double* rcond, lapack_complex_double* work, lapack_int *info );
-void LAPACK_spocon( char* uplo, lapack_int* n, const float* a, lapack_int* lda, float* anorm, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dpocon( char* uplo, lapack_int* n, const double* a, lapack_int* lda, double* anorm, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cpocon( char* uplo, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* anorm, float* rcond, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zpocon( char* uplo, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* anorm, double* rcond, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_sppcon( char* uplo, lapack_int* n, const float* ap, float* anorm, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dppcon( char* uplo, lapack_int* n, const double* ap, double* anorm, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cppcon( char* uplo, lapack_int* n, const lapack_complex_float* ap, float* anorm, float* rcond, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zppcon( char* uplo, lapack_int* n, const lapack_complex_double* ap, double* anorm, double* rcond, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_spbcon( char* uplo, lapack_int* n, lapack_int* kd, const float* ab, lapack_int* ldab, float* anorm, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dpbcon( char* uplo, lapack_int* n, lapack_int* kd, const double* ab, lapack_int* ldab, double* anorm, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cpbcon( char* uplo, lapack_int* n, lapack_int* kd, const lapack_complex_float* ab, lapack_int* ldab, float* anorm, float* rcond, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zpbcon( char* uplo, lapack_int* n, lapack_int* kd, const lapack_complex_double* ab, lapack_int* ldab, double* anorm, double* rcond, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_sptcon( lapack_int* n, const float* d, const float* e, float* anorm, float* rcond, float* work, lapack_int *info );
-void LAPACK_dptcon( lapack_int* n, const double* d, const double* e, double* anorm, double* rcond, double* work, lapack_int *info );
-void LAPACK_cptcon( lapack_int* n, const float* d, const lapack_complex_float* e, float* anorm, float* rcond, float* work, lapack_int *info );
-void LAPACK_zptcon( lapack_int* n, const double* d, const lapack_complex_double* e, double* anorm, double* rcond, double* work, lapack_int *info );
-void LAPACK_ssycon( char* uplo, lapack_int* n, const float* a, lapack_int* lda, const lapack_int* ipiv, float* anorm, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dsycon( char* uplo, lapack_int* n, const double* a, lapack_int* lda, const lapack_int* ipiv, double* anorm, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_csycon( char* uplo, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, const lapack_int* ipiv, float* anorm, float* rcond, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zsycon( char* uplo, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, const lapack_int* ipiv, double* anorm, double* rcond, lapack_complex_double* work, lapack_int *info );
-void LAPACK_checon( char* uplo, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, const lapack_int* ipiv, float* anorm, float* rcond, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zhecon( char* uplo, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, const lapack_int* ipiv, double* anorm, double* rcond, lapack_complex_double* work, lapack_int *info );
-void LAPACK_sspcon( char* uplo, lapack_int* n, const float* ap, const lapack_int* ipiv, float* anorm, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dspcon( char* uplo, lapack_int* n, const double* ap, const lapack_int* ipiv, double* anorm, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cspcon( char* uplo, lapack_int* n, const lapack_complex_float* ap, const lapack_int* ipiv, float* anorm, float* rcond, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zspcon( char* uplo, lapack_int* n, const lapack_complex_double* ap, const lapack_int* ipiv, double* anorm, double* rcond, lapack_complex_double* work, lapack_int *info );
-void LAPACK_chpcon( char* uplo, lapack_int* n, const lapack_complex_float* ap, const lapack_int* ipiv, float* anorm, float* rcond, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zhpcon( char* uplo, lapack_int* n, const lapack_complex_double* ap, const lapack_int* ipiv, double* anorm, double* rcond, lapack_complex_double* work, lapack_int *info );
-void LAPACK_strcon( char* norm, char* uplo, char* diag, lapack_int* n, const float* a, lapack_int* lda, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dtrcon( char* norm, char* uplo, char* diag, lapack_int* n, const double* a, lapack_int* lda, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_ctrcon( char* norm, char* uplo, char* diag, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* rcond, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_ztrcon( char* norm, char* uplo, char* diag, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* rcond, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_stpcon( char* norm, char* uplo, char* diag, lapack_int* n, const float* ap, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dtpcon( char* norm, char* uplo, char* diag, lapack_int* n, const double* ap, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_ctpcon( char* norm, char* uplo, char* diag, lapack_int* n, const lapack_complex_float* ap, float* rcond, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_ztpcon( char* norm, char* uplo, char* diag, lapack_int* n, const lapack_complex_double* ap, double* rcond, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_stbcon( char* norm, char* uplo, char* diag, lapack_int* n, lapack_int* kd, const float* ab, lapack_int* ldab, float* rcond, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dtbcon( char* norm, char* uplo, char* diag, lapack_int* n, lapack_int* kd, const double* ab, lapack_int* ldab, double* rcond, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_ctbcon( char* norm, char* uplo, char* diag, lapack_int* n, lapack_int* kd, const lapack_complex_float* ab, lapack_int* ldab, float* rcond, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_ztbcon( char* norm, char* uplo, char* diag, lapack_int* n, lapack_int* kd, const lapack_complex_double* ab, lapack_int* ldab, double* rcond, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_sgerfs( char* trans, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const float* af, lapack_int* ldaf, const lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgerfs( char* trans, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const double* af, lapack_int* ldaf, const lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgerfs( char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* af, lapack_int* ldaf, const lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zgerfs( char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* af, lapack_int* ldaf, const lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_dgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const double* af, lapack_int* ldaf, const lapack_int* ipiv, const double* r, const double* c, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_sgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const float* af, lapack_int* ldaf, const lapack_int* ipiv, const float* r, const float* c, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_zgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* af, lapack_int* ldaf, const lapack_int* ipiv, const double* r, const double* c, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_cgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* af, lapack_int* ldaf, const lapack_int* ipiv, const float* r, const float* c, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_sgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const float* ab, lapack_int* ldab, const float* afb, lapack_int* ldafb, const lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const double* ab, lapack_int* ldab, const double* afb, lapack_int* ldafb, const lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const lapack_complex_float* ab, lapack_int* ldab, const lapack_complex_float* afb, lapack_int* ldafb, const lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const lapack_complex_double* ab, lapack_int* ldab, const lapack_complex_double* afb, lapack_int* ldafb, const lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_dgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const double* ab, lapack_int* ldab, const double* afb, lapack_int* ldafb, const lapack_int* ipiv, const double* r, const double* c, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_sgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const float* ab, lapack_int* ldab, const float* afb, lapack_int* ldafb, const lapack_int* ipiv, const float* r, const float* c, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_zgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const lapack_complex_double* ab, lapack_int* ldab, const lapack_complex_double* afb, lapack_int* ldafb, const lapack_int* ipiv, const double* r, const double* c, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_cgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, const lapack_complex_float* ab, lapack_int* ldab, const lapack_complex_float* afb, lapack_int* ldafb, const lapack_int* ipiv, const float* r, const float* c, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_sgtrfs( char* trans, lapack_int* n, lapack_int* nrhs, const float* dl, const float* d, const float* du, const float* dlf, const float* df, const float* duf, const float* du2, const lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgtrfs( char* trans, lapack_int* n, lapack_int* nrhs, const double* dl, const double* d, const double* du, const double* dlf, const double* df, const double* duf, const double* du2, const lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgtrfs( char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* dl, const lapack_complex_float* d, const lapack_complex_float* du, const lapack_complex_float* dlf, const lapack_complex_float* df, const lapack_complex_float* duf, const lapack_complex_float* du2, const lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zgtrfs( char* trans, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* dl, const lapack_complex_double* d, const lapack_complex_double* du, const lapack_complex_double* dlf, const lapack_complex_double* df, const lapack_complex_double* duf, const lapack_complex_double* du2, const lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_sporfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const float* af, lapack_int* ldaf, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dporfs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const double* af, lapack_int* ldaf, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cporfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* af, lapack_int* ldaf, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zporfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* af, lapack_int* ldaf, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_dporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const double* af, lapack_int* ldaf, const double* s, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_sporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const float* af, lapack_int* ldaf, const float* s, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_zporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* af, lapack_int* ldaf, const double* s, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_cporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* af, lapack_int* ldaf, const float* s, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_spprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* ap, const float* afp, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dpprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* ap, const double* afp, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cpprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, const lapack_complex_float* afp, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zpprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, const lapack_complex_double* afp, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_spbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const float* ab, lapack_int* ldab, const float* afb, lapack_int* ldafb, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const double* ab, lapack_int* ldab, const double* afb, lapack_int* ldafb, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const lapack_complex_float* ab, lapack_int* ldab, const lapack_complex_float* afb, lapack_int* ldafb, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const lapack_complex_double* ab, lapack_int* ldab, const lapack_complex_double* afb, lapack_int* ldafb, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_sptrfs( lapack_int* n, lapack_int* nrhs, const float* d, const float* e, const float* df, const float* ef, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int *info );
-void LAPACK_dptrfs( lapack_int* n, lapack_int* nrhs, const double* d, const double* e, const double* df, const double* ef, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int *info );
-void LAPACK_cptrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d, const lapack_complex_float* e, const float* df, const lapack_complex_float* ef, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zptrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* d, const lapack_complex_double* e, const double* df, const lapack_complex_double* ef, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_ssyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const float* af, lapack_int* ldaf, const lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const double* af, lapack_int* ldaf, const lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_csyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* af, lapack_int* ldaf, const lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* af, lapack_int* ldaf, const lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_dsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const double* af, lapack_int* ldaf, const lapack_int* ipiv, const double* s, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_ssyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const float* af, lapack_int* ldaf, const lapack_int* ipiv, const float* s, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_zsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* af, lapack_int* ldaf, const lapack_int* ipiv, const double* s, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_csyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* af, lapack_int* ldaf, const lapack_int* ipiv, const float* s, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_cherfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* af, lapack_int* ldaf, const lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zherfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* af, lapack_int* ldaf, const lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_zherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* af, lapack_int* ldaf, const lapack_int* ipiv, const double* s, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_cherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* af, lapack_int* ldaf, const lapack_int* ipiv, const float* s, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_ssprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* ap, const float* afp, const lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dsprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const double* ap, const double* afp, const lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_csprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, const lapack_complex_float* afp, const lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zsprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, const lapack_complex_double* afp, const lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_chprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, const lapack_complex_float* afp, const lapack_int* ipiv, const lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zhprfs( char* uplo, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, const lapack_complex_double* afp, const lapack_int* ipiv, const lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_strrfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const float* a, lapack_int* lda, const float* b, lapack_int* ldb, const float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dtrrfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const double* a, lapack_int* lda, const double* b, lapack_int* ldb, const double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_ctrrfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* a, lapack_int* lda, const lapack_complex_float* b, lapack_int* ldb, const lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_ztrrfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* a, lapack_int* lda, const lapack_complex_double* b, lapack_int* ldb, const lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_stprfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const float* ap, const float* b, lapack_int* ldb, const float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dtprfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const double* ap, const double* b, lapack_int* ldb, const double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_ctprfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const lapack_complex_float* ap, const lapack_complex_float* b, lapack_int* ldb, const lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_ztprfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* nrhs, const lapack_complex_double* ap, const lapack_complex_double* b, lapack_int* ldb, const lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_stbrfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const float* ab, lapack_int* ldab, const float* b, lapack_int* ldb, const float* x, lapack_int* ldx, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dtbrfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const double* ab, lapack_int* ldab, const double* b, lapack_int* ldb, const double* x, lapack_int* ldx, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_ctbrfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const lapack_complex_float* ab, lapack_int* ldab, const lapack_complex_float* b, lapack_int* ldb, const lapack_complex_float* x, lapack_int* ldx, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_ztbrfs( char* uplo, char* trans, char* diag, lapack_int* n, lapack_int* kd, lapack_int* nrhs, const lapack_complex_double* ab, lapack_int* ldab, const lapack_complex_double* b, lapack_int* ldb, const lapack_complex_double* x, lapack_int* ldx, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_sgetri( lapack_int* n, float* a, lapack_int* lda, const lapack_int* ipiv, float* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_dgetri( lapack_int* n, double* a, lapack_int* lda, const lapack_int* ipiv, double* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_cgetri( lapack_int* n, lapack_complex_float* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_zgetri( lapack_int* n, lapack_complex_double* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_double* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_spotri( char* uplo, lapack_int* n, float* a, lapack_int* lda, lapack_int *info );
-void LAPACK_dpotri( char* uplo, lapack_int* n, double* a, lapack_int* lda, lapack_int *info );
-void LAPACK_cpotri( char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int *info );
-void LAPACK_zpotri( char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int *info );
-void LAPACK_dpftri( char* transr, char* uplo, lapack_int* n, double* a, lapack_int *info );
-void LAPACK_spftri( char* transr, char* uplo, lapack_int* n, float* a, lapack_int *info );
-void LAPACK_zpftri( char* transr, char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int *info );
-void LAPACK_cpftri( char* transr, char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int *info );
-void LAPACK_spptri( char* uplo, lapack_int* n, float* ap, lapack_int *info );
-void LAPACK_dpptri( char* uplo, lapack_int* n, double* ap, lapack_int *info );
-void LAPACK_cpptri( char* uplo, lapack_int* n, lapack_complex_float* ap, lapack_int *info );
-void LAPACK_zpptri( char* uplo, lapack_int* n, lapack_complex_double* ap, lapack_int *info );
-void LAPACK_ssytri( char* uplo, lapack_int* n, float* a, lapack_int* lda, const lapack_int* ipiv, float* work, lapack_int *info );
-void LAPACK_dsytri( char* uplo, lapack_int* n, double* a, lapack_int* lda, const lapack_int* ipiv, double* work, lapack_int *info );
-void LAPACK_csytri( char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zsytri( char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_double* work, lapack_int *info );
-void LAPACK_chetri( char* uplo, lapack_int* n, lapack_complex_float* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zhetri( char* uplo, lapack_int* n, lapack_complex_double* a, lapack_int* lda, const lapack_int* ipiv, lapack_complex_double* work, lapack_int *info );
-void LAPACK_ssptri( char* uplo, lapack_int* n, float* ap, const lapack_int* ipiv, float* work, lapack_int *info );
-void LAPACK_dsptri( char* uplo, lapack_int* n, double* ap, const lapack_int* ipiv, double* work, lapack_int *info );
-void LAPACK_csptri( char* uplo, lapack_int* n, lapack_complex_float* ap, const lapack_int* ipiv, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zsptri( char* uplo, lapack_int* n, lapack_complex_double* ap, const lapack_int* ipiv, lapack_complex_double* work, lapack_int *info );
-void LAPACK_chptri( char* uplo, lapack_int* n, lapack_complex_float* ap, const lapack_int* ipiv, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zhptri( char* uplo, lapack_int* n, lapack_complex_double* ap, const lapack_int* ipiv, lapack_complex_double* work, lapack_int *info );
-void LAPACK_strtri( char* uplo, char* diag, lapack_int* n, float* a, lapack_int* lda, lapack_int *info );
-void LAPACK_dtrtri( char* uplo, char* diag, lapack_int* n, double* a, lapack_int* lda, lapack_int *info );
-void LAPACK_ctrtri( char* uplo, char* diag, lapack_int* n, lapack_complex_float* a, lapack_int* lda, lapack_int *info );
-void LAPACK_ztrtri( char* uplo, char* diag, lapack_int* n, lapack_complex_double* a, lapack_int* lda, lapack_int *info );
-void LAPACK_dtftri( char* transr, char* uplo, char* diag, lapack_int* n, double* a, lapack_int *info );
-void LAPACK_stftri( char* transr, char* uplo, char* diag, lapack_int* n, float* a, lapack_int *info );
-void LAPACK_ztftri( char* transr, char* uplo, char* diag, lapack_int* n, lapack_complex_double* a, lapack_int *info );
-void LAPACK_ctftri( char* transr, char* uplo, char* diag, lapack_int* n, lapack_complex_float* a, lapack_int *info );
-void LAPACK_stptri( char* uplo, char* diag, lapack_int* n, float* ap, lapack_int *info );
-void LAPACK_dtptri( char* uplo, char* diag, lapack_int* n, double* ap, lapack_int *info );
-void LAPACK_ctptri( char* uplo, char* diag, lapack_int* n, lapack_complex_float* ap, lapack_int *info );
-void LAPACK_ztptri( char* uplo, char* diag, lapack_int* n, lapack_complex_double* ap, lapack_int *info );
-void LAPACK_sgeequ( lapack_int* m, lapack_int* n, const float* a, lapack_int* lda, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
-void LAPACK_dgeequ( lapack_int* m, lapack_int* n, const double* a, lapack_int* lda, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
-void LAPACK_cgeequ( lapack_int* m, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
-void LAPACK_zgeequ( lapack_int* m, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
-void LAPACK_dgeequb( lapack_int* m, lapack_int* n, const double* a, lapack_int* lda, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
-void LAPACK_sgeequb( lapack_int* m, lapack_int* n, const float* a, lapack_int* lda, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
-void LAPACK_zgeequb( lapack_int* m, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
-void LAPACK_cgeequb( lapack_int* m, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
-void LAPACK_sgbequ( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const float* ab, lapack_int* ldab, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
-void LAPACK_dgbequ( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const double* ab, lapack_int* ldab, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
-void LAPACK_cgbequ( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_float* ab, lapack_int* ldab, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
-void LAPACK_zgbequ( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_double* ab, lapack_int* ldab, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
-void LAPACK_dgbequb( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const double* ab, lapack_int* ldab, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
-void LAPACK_sgbequb( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const float* ab, lapack_int* ldab, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
-void LAPACK_zgbequb( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_double* ab, lapack_int* ldab, double* r, double* c, double* rowcnd, double* colcnd, double* amax, lapack_int *info );
-void LAPACK_cgbequb( lapack_int* m, lapack_int* n, lapack_int* kl, lapack_int* ku, const lapack_complex_float* ab, lapack_int* ldab, float* r, float* c, float* rowcnd, float* colcnd, float* amax, lapack_int *info );
-void LAPACK_spoequ( lapack_int* n, const float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_int *info );
-void LAPACK_dpoequ( lapack_int* n, const double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_int *info );
-void LAPACK_cpoequ( lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_int *info );
-void LAPACK_zpoequ( lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_int *info );
-void LAPACK_dpoequb( lapack_int* n, const double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_int *info );
-void LAPACK_spoequb( lapack_int* n, const float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_int *info );
-void LAPACK_zpoequb( lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_int *info );
-void LAPACK_cpoequb( lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_int *info );
-void LAPACK_sppequ( char* uplo, lapack_int* n, const float* ap, float* s, float* scond, float* amax, lapack_int *info );
-void LAPACK_dppequ( char* uplo, lapack_int* n, const double* ap, double* s, double* scond, double* amax, lapack_int *info );
-void LAPACK_cppequ( char* uplo, lapack_int* n, const lapack_complex_float* ap, float* s, float* scond, float* amax, lapack_int *info );
-void LAPACK_zppequ( char* uplo, lapack_int* n, const lapack_complex_double* ap, double* s, double* scond, double* amax, lapack_int *info );
-void LAPACK_spbequ( char* uplo, lapack_int* n, lapack_int* kd, const float* ab, lapack_int* ldab, float* s, float* scond, float* amax, lapack_int *info );
-void LAPACK_dpbequ( char* uplo, lapack_int* n, lapack_int* kd, const double* ab, lapack_int* ldab, double* s, double* scond, double* amax, lapack_int *info );
-void LAPACK_cpbequ( char* uplo, lapack_int* n, lapack_int* kd, const lapack_complex_float* ab, lapack_int* ldab, float* s, float* scond, float* amax, lapack_int *info );
-void LAPACK_zpbequ( char* uplo, lapack_int* n, lapack_int* kd, const lapack_complex_double* ab, lapack_int* ldab, double* s, double* scond, double* amax, lapack_int *info );
-void LAPACK_dsyequb( char* uplo, lapack_int* n, const double* a, lapack_int* lda, double* s, double* scond, double* amax, double* work, lapack_int *info );
-void LAPACK_ssyequb( char* uplo, lapack_int* n, const float* a, lapack_int* lda, float* s, float* scond, float* amax, float* work, lapack_int *info );
-void LAPACK_zsyequb( char* uplo, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_complex_double* work, lapack_int *info );
-void LAPACK_csyequb( char* uplo, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_complex_float* work, lapack_int *info );
-void LAPACK_zheequb( char* uplo, lapack_int* n, const lapack_complex_double* a, lapack_int* lda, double* s, double* scond, double* amax, lapack_complex_double* work, lapack_int *info );
-void LAPACK_cheequb( char* uplo, lapack_int* n, const lapack_complex_float* a, lapack_int* lda, float* s, float* scond, float* amax, lapack_complex_float* work, lapack_int *info );
-void LAPACK_sgesv( lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dsgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, lapack_int* ipiv, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* work, float* swork, lapack_int* iter, lapack_int *info );
-void LAPACK_zcgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, lapack_complex_double* work, lapack_complex_float* swork, double* rwork, lapack_int* iter, lapack_int *info );
-void LAPACK_sgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* r, float* c, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* r, double* c, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* r, float* c, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* r, double* c, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_dgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda, double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* r, double* c, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_sgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda, float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* r, float* c, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_zgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, lapack_complex_double* a, lapack_int* lda, lapack_complex_double* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, double* r, double* c, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_cgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, lapack_complex_float* a, lapack_int* lda, lapack_complex_float* af, lapack_int* ldaf, lapack_int* ipiv, char* equed, float* r, float* c, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_sgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, float* ab, lapack_int* ldab, lapack_int* ipiv, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, double* ab, lapack_int* ldab, lapack_int* ipiv, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab, lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab, lapack_int* ldab, lapack_int* ipiv, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_sgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r, float* c, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r, double* c, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab, lapack_complex_float* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r, float* c, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab, lapack_int* ldab, lapack_complex_double* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r, double* c, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_dgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r, double* c, double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_sgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r, float* c, float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_zgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab, lapack_int* ldab, lapack_complex_double* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r, double* c, lapack_complex_double* b, lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, double* rcond, double* rpvgrw, double* berr, lapack_int* n_err_bnds, double* err_bnds_norm, double* err_bnds_comp, lapack_int* nparams, double* params, lapack_complex_double* work, double* rwork, lapack_int *info );
-void LAPACK_cgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab, lapack_complex_float* afb, lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r, float* c, lapack_complex_float* b, lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, lapack_int* n_err_bnds, float* err_bnds_norm, float* err_bnds_comp, lapack_int* nparams, float* params, lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_sgtsv( lapack_int* n, lapack_int* nrhs, float* dl, float* d, float* du, float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_dgtsv( lapack_int* n, lapack_int* nrhs, double* dl, double* d, double* du, double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_cgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* dl, lapack_complex_float* d, lapack_complex_float* du, lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_zgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* dl, lapack_complex_double* d, lapack_complex_double* du, lapack_complex_double* b, lapack_int* ldb, lapack_int *info );
-void LAPACK_sgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, const float* dl, const float* d, const float* du, float* dlf, float* df, float* duf, float* du2, lapack_int* ipiv, const float* b, lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, float* ferr, float* berr, float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, const double* dl, const double* d, const double* du, double* dlf, double* df, double* duf, double* du2, lapack_int* ipiv, const double* b, lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, double* ferr, double* berr, double* work, lapack_int* iwork, lapack_int *info );
lapack_int* nrhs, - const double* dl, const double* d, const double* du, - double* dlf, double* df, double* duf, double* du2, - lapack_int* ipiv, const double* b, lapack_int* ldb, - double* x, lapack_int* ldx, double* rcond, double* ferr, - double* berr, double* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_cgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, - const lapack_complex_float* dl, - const lapack_complex_float* d, - const lapack_complex_float* du, lapack_complex_float* dlf, - lapack_complex_float* df, lapack_complex_float* duf, - lapack_complex_float* du2, lapack_int* ipiv, - const lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* x, lapack_int* ldx, float* rcond, - float* ferr, float* berr, lapack_complex_float* work, - float* rwork, lapack_int *info ); -void LAPACK_zgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs, - const lapack_complex_double* dl, - const lapack_complex_double* d, - const lapack_complex_double* du, lapack_complex_double* dlf, - lapack_complex_double* df, lapack_complex_double* duf, - lapack_complex_double* du2, lapack_int* ipiv, - const lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* x, lapack_int* ldx, double* rcond, - double* ferr, double* berr, lapack_complex_double* work, - double* rwork, lapack_int *info ); -void LAPACK_sposv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a, - lapack_int* lda, float* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_dposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, - lapack_int* lda, double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_cposv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_zposv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_dsposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, - lapack_int* lda, double* b, lapack_int* ldb, double* x, - lapack_int* ldx, double* work, float* swork, - lapack_int* iter, lapack_int *info ); -void LAPACK_zcposv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* x, lapack_int* ldx, - lapack_complex_double* work, lapack_complex_float* swork, - double* rwork, lapack_int* iter, lapack_int *info ); -void LAPACK_sposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - float* a, lapack_int* lda, float* af, lapack_int* ldaf, - char* equed, float* s, float* b, lapack_int* ldb, float* x, - lapack_int* ldx, float* rcond, float* ferr, float* berr, - float* work, lapack_int* iwork, lapack_int *info ); -void LAPACK_dposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - double* a, lapack_int* lda, double* af, lapack_int* ldaf, - char* equed, double* s, double* b, lapack_int* ldb, - double* x, lapack_int* ldx, double* rcond, double* ferr, - double* berr, double* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_cposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* af, lapack_int* ldaf, char* equed, - float* s, lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* x, lapack_int* ldx, float* rcond, - float* ferr, float* berr, lapack_complex_float* work, - float* rwork, lapack_int *info ); -void 
LAPACK_zposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* af, lapack_int* ldaf, char* equed, - double* s, lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* x, lapack_int* ldx, double* rcond, - double* ferr, double* berr, lapack_complex_double* work, - double* rwork, lapack_int *info ); -void LAPACK_dposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - double* a, lapack_int* lda, double* af, lapack_int* ldaf, - char* equed, double* s, double* b, lapack_int* ldb, - double* x, lapack_int* ldx, double* rcond, double* rpvgrw, - double* berr, lapack_int* n_err_bnds, - double* err_bnds_norm, double* err_bnds_comp, - lapack_int* nparams, double* params, double* work, - lapack_int* iwork, lapack_int *info ); -void LAPACK_sposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - float* a, lapack_int* lda, float* af, lapack_int* ldaf, - char* equed, float* s, float* b, lapack_int* ldb, float* x, - lapack_int* ldx, float* rcond, float* rpvgrw, float* berr, - lapack_int* n_err_bnds, float* err_bnds_norm, - float* err_bnds_comp, lapack_int* nparams, float* params, - float* work, lapack_int* iwork, lapack_int *info ); -void LAPACK_zposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* af, lapack_int* ldaf, char* equed, - double* s, lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* x, lapack_int* ldx, double* rcond, - double* rpvgrw, double* berr, lapack_int* n_err_bnds, - double* err_bnds_norm, double* err_bnds_comp, - lapack_int* nparams, double* params, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_cposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* af, lapack_int* ldaf, char* equed, - float* s, lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* x, lapack_int* ldx, float* rcond, - float* rpvgrw, float* berr, lapack_int* n_err_bnds, - float* err_bnds_norm, float* err_bnds_comp, - lapack_int* nparams, float* params, - lapack_complex_float* work, float* rwork, - lapack_int *info ); -void LAPACK_sppsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap, - float* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_dppsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap, - double* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_cppsv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* ap, lapack_complex_float* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_zppsv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* ap, lapack_complex_double* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_sppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - float* ap, float* afp, char* equed, float* s, float* b, - lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, - float* ferr, float* berr, float* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_dppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - double* ap, double* afp, char* equed, double* s, double* b, - lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, - double* ferr, double* berr, double* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_cppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* ap, lapack_complex_float* afp, - char* equed, float* s, 
lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, - float* rcond, float* ferr, float* berr, - lapack_complex_float* work, float* rwork, - lapack_int *info ); -void LAPACK_zppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* ap, lapack_complex_double* afp, - char* equed, double* s, lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, - double* rcond, double* ferr, double* berr, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_spbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, - float* ab, lapack_int* ldab, float* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_dpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, - double* ab, lapack_int* ldab, double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_cpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, - lapack_complex_float* ab, lapack_int* ldab, - lapack_complex_float* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_zpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs, - lapack_complex_double* ab, lapack_int* ldab, - lapack_complex_double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_spbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd, - lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb, - lapack_int* ldafb, char* equed, float* s, float* b, - lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, - float* ferr, float* berr, float* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_dpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd, - lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb, - lapack_int* ldafb, char* equed, double* s, double* b, - lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, - double* ferr, double* berr, double* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_cpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd, - lapack_int* nrhs, lapack_complex_float* ab, - lapack_int* ldab, lapack_complex_float* afb, - lapack_int* ldafb, char* equed, float* s, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* x, lapack_int* ldx, float* rcond, - float* ferr, float* berr, lapack_complex_float* work, - float* rwork, lapack_int *info ); -void LAPACK_zpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd, - lapack_int* nrhs, lapack_complex_double* ab, - lapack_int* ldab, lapack_complex_double* afb, - lapack_int* ldafb, char* equed, double* s, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* x, lapack_int* ldx, double* rcond, - double* ferr, double* berr, lapack_complex_double* work, - double* rwork, lapack_int *info ); -void LAPACK_sptsv( lapack_int* n, lapack_int* nrhs, float* d, float* e, - float* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_dptsv( lapack_int* n, lapack_int* nrhs, double* d, double* e, - double* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_cptsv( lapack_int* n, lapack_int* nrhs, float* d, - lapack_complex_float* e, lapack_complex_float* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_zptsv( lapack_int* n, lapack_int* nrhs, double* d, - lapack_complex_double* e, lapack_complex_double* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_sptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d, - const float* e, float* df, float* ef, const float* b, - lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, - float* ferr, float* berr, 
float* work, lapack_int *info ); -void LAPACK_dptsvx( char* fact, lapack_int* n, lapack_int* nrhs, - const double* d, const double* e, double* df, double* ef, - const double* b, lapack_int* ldb, double* x, - lapack_int* ldx, double* rcond, double* ferr, double* berr, - double* work, lapack_int *info ); -void LAPACK_cptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d, - const lapack_complex_float* e, float* df, - lapack_complex_float* ef, const lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, - float* rcond, float* ferr, float* berr, - lapack_complex_float* work, float* rwork, - lapack_int *info ); -void LAPACK_zptsvx( char* fact, lapack_int* n, lapack_int* nrhs, - const double* d, const lapack_complex_double* e, double* df, - lapack_complex_double* ef, const lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, - double* rcond, double* ferr, double* berr, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_ssysv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a, - lapack_int* lda, lapack_int* ipiv, float* b, lapack_int* ldb, - float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsysv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, - lapack_int* lda, lapack_int* ipiv, double* b, - lapack_int* ldb, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_csysv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zsysv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_ssysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const float* a, lapack_int* lda, float* af, - lapack_int* ldaf, lapack_int* ipiv, const float* b, - lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, - float* ferr, float* berr, float* work, lapack_int* lwork, - lapack_int* iwork, lapack_int *info ); -void LAPACK_dsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const double* a, lapack_int* lda, double* af, - lapack_int* ldaf, lapack_int* ipiv, const double* b, - lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, - double* ferr, double* berr, double* work, lapack_int* lwork, - lapack_int* iwork, lapack_int *info ); -void LAPACK_csysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* af, lapack_int* ldaf, - lapack_int* ipiv, const lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, - float* rcond, float* ferr, float* berr, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int *info ); -void LAPACK_zsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* af, lapack_int* ldaf, - lapack_int* ipiv, const lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, - double* rcond, double* ferr, double* berr, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int *info ); -void LAPACK_dsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - double* a, lapack_int* lda, double* af, lapack_int* 
ldaf, - lapack_int* ipiv, char* equed, double* s, double* b, - lapack_int* ldb, double* x, lapack_int* ldx, double* rcond, - double* rpvgrw, double* berr, lapack_int* n_err_bnds, - double* err_bnds_norm, double* err_bnds_comp, - lapack_int* nparams, double* params, double* work, - lapack_int* iwork, lapack_int *info ); -void LAPACK_ssysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - float* a, lapack_int* lda, float* af, lapack_int* ldaf, - lapack_int* ipiv, char* equed, float* s, float* b, - lapack_int* ldb, float* x, lapack_int* ldx, float* rcond, - float* rpvgrw, float* berr, lapack_int* n_err_bnds, - float* err_bnds_norm, float* err_bnds_comp, - lapack_int* nparams, float* params, float* work, - lapack_int* iwork, lapack_int *info ); -void LAPACK_zsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* af, lapack_int* ldaf, - lapack_int* ipiv, char* equed, double* s, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* x, lapack_int* ldx, double* rcond, - double* rpvgrw, double* berr, lapack_int* n_err_bnds, - double* err_bnds_norm, double* err_bnds_comp, - lapack_int* nparams, double* params, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_csysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* af, lapack_int* ldaf, - lapack_int* ipiv, char* equed, float* s, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* x, lapack_int* ldx, float* rcond, - float* rpvgrw, float* berr, lapack_int* n_err_bnds, - float* err_bnds_norm, float* err_bnds_comp, - lapack_int* nparams, float* params, - lapack_complex_float* work, float* rwork, - lapack_int *info ); -void LAPACK_chesv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zhesv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* af, lapack_int* ldaf, - lapack_int* ipiv, const lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, - float* rcond, float* ferr, float* berr, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int *info ); -void LAPACK_zhesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* af, lapack_int* ldaf, - lapack_int* ipiv, const lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, - double* rcond, double* ferr, double* berr, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int *info ); -void LAPACK_zhesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* af, lapack_int* ldaf, - lapack_int* ipiv, char* equed, double* s, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* x, lapack_int* ldx, double* rcond, - double* rpvgrw, double* berr, lapack_int* n_err_bnds, - double* err_bnds_norm, double* 
err_bnds_comp, - lapack_int* nparams, double* params, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_chesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* af, lapack_int* ldaf, - lapack_int* ipiv, char* equed, float* s, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* x, lapack_int* ldx, float* rcond, - float* rpvgrw, float* berr, lapack_int* n_err_bnds, - float* err_bnds_norm, float* err_bnds_comp, - lapack_int* nparams, float* params, - lapack_complex_float* work, float* rwork, - lapack_int *info ); -void LAPACK_sspsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap, - lapack_int* ipiv, float* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_dspsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap, - lapack_int* ipiv, double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_cspsv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* ap, lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_zspsv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* ap, lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_sspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const float* ap, float* afp, lapack_int* ipiv, - const float* b, lapack_int* ldb, float* x, lapack_int* ldx, - float* rcond, float* ferr, float* berr, float* work, - lapack_int* iwork, lapack_int *info ); -void LAPACK_dspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const double* ap, double* afp, lapack_int* ipiv, - const double* b, lapack_int* ldb, double* x, - lapack_int* ldx, double* rcond, double* ferr, double* berr, - double* work, lapack_int* iwork, lapack_int *info ); -void LAPACK_cspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_float* ap, lapack_complex_float* afp, - lapack_int* ipiv, const lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, - float* rcond, float* ferr, float* berr, - lapack_complex_float* work, float* rwork, - lapack_int *info ); -void LAPACK_zspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_double* ap, lapack_complex_double* afp, - lapack_int* ipiv, const lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx, - double* rcond, double* ferr, double* berr, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_chpsv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* ap, lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_zhpsv( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* ap, lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_chpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_float* ap, lapack_complex_float* afp, - lapack_int* ipiv, const lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx, - float* rcond, float* ferr, float* berr, - lapack_complex_float* work, float* rwork, - lapack_int *info ); -void LAPACK_zhpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_double* ap, lapack_complex_double* afp, - lapack_int* ipiv, const lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* x, lapack_int* 
ldx, - double* rcond, double* ferr, double* berr, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_sgeqrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* tau, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dgeqrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* tau, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cgeqrf( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* tau, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zgeqrf( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sgeqpf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - lapack_int* jpvt, float* tau, float* work, - lapack_int *info ); -void LAPACK_dgeqpf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - lapack_int* jpvt, double* tau, double* work, - lapack_int *info ); -void LAPACK_cgeqpf( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int* jpvt, - lapack_complex_float* tau, lapack_complex_float* work, - float* rwork, lapack_int *info ); -void LAPACK_zgeqpf( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* jpvt, - lapack_complex_double* tau, lapack_complex_double* work, - double* rwork, lapack_int *info ); -void LAPACK_sgeqp3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - lapack_int* jpvt, float* tau, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dgeqp3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - lapack_int* jpvt, double* tau, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cgeqp3( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int* jpvt, - lapack_complex_float* tau, lapack_complex_float* work, - lapack_int* lwork, float* rwork, lapack_int *info ); -void LAPACK_zgeqp3( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* jpvt, - lapack_complex_double* tau, lapack_complex_double* work, - lapack_int* lwork, double* rwork, lapack_int *info ); -void LAPACK_sorgqr( lapack_int* m, lapack_int* n, lapack_int* k, float* a, - lapack_int* lda, const float* tau, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dorgqr( lapack_int* m, lapack_int* n, lapack_int* k, double* a, - lapack_int* lda, const double* tau, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_sormqr( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const float* a, lapack_int* lda, - const float* tau, float* c, lapack_int* ldc, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dormqr( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const double* a, lapack_int* lda, - const double* tau, double* c, lapack_int* ldc, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cungqr( lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_float* a, lapack_int* lda, - const lapack_complex_float* tau, lapack_complex_float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_zungqr( lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cunmqr( char* 
side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* tau, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zunmqr( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* tau, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sgelqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* tau, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dgelqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* tau, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cgelqf( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* tau, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zgelqf( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sorglq( lapack_int* m, lapack_int* n, lapack_int* k, float* a, - lapack_int* lda, const float* tau, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dorglq( lapack_int* m, lapack_int* n, lapack_int* k, double* a, - lapack_int* lda, const double* tau, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_sormlq( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const float* a, lapack_int* lda, - const float* tau, float* c, lapack_int* ldc, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dormlq( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const double* a, lapack_int* lda, - const double* tau, double* c, lapack_int* ldc, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cunglq( lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_float* a, lapack_int* lda, - const lapack_complex_float* tau, lapack_complex_float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_zunglq( lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cunmlq( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* tau, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zunmlq( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* tau, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sgeqlf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* tau, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dgeqlf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* tau, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cgeqlf( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* tau, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void 
LAPACK_zgeqlf( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sorgql( lapack_int* m, lapack_int* n, lapack_int* k, float* a, - lapack_int* lda, const float* tau, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dorgql( lapack_int* m, lapack_int* n, lapack_int* k, double* a, - lapack_int* lda, const double* tau, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cungql( lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_float* a, lapack_int* lda, - const lapack_complex_float* tau, lapack_complex_float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_zungql( lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sormql( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const float* a, lapack_int* lda, - const float* tau, float* c, lapack_int* ldc, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dormql( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const double* a, lapack_int* lda, - const double* tau, double* c, lapack_int* ldc, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cunmql( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* tau, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zunmql( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* tau, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sgerqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* tau, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dgerqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* tau, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cgerqf( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* tau, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zgerqf( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sorgrq( lapack_int* m, lapack_int* n, lapack_int* k, float* a, - lapack_int* lda, const float* tau, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dorgrq( lapack_int* m, lapack_int* n, lapack_int* k, double* a, - lapack_int* lda, const double* tau, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cungrq( lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_float* a, lapack_int* lda, - const lapack_complex_float* tau, lapack_complex_float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_zungrq( lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sormrq( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, 
const float* a, lapack_int* lda, - const float* tau, float* c, lapack_int* ldc, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dormrq( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const double* a, lapack_int* lda, - const double* tau, double* c, lapack_int* ldc, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cunmrq( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* tau, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zunmrq( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, const lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* tau, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_stzrzf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* tau, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dtzrzf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* tau, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_ctzrzf( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* tau, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_ztzrzf( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sormrz( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* l, const float* a, - lapack_int* lda, const float* tau, float* c, - lapack_int* ldc, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dormrz( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* l, const double* a, - lapack_int* lda, const double* tau, double* c, - lapack_int* ldc, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cunmrz( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* l, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* tau, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zunmrz( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* l, - const lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* tau, lapack_complex_double* c, - lapack_int* ldc, lapack_complex_double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_sggqrf( lapack_int* n, lapack_int* m, lapack_int* p, float* a, - lapack_int* lda, float* taua, float* b, lapack_int* ldb, - float* taub, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dggqrf( lapack_int* n, lapack_int* m, lapack_int* p, double* a, - lapack_int* lda, double* taua, double* b, lapack_int* ldb, - double* taub, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cggqrf( lapack_int* n, lapack_int* m, lapack_int* p, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* taua, lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* taub, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zggqrf( lapack_int* n, lapack_int* m, lapack_int* p, - lapack_complex_double* a, lapack_int* lda, - 
lapack_complex_double* taua, lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* taub, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sggrqf( lapack_int* m, lapack_int* p, lapack_int* n, float* a, - lapack_int* lda, float* taua, float* b, lapack_int* ldb, - float* taub, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dggrqf( lapack_int* m, lapack_int* p, lapack_int* n, double* a, - lapack_int* lda, double* taua, double* b, lapack_int* ldb, - double* taub, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cggrqf( lapack_int* m, lapack_int* p, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* taua, lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* taub, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zggrqf( lapack_int* m, lapack_int* p, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* taua, lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* taub, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sgebrd( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* d, float* e, float* tauq, float* taup, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dgebrd( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* d, double* e, double* tauq, double* taup, - double* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_cgebrd( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, float* d, float* e, - lapack_complex_float* tauq, lapack_complex_float* taup, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zgebrd( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, double* d, double* e, - lapack_complex_double* tauq, lapack_complex_double* taup, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc, - lapack_int* kl, lapack_int* ku, float* ab, lapack_int* ldab, - float* d, float* e, float* q, lapack_int* ldq, float* pt, - lapack_int* ldpt, float* c, lapack_int* ldc, float* work, - lapack_int *info ); -void LAPACK_dgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc, - lapack_int* kl, lapack_int* ku, double* ab, - lapack_int* ldab, double* d, double* e, double* q, - lapack_int* ldq, double* pt, lapack_int* ldpt, double* c, - lapack_int* ldc, double* work, lapack_int *info ); -void LAPACK_cgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc, - lapack_int* kl, lapack_int* ku, lapack_complex_float* ab, - lapack_int* ldab, float* d, float* e, - lapack_complex_float* q, lapack_int* ldq, - lapack_complex_float* pt, lapack_int* ldpt, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, float* rwork, - lapack_int *info ); -void LAPACK_zgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc, - lapack_int* kl, lapack_int* ku, lapack_complex_double* ab, - lapack_int* ldab, double* d, double* e, - lapack_complex_double* q, lapack_int* ldq, - lapack_complex_double* pt, lapack_int* ldpt, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_sorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k, - float* a, lapack_int* lda, const float* tau, float* work, - lapack_int* lwork, lapack_int *info ); -void 
LAPACK_dorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k, - double* a, lapack_int* lda, const double* tau, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_sormbr( char* vect, char* side, char* trans, lapack_int* m, - lapack_int* n, lapack_int* k, const float* a, - lapack_int* lda, const float* tau, float* c, - lapack_int* ldc, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dormbr( char* vect, char* side, char* trans, lapack_int* m, - lapack_int* n, lapack_int* k, const double* a, - lapack_int* lda, const double* tau, double* c, - lapack_int* ldc, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_float* a, lapack_int* lda, - const lapack_complex_float* tau, lapack_complex_float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_zungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k, - lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cunmbr( char* vect, char* side, char* trans, lapack_int* m, - lapack_int* n, lapack_int* k, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* tau, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zunmbr( char* vect, char* side, char* trans, lapack_int* m, - lapack_int* n, lapack_int* k, - const lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* tau, lapack_complex_double* c, - lapack_int* ldc, lapack_complex_double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_sbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt, - lapack_int* nru, lapack_int* ncc, float* d, float* e, - float* vt, lapack_int* ldvt, float* u, lapack_int* ldu, - float* c, lapack_int* ldc, float* work, lapack_int *info ); -void LAPACK_dbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt, - lapack_int* nru, lapack_int* ncc, double* d, double* e, - double* vt, lapack_int* ldvt, double* u, lapack_int* ldu, - double* c, lapack_int* ldc, double* work, - lapack_int *info ); -void LAPACK_cbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt, - lapack_int* nru, lapack_int* ncc, float* d, float* e, - lapack_complex_float* vt, lapack_int* ldvt, - lapack_complex_float* u, lapack_int* ldu, - lapack_complex_float* c, lapack_int* ldc, float* work, - lapack_int *info ); -void LAPACK_zbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt, - lapack_int* nru, lapack_int* ncc, double* d, double* e, - lapack_complex_double* vt, lapack_int* ldvt, - lapack_complex_double* u, lapack_int* ldu, - lapack_complex_double* c, lapack_int* ldc, double* work, - lapack_int *info ); -void LAPACK_sbdsdc( char* uplo, char* compq, lapack_int* n, float* d, float* e, - float* u, lapack_int* ldu, float* vt, lapack_int* ldvt, - float* q, lapack_int* iq, float* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_dbdsdc( char* uplo, char* compq, lapack_int* n, double* d, - double* e, double* u, lapack_int* ldu, double* vt, - lapack_int* ldvt, double* q, lapack_int* iq, double* work, - lapack_int* iwork, lapack_int *info ); -void LAPACK_sbdsvdx( char* uplo, char* jobz, char* range, - lapack_int* n, float* d, float* e, - float* vl, float* vu, - lapack_int* il, lapack_int* iu, lapack_int* ns, - float* s, float* z, lapack_int* ldz, - float* work, lapack_int *iwork, lapack_int *info ); -void LAPACK_dbdsvdx( char* uplo, 
char* jobz, char* range, - lapack_int* n, double* d, double* e, - double* vl, double* vu, - lapack_int* il, lapack_int* iu, lapack_int* ns, - double* s, double* z, lapack_int* ldz, - double* work, lapack_int *iwork, lapack_int *info ); -void LAPACK_ssytrd( char* uplo, lapack_int* n, float* a, lapack_int* lda, - float* d, float* e, float* tau, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dsytrd( char* uplo, lapack_int* n, double* a, lapack_int* lda, - double* d, double* e, double* tau, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_sorgtr( char* uplo, lapack_int* n, float* a, lapack_int* lda, - const float* tau, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dorgtr( char* uplo, lapack_int* n, double* a, lapack_int* lda, - const double* tau, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_sormtr( char* side, char* uplo, char* trans, lapack_int* m, - lapack_int* n, const float* a, lapack_int* lda, - const float* tau, float* c, lapack_int* ldc, float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_dormtr( char* side, char* uplo, char* trans, lapack_int* m, - lapack_int* n, const double* a, lapack_int* lda, - const double* tau, double* c, lapack_int* ldc, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_chetrd( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, float* d, float* e, - lapack_complex_float* tau, lapack_complex_float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_zhetrd( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, double* d, double* e, - lapack_complex_double* tau, lapack_complex_double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cungtr( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* tau, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zungtr( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cunmtr( char* side, char* uplo, char* trans, lapack_int* m, - lapack_int* n, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* tau, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zunmtr( char* side, char* uplo, char* trans, lapack_int* m, - lapack_int* n, const lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* tau, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_ssptrd( char* uplo, lapack_int* n, float* ap, float* d, float* e, - float* tau, lapack_int *info ); -void LAPACK_dsptrd( char* uplo, lapack_int* n, double* ap, double* d, double* e, - double* tau, lapack_int *info ); -void LAPACK_sopgtr( char* uplo, lapack_int* n, const float* ap, - const float* tau, float* q, lapack_int* ldq, float* work, - lapack_int *info ); -void LAPACK_dopgtr( char* uplo, lapack_int* n, const double* ap, - const double* tau, double* q, lapack_int* ldq, double* work, - lapack_int *info ); -void LAPACK_sopmtr( char* side, char* uplo, char* trans, lapack_int* m, - lapack_int* n, const float* ap, const float* tau, float* c, - lapack_int* ldc, float* work, lapack_int *info ); -void LAPACK_dopmtr( char* side, char* uplo, char* trans, lapack_int* m, - lapack_int* n, const double* ap, const 
double* tau, - double* c, lapack_int* ldc, double* work, - lapack_int *info ); -void LAPACK_chptrd( char* uplo, lapack_int* n, lapack_complex_float* ap, - float* d, float* e, lapack_complex_float* tau, - lapack_int *info ); -void LAPACK_zhptrd( char* uplo, lapack_int* n, lapack_complex_double* ap, - double* d, double* e, lapack_complex_double* tau, - lapack_int *info ); -void LAPACK_cupgtr( char* uplo, lapack_int* n, const lapack_complex_float* ap, - const lapack_complex_float* tau, lapack_complex_float* q, - lapack_int* ldq, lapack_complex_float* work, - lapack_int *info ); -void LAPACK_zupgtr( char* uplo, lapack_int* n, const lapack_complex_double* ap, - const lapack_complex_double* tau, lapack_complex_double* q, - lapack_int* ldq, lapack_complex_double* work, - lapack_int *info ); -void LAPACK_cupmtr( char* side, char* uplo, char* trans, lapack_int* m, - lapack_int* n, const lapack_complex_float* ap, - const lapack_complex_float* tau, lapack_complex_float* c, - lapack_int* ldc, lapack_complex_float* work, - lapack_int *info ); -void LAPACK_zupmtr( char* side, char* uplo, char* trans, lapack_int* m, - lapack_int* n, const lapack_complex_double* ap, - const lapack_complex_double* tau, lapack_complex_double* c, - lapack_int* ldc, lapack_complex_double* work, - lapack_int *info ); -void LAPACK_ssbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd, - float* ab, lapack_int* ldab, float* d, float* e, float* q, - lapack_int* ldq, float* work, lapack_int *info ); -void LAPACK_dsbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd, - double* ab, lapack_int* ldab, double* d, double* e, - double* q, lapack_int* ldq, double* work, - lapack_int *info ); -void LAPACK_chbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd, - lapack_complex_float* ab, lapack_int* ldab, float* d, - float* e, lapack_complex_float* q, lapack_int* ldq, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zhbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd, - lapack_complex_double* ab, lapack_int* ldab, double* d, - double* e, lapack_complex_double* q, lapack_int* ldq, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_ssterf( lapack_int* n, float* d, float* e, lapack_int *info ); -void LAPACK_dsterf( lapack_int* n, double* d, double* e, lapack_int *info ); -void LAPACK_ssteqr( char* compz, lapack_int* n, float* d, float* e, float* z, - lapack_int* ldz, float* work, lapack_int *info ); -void LAPACK_dsteqr( char* compz, lapack_int* n, double* d, double* e, double* z, - lapack_int* ldz, double* work, lapack_int *info ); -void LAPACK_csteqr( char* compz, lapack_int* n, float* d, float* e, - lapack_complex_float* z, lapack_int* ldz, float* work, - lapack_int *info ); -void LAPACK_zsteqr( char* compz, lapack_int* n, double* d, double* e, - lapack_complex_double* z, lapack_int* ldz, double* work, - lapack_int *info ); -void LAPACK_sstemr( char* jobz, char* range, lapack_int* n, float* d, float* e, - float* vl, float* vu, lapack_int* il, lapack_int* iu, - lapack_int* m, float* w, float* z, lapack_int* ldz, - lapack_int* nzc, lapack_int* isuppz, lapack_logical* tryrac, - float* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_dstemr( char* jobz, char* range, lapack_int* n, double* d, - double* e, double* vl, double* vu, lapack_int* il, - lapack_int* iu, lapack_int* m, double* w, double* z, - lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz, - lapack_logical* tryrac, double* work, lapack_int* lwork, - lapack_int* iwork, 
lapack_int* liwork, lapack_int *info ); -void LAPACK_cstemr( char* jobz, char* range, lapack_int* n, float* d, float* e, - float* vl, float* vu, lapack_int* il, lapack_int* iu, - lapack_int* m, float* w, lapack_complex_float* z, - lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz, - lapack_logical* tryrac, float* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_zstemr( char* jobz, char* range, lapack_int* n, double* d, - double* e, double* vl, double* vu, lapack_int* il, - lapack_int* iu, lapack_int* m, double* w, - lapack_complex_double* z, lapack_int* ldz, lapack_int* nzc, - lapack_int* isuppz, lapack_logical* tryrac, double* work, - lapack_int* lwork, lapack_int* iwork, lapack_int* liwork, - lapack_int *info ); -void LAPACK_sstedc( char* compz, lapack_int* n, float* d, float* e, float* z, - lapack_int* ldz, float* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_dstedc( char* compz, lapack_int* n, double* d, double* e, double* z, - lapack_int* ldz, double* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_cstedc( char* compz, lapack_int* n, float* d, float* e, - lapack_complex_float* z, lapack_int* ldz, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork, - lapack_int *info ); -void LAPACK_zstedc( char* compz, lapack_int* n, double* d, double* e, - lapack_complex_double* z, lapack_int* ldz, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int* lrwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_sstegr( char* jobz, char* range, lapack_int* n, float* d, float* e, - float* vl, float* vu, lapack_int* il, lapack_int* iu, - float* abstol, lapack_int* m, float* w, float* z, - lapack_int* ldz, lapack_int* isuppz, float* work, - lapack_int* lwork, lapack_int* iwork, lapack_int* liwork, - lapack_int *info ); -void LAPACK_dstegr( char* jobz, char* range, lapack_int* n, double* d, - double* e, double* vl, double* vu, lapack_int* il, - lapack_int* iu, double* abstol, lapack_int* m, double* w, - double* z, lapack_int* ldz, lapack_int* isuppz, - double* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_cstegr( char* jobz, char* range, lapack_int* n, float* d, float* e, - float* vl, float* vu, lapack_int* il, lapack_int* iu, - float* abstol, lapack_int* m, float* w, - lapack_complex_float* z, lapack_int* ldz, - lapack_int* isuppz, float* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_zstegr( char* jobz, char* range, lapack_int* n, double* d, - double* e, double* vl, double* vu, lapack_int* il, - lapack_int* iu, double* abstol, lapack_int* m, double* w, - lapack_complex_double* z, lapack_int* ldz, - lapack_int* isuppz, double* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_spteqr( char* compz, lapack_int* n, float* d, float* e, float* z, - lapack_int* ldz, float* work, lapack_int *info ); -void LAPACK_dpteqr( char* compz, lapack_int* n, double* d, double* e, double* z, - lapack_int* ldz, double* work, lapack_int *info ); -void LAPACK_cpteqr( char* compz, lapack_int* n, float* d, float* e, - lapack_complex_float* z, lapack_int* ldz, float* work, - lapack_int *info ); -void LAPACK_zpteqr( char* compz, lapack_int* n, double* d, double* e, - lapack_complex_double* z, lapack_int* ldz, 
double* work, - lapack_int *info ); -void LAPACK_sstebz( char* range, char* order, lapack_int* n, float* vl, - float* vu, lapack_int* il, lapack_int* iu, float* abstol, - const float* d, const float* e, lapack_int* m, - lapack_int* nsplit, float* w, lapack_int* iblock, - lapack_int* isplit, float* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_dstebz( char* range, char* order, lapack_int* n, double* vl, - double* vu, lapack_int* il, lapack_int* iu, double* abstol, - const double* d, const double* e, lapack_int* m, - lapack_int* nsplit, double* w, lapack_int* iblock, - lapack_int* isplit, double* work, lapack_int* iwork, - lapack_int *info ); -void LAPACK_sstein( lapack_int* n, const float* d, const float* e, - lapack_int* m, const float* w, const lapack_int* iblock, - const lapack_int* isplit, float* z, lapack_int* ldz, - float* work, lapack_int* iwork, lapack_int* ifailv, - lapack_int *info ); -void LAPACK_dstein( lapack_int* n, const double* d, const double* e, - lapack_int* m, const double* w, const lapack_int* iblock, - const lapack_int* isplit, double* z, lapack_int* ldz, - double* work, lapack_int* iwork, lapack_int* ifailv, - lapack_int *info ); -void LAPACK_cstein( lapack_int* n, const float* d, const float* e, - lapack_int* m, const float* w, const lapack_int* iblock, - const lapack_int* isplit, lapack_complex_float* z, - lapack_int* ldz, float* work, lapack_int* iwork, - lapack_int* ifailv, lapack_int *info ); -void LAPACK_zstein( lapack_int* n, const double* d, const double* e, - lapack_int* m, const double* w, const lapack_int* iblock, - const lapack_int* isplit, lapack_complex_double* z, - lapack_int* ldz, double* work, lapack_int* iwork, - lapack_int* ifailv, lapack_int *info ); -void LAPACK_sdisna( char* job, lapack_int* m, lapack_int* n, const float* d, - float* sep, lapack_int *info ); -void LAPACK_ddisna( char* job, lapack_int* m, lapack_int* n, const double* d, - double* sep, lapack_int *info ); -void LAPACK_ssygst( lapack_int* itype, char* uplo, lapack_int* n, float* a, - lapack_int* lda, const float* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_dsygst( lapack_int* itype, char* uplo, lapack_int* n, double* a, - lapack_int* lda, const double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_chegst( lapack_int* itype, char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - const lapack_complex_float* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_zhegst( lapack_int* itype, char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_sspgst( lapack_int* itype, char* uplo, lapack_int* n, float* ap, - const float* bp, lapack_int *info ); -void LAPACK_dspgst( lapack_int* itype, char* uplo, lapack_int* n, double* ap, - const double* bp, lapack_int *info ); -void LAPACK_chpgst( lapack_int* itype, char* uplo, lapack_int* n, - lapack_complex_float* ap, const lapack_complex_float* bp, - lapack_int *info ); -void LAPACK_zhpgst( lapack_int* itype, char* uplo, lapack_int* n, - lapack_complex_double* ap, const lapack_complex_double* bp, - lapack_int *info ); -void LAPACK_ssbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, float* ab, lapack_int* ldab, - const float* bb, lapack_int* ldbb, float* x, - lapack_int* ldx, float* work, lapack_int *info ); -void LAPACK_dsbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, double* ab, lapack_int* ldab, - const double* bb, lapack_int* ldbb, 
double* x,
-                   lapack_int* ldx, double* work, lapack_int *info );
-void LAPACK_chbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,
-                   lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,
-                   const lapack_complex_float* bb, lapack_int* ldbb,
-                   lapack_complex_float* x, lapack_int* ldx,
-                   lapack_complex_float* work, float* rwork,
-                   lapack_int *info );
-void LAPACK_zhbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,
-                   lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,
-                   const lapack_complex_double* bb, lapack_int* ldbb,
-                   lapack_complex_double* x, lapack_int* ldx,
-                   lapack_complex_double* work, double* rwork,
-                   lapack_int *info );
-void LAPACK_spbstf( char* uplo, lapack_int* n, lapack_int* kb, float* bb,
-                   lapack_int* ldbb, lapack_int *info );
-void LAPACK_dpbstf( char* uplo, lapack_int* n, lapack_int* kb, double* bb,
-                   lapack_int* ldbb, lapack_int *info );
-void LAPACK_cpbstf( char* uplo, lapack_int* n, lapack_int* kb,
-                   lapack_complex_float* bb, lapack_int* ldbb,
-                   lapack_int *info );
-void LAPACK_zpbstf( char* uplo, lapack_int* n, lapack_int* kb,
-                   lapack_complex_double* bb, lapack_int* ldbb,
-                   lapack_int *info );
-void LAPACK_sgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a,
-                   lapack_int* lda, float* tau, float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_dgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a,
-                   lapack_int* lda, double* tau, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_cgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* tau, lapack_complex_float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_zgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* tau, lapack_complex_double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_sorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a,
-                   lapack_int* lda, const float* tau, float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_dorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a,
-                   lapack_int* lda, const double* tau, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_sormhr( char* side, char* trans, lapack_int* m, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi, const float* a,
-                   lapack_int* lda, const float* tau, float* c,
-                   lapack_int* ldc, float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_dormhr( char* side, char* trans, lapack_int* m, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi, const double* a,
-                   lapack_int* lda, const double* tau, double* c,
-                   lapack_int* ldc, double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_cunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi,
-                   lapack_complex_float* a, lapack_int* lda,
-                   const lapack_complex_float* tau, lapack_complex_float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_zunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi,
-                   lapack_complex_double* a, lapack_int* lda,
-                   const lapack_complex_double* tau,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_cunmhr( char* side, char* trans, lapack_int* m, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi,
-                   const lapack_complex_float* a, lapack_int* lda,
-                   const lapack_complex_float* tau, lapack_complex_float* c,
-                   lapack_int* ldc, lapack_complex_float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_zunmhr( char* side, char* trans, lapack_int* m, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi,
-                   const lapack_complex_double* a, lapack_int* lda,
-                   const lapack_complex_double* tau, lapack_complex_double* c,
-                   lapack_int* ldc, lapack_complex_double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_sgebal( char* job, lapack_int* n, float* a, lapack_int* lda,
-                   lapack_int* ilo, lapack_int* ihi, float* scale,
-                   lapack_int *info );
-void LAPACK_dgebal( char* job, lapack_int* n, double* a, lapack_int* lda,
-                   lapack_int* ilo, lapack_int* ihi, double* scale,
-                   lapack_int *info );
-void LAPACK_cgebal( char* job, lapack_int* n, lapack_complex_float* a,
-                   lapack_int* lda, lapack_int* ilo, lapack_int* ihi,
-                   float* scale, lapack_int *info );
-void LAPACK_zgebal( char* job, lapack_int* n, lapack_complex_double* a,
-                   lapack_int* lda, lapack_int* ilo, lapack_int* ihi,
-                   double* scale, lapack_int *info );
-void LAPACK_sgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, const float* scale, lapack_int* m,
-                   float* v, lapack_int* ldv, lapack_int *info );
-void LAPACK_dgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, const double* scale, lapack_int* m,
-                   double* v, lapack_int* ldv, lapack_int *info );
-void LAPACK_cgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, const float* scale, lapack_int* m,
-                   lapack_complex_float* v, lapack_int* ldv,
-                   lapack_int *info );
-void LAPACK_zgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, const double* scale, lapack_int* m,
-                   lapack_complex_double* v, lapack_int* ldv,
-                   lapack_int *info );
-void LAPACK_shseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, float* h, lapack_int* ldh, float* wr,
-                   float* wi, float* z, lapack_int* ldz, float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_dhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, double* h, lapack_int* ldh, double* wr,
-                   double* wi, double* z, lapack_int* ldz, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_chseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, lapack_complex_float* h, lapack_int* ldh,
-                   lapack_complex_float* w, lapack_complex_float* z,
-                   lapack_int* ldz, lapack_complex_float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_zhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, lapack_complex_double* h, lapack_int* ldh,
-                   lapack_complex_double* w, lapack_complex_double* z,
-                   lapack_int* ldz, lapack_complex_double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_shsein( char* job, char* eigsrc, char* initv,
-                   lapack_logical* select, lapack_int* n, const float* h,
-                   lapack_int* ldh, float* wr, const float* wi, float* vl,
-                   lapack_int* ldvl, float* vr, lapack_int* ldvr,
-                   lapack_int* mm, lapack_int* m, float* work,
-                   lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );
-void LAPACK_dhsein( char* job, char* eigsrc, char* initv,
-                   lapack_logical* select, lapack_int* n, const double* h,
-                   lapack_int* ldh, double* wr, const double* wi, double* vl,
-                   lapack_int* ldvl, double* vr, lapack_int* ldvr,
-                   lapack_int* mm, lapack_int* m, double* work,
-                   lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );
-void LAPACK_chsein( char* job, char* eigsrc, char* initv,
-                   const lapack_logical* select, lapack_int* n,
-                   const lapack_complex_float* h, lapack_int* ldh,
-                   lapack_complex_float* w, lapack_complex_float* vl,
-                   lapack_int* ldvl, lapack_complex_float* vr,
-                   lapack_int* ldvr, lapack_int* mm, lapack_int* m,
-                   lapack_complex_float* work, float* rwork,
-                   lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );
-void LAPACK_zhsein( char* job, char* eigsrc, char* initv,
-                   const lapack_logical* select, lapack_int* n,
-                   const lapack_complex_double* h, lapack_int* ldh,
-                   lapack_complex_double* w, lapack_complex_double* vl,
-                   lapack_int* ldvl, lapack_complex_double* vr,
-                   lapack_int* ldvr, lapack_int* mm, lapack_int* m,
-                   lapack_complex_double* work, double* rwork,
-                   lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );
-void LAPACK_strevc( char* side, char* howmny, lapack_logical* select,
-                   lapack_int* n, const float* t, lapack_int* ldt, float* vl,
-                   lapack_int* ldvl, float* vr, lapack_int* ldvr,
-                   lapack_int* mm, lapack_int* m, float* work,
-                   lapack_int *info );
-void LAPACK_dtrevc( char* side, char* howmny, lapack_logical* select,
-                   lapack_int* n, const double* t, lapack_int* ldt, double* vl,
-                   lapack_int* ldvl, double* vr, lapack_int* ldvr,
-                   lapack_int* mm, lapack_int* m, double* work,
-                   lapack_int *info );
-void LAPACK_ctrevc( char* side, char* howmny, const lapack_logical* select,
-                   lapack_int* n, lapack_complex_float* t, lapack_int* ldt,
-                   lapack_complex_float* vl, lapack_int* ldvl,
-                   lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm,
-                   lapack_int* m, lapack_complex_float* work, float* rwork,
-                   lapack_int *info );
-void LAPACK_ztrevc( char* side, char* howmny, const lapack_logical* select,
-                   lapack_int* n, lapack_complex_double* t, lapack_int* ldt,
-                   lapack_complex_double* vl, lapack_int* ldvl,
-                   lapack_complex_double* vr, lapack_int* ldvr, lapack_int* mm,
-                   lapack_int* m, lapack_complex_double* work, double* rwork,
-                   lapack_int *info );
-void LAPACK_strsna( char* job, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const float* t, lapack_int* ldt,
-                   const float* vl, lapack_int* ldvl, const float* vr,
-                   lapack_int* ldvr, float* s, float* sep, lapack_int* mm,
-                   lapack_int* m, float* work, lapack_int* ldwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_dtrsna( char* job, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const double* t, lapack_int* ldt,
-                   const double* vl, lapack_int* ldvl, const double* vr,
-                   lapack_int* ldvr, double* s, double* sep, lapack_int* mm,
-                   lapack_int* m, double* work, lapack_int* ldwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_ctrsna( char* job, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const lapack_complex_float* t,
-                   lapack_int* ldt, const lapack_complex_float* vl,
-                   lapack_int* ldvl, const lapack_complex_float* vr,
-                   lapack_int* ldvr, float* s, float* sep, lapack_int* mm,
-                   lapack_int* m, lapack_complex_float* work,
-                   lapack_int* ldwork, float* rwork, lapack_int *info );
-void LAPACK_ztrsna( char* job, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const lapack_complex_double* t,
-                   lapack_int* ldt, const lapack_complex_double* vl,
-                   lapack_int* ldvl, const lapack_complex_double* vr,
-                   lapack_int* ldvr, double* s, double* sep, lapack_int* mm,
-                   lapack_int* m, lapack_complex_double* work,
-                   lapack_int* ldwork, double* rwork, lapack_int *info );
-void LAPACK_strexc( char* compq, lapack_int* n, float* t, lapack_int* ldt,
-                   float* q, lapack_int* ldq, lapack_int* ifst,
-                   lapack_int* ilst, float* work, lapack_int *info );
-void LAPACK_dtrexc( char* compq, lapack_int* n, double* t, lapack_int* ldt,
-                   double* q, lapack_int* ldq, lapack_int* ifst,
-                   lapack_int* ilst, double* work, lapack_int *info );
-void LAPACK_ctrexc( char* compq, lapack_int* n, lapack_complex_float* t,
-                   lapack_int* ldt, lapack_complex_float* q, lapack_int* ldq,
-                   lapack_int* ifst, lapack_int* ilst, lapack_int *info );
-void LAPACK_ztrexc( char* compq, lapack_int* n, lapack_complex_double* t,
-                   lapack_int* ldt, lapack_complex_double* q, lapack_int* ldq,
-                   lapack_int* ifst, lapack_int* ilst, lapack_int *info );
-void LAPACK_strsen( char* job, char* compq, const lapack_logical* select,
-                   lapack_int* n, float* t, lapack_int* ldt, float* q,
-                   lapack_int* ldq, float* wr, float* wi, lapack_int* m,
-                   float* s, float* sep, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_dtrsen( char* job, char* compq, const lapack_logical* select,
-                   lapack_int* n, double* t, lapack_int* ldt, double* q,
-                   lapack_int* ldq, double* wr, double* wi, lapack_int* m,
-                   double* s, double* sep, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_ctrsen( char* job, char* compq, const lapack_logical* select,
-                   lapack_int* n, lapack_complex_float* t, lapack_int* ldt,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* w, lapack_int* m, float* s,
-                   float* sep, lapack_complex_float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_ztrsen( char* job, char* compq, const lapack_logical* select,
-                   lapack_int* n, lapack_complex_double* t, lapack_int* ldt,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* w, lapack_int* m, double* s,
-                   double* sep, lapack_complex_double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_strsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,
-                   lapack_int* n, const float* a, lapack_int* lda,
-                   const float* b, lapack_int* ldb, float* c, lapack_int* ldc,
-                   float* scale, lapack_int *info );
-void LAPACK_dtrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,
-                   lapack_int* n, const double* a, lapack_int* lda,
-                   const double* b, lapack_int* ldb, double* c,
-                   lapack_int* ldc, double* scale, lapack_int *info );
-void LAPACK_ctrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,
-                   lapack_int* n, const lapack_complex_float* a,
-                   lapack_int* lda, const lapack_complex_float* b,
-                   lapack_int* ldb, lapack_complex_float* c, lapack_int* ldc,
-                   float* scale, lapack_int *info );
-void LAPACK_ztrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,
-                   lapack_int* n, const lapack_complex_double* a,
-                   lapack_int* lda, const lapack_complex_double* b,
-                   lapack_int* ldb, lapack_complex_double* c, lapack_int* ldc,
-                   double* scale, lapack_int *info );
-void LAPACK_sgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, float* a, lapack_int* lda, float* b,
-                   lapack_int* ldb, float* q, lapack_int* ldq, float* z,
-                   lapack_int* ldz, lapack_int *info );
-void LAPACK_dgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, double* a, lapack_int* lda, double* b,
-                   lapack_int* ldb, double* q, lapack_int* ldq, double* z,
-                   lapack_int* ldz, lapack_int *info );
-void LAPACK_cgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* z, lapack_int* ldz,
-                   lapack_int *info );
-void LAPACK_zgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* z, lapack_int* ldz,
-                   lapack_int *info );
-void LAPACK_sgghd3( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, float* a, lapack_int* lda, float* b,
-                   lapack_int* ldb, float* q, lapack_int* ldq, float* z,
-                   lapack_int* ldz, float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_dgghd3( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, double* a, lapack_int* lda, double* b,
-                   lapack_int* ldb, double* q, lapack_int* ldq, double* z,
-                   lapack_int* ldz, double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_cgghd3( char* compq, char* compz, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* z, lapack_int* ldz,
-                   lapack_complex_float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_zgghd3( char* compq, char* compz, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* z, lapack_int* ldz,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_sggbal( char* job, lapack_int* n, float* a, lapack_int* lda,
-                   float* b, lapack_int* ldb, lapack_int* ilo, lapack_int* ihi,
-                   float* lscale, float* rscale, float* work,
-                   lapack_int *info );
-void LAPACK_dggbal( char* job, lapack_int* n, double* a, lapack_int* lda,
-                   double* b, lapack_int* ldb, lapack_int* ilo,
-                   lapack_int* ihi, double* lscale, double* rscale,
-                   double* work, lapack_int *info );
-void LAPACK_cggbal( char* job, lapack_int* n, lapack_complex_float* a,
-                   lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
-                   lapack_int* ilo, lapack_int* ihi, float* lscale,
-                   float* rscale, float* work, lapack_int *info );
-void LAPACK_zggbal( char* job, lapack_int* n, lapack_complex_double* a,
-                   lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
-                   lapack_int* ilo, lapack_int* ihi, double* lscale,
-                   double* rscale, double* work, lapack_int *info );
-void LAPACK_sggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, const float* lscale, const float* rscale,
-                   lapack_int* m, float* v, lapack_int* ldv,
-                   lapack_int *info );
-void LAPACK_dggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, const double* lscale, const double* rscale,
-                   lapack_int* m, double* v, lapack_int* ldv,
-                   lapack_int *info );
-void LAPACK_cggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, const float* lscale, const float* rscale,
-                   lapack_int* m, lapack_complex_float* v, lapack_int* ldv,
-                   lapack_int *info );
-void LAPACK_zggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,
-                   lapack_int* ihi, const double* lscale, const double* rscale,
-                   lapack_int* m, lapack_complex_double* v, lapack_int* ldv,
-                   lapack_int *info );
-void LAPACK_shgeqz( char* job, char* compq, char* compz, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi, float* h, lapack_int* ldh,
-                   float* t, lapack_int* ldt, float* alphar, float* alphai,
-                   float* beta, float* q, lapack_int* ldq, float* z,
-                   lapack_int* ldz, float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_dhgeqz( char* job, char* compq, char* compz, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi, double* h,
-                   lapack_int* ldh, double* t, lapack_int* ldt, double* alphar,
-                   double* alphai, double* beta, double* q, lapack_int* ldq,
-                   double* z, lapack_int* ldz, double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_chgeqz( char* job, char* compq, char* compz, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi, lapack_complex_float* h,
-                   lapack_int* ldh, lapack_complex_float* t, lapack_int* ldt,
-                   lapack_complex_float* alpha, lapack_complex_float* beta,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* z, lapack_int* ldz,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int *info );
-void LAPACK_zhgeqz( char* job, char* compq, char* compz, lapack_int* n,
-                   lapack_int* ilo, lapack_int* ihi, lapack_complex_double* h,
-                   lapack_int* ldh, lapack_complex_double* t, lapack_int* ldt,
-                   lapack_complex_double* alpha, lapack_complex_double* beta,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* z, lapack_int* ldz,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int *info );
-void LAPACK_stgevc( char* side, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const float* s, lapack_int* lds,
-                   const float* p, lapack_int* ldp, float* vl,
-                   lapack_int* ldvl, float* vr, lapack_int* ldvr,
-                   lapack_int* mm, lapack_int* m, float* work,
-                   lapack_int *info );
-void LAPACK_dtgevc( char* side, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const double* s, lapack_int* lds,
-                   const double* p, lapack_int* ldp, double* vl,
-                   lapack_int* ldvl, double* vr, lapack_int* ldvr,
-                   lapack_int* mm, lapack_int* m, double* work,
-                   lapack_int *info );
-void LAPACK_ctgevc( char* side, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const lapack_complex_float* s,
-                   lapack_int* lds, const lapack_complex_float* p,
-                   lapack_int* ldp, lapack_complex_float* vl, lapack_int* ldvl,
-                   lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm,
-                   lapack_int* m, lapack_complex_float* work, float* rwork,
-                   lapack_int *info );
-void LAPACK_ztgevc( char* side, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const lapack_complex_double* s,
-                   lapack_int* lds, const lapack_complex_double* p,
-                   lapack_int* ldp, lapack_complex_double* vl,
-                   lapack_int* ldvl, lapack_complex_double* vr,
-                   lapack_int* ldvr, lapack_int* mm, lapack_int* m,
-                   lapack_complex_double* work, double* rwork,
-                   lapack_int *info );
-void LAPACK_stgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
-                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
-                   float* q, lapack_int* ldq, float* z, lapack_int* ldz,
-                   lapack_int* ifst, lapack_int* ilst, float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_dtgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
-                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
-                   double* q, lapack_int* ldq, double* z, lapack_int* ldz,
-                   lapack_int* ifst, lapack_int* ilst, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_ctgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* z, lapack_int* ldz, lapack_int* ifst,
-                   lapack_int* ilst, lapack_int *info );
-void LAPACK_ztgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* z, lapack_int* ldz, lapack_int* ifst,
-                   lapack_int* ilst, lapack_int *info );
-void LAPACK_stgsen( lapack_int* ijob, lapack_logical* wantq,
-                   lapack_logical* wantz, const lapack_logical* select,
-                   lapack_int* n, float* a, lapack_int* lda, float* b,
-                   lapack_int* ldb, float* alphar, float* alphai, float* beta,
-                   float* q, lapack_int* ldq, float* z, lapack_int* ldz,
-                   lapack_int* m, float* pl, float* pr, float* dif,
-                   float* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_dtgsen( lapack_int* ijob, lapack_logical* wantq,
-                   lapack_logical* wantz, const lapack_logical* select,
-                   lapack_int* n, double* a, lapack_int* lda, double* b,
-                   lapack_int* ldb, double* alphar, double* alphai,
-                   double* beta, double* q, lapack_int* ldq, double* z,
-                   lapack_int* ldz, lapack_int* m, double* pl, double* pr,
-                   double* dif, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_ctgsen( lapack_int* ijob, lapack_logical* wantq,
-                   lapack_logical* wantz, const lapack_logical* select,
-                   lapack_int* n, lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb,
-                   lapack_complex_float* alpha, lapack_complex_float* beta,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* z, lapack_int* ldz, lapack_int* m,
-                   float* pl, float* pr, float* dif,
-                   lapack_complex_float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_ztgsen( lapack_int* ijob, lapack_logical* wantq,
-                   lapack_logical* wantz, const lapack_logical* select,
-                   lapack_int* n, lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb,
-                   lapack_complex_double* alpha, lapack_complex_double* beta,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* z, lapack_int* ldz, lapack_int* m,
-                   double* pl, double* pr, double* dif,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_stgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
-                   const float* a, lapack_int* lda, const float* b,
-                   lapack_int* ldb, float* c, lapack_int* ldc, const float* d,
-                   lapack_int* ldd, const float* e, lapack_int* lde, float* f,
-                   lapack_int* ldf, float* scale, float* dif, float* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_dtgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
-                   const double* a, lapack_int* lda, const double* b,
-                   lapack_int* ldb, double* c, lapack_int* ldc,
-                   const double* d, lapack_int* ldd, const double* e,
-                   lapack_int* lde, double* f, lapack_int* ldf, double* scale,
-                   double* dif, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_ctgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
-                   const lapack_complex_float* a, lapack_int* lda,
-                   const lapack_complex_float* b, lapack_int* ldb,
-                   lapack_complex_float* c, lapack_int* ldc,
-                   const lapack_complex_float* d, lapack_int* ldd,
-                   const lapack_complex_float* e, lapack_int* lde,
-                   lapack_complex_float* f, lapack_int* ldf, float* scale,
-                   float* dif, lapack_complex_float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_ztgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
-                   const lapack_complex_double* a, lapack_int* lda,
-                   const lapack_complex_double* b, lapack_int* ldb,
-                   lapack_complex_double* c, lapack_int* ldc,
-                   const lapack_complex_double* d, lapack_int* ldd,
-                   const lapack_complex_double* e, lapack_int* lde,
-                   lapack_complex_double* f, lapack_int* ldf, double* scale,
-                   double* dif, lapack_complex_double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_stgsna( char* job, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const float* a, lapack_int* lda,
-                   const float* b, lapack_int* ldb, const float* vl,
-                   lapack_int* ldvl, const float* vr, lapack_int* ldvr,
-                   float* s, float* dif, lapack_int* mm, lapack_int* m,
-                   float* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int *info );
-void LAPACK_dtgsna( char* job, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const double* a, lapack_int* lda,
-                   const double* b, lapack_int* ldb, const double* vl,
-                   lapack_int* ldvl, const double* vr, lapack_int* ldvr,
-                   double* s, double* dif, lapack_int* mm, lapack_int* m,
-                   double* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int *info );
-void LAPACK_ctgsna( char* job, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const lapack_complex_float* a,
-                   lapack_int* lda, const lapack_complex_float* b,
-                   lapack_int* ldb, const lapack_complex_float* vl,
-                   lapack_int* ldvl, const lapack_complex_float* vr,
-                   lapack_int* ldvr, float* s, float* dif, lapack_int* mm,
-                   lapack_int* m, lapack_complex_float* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_ztgsna( char* job, char* howmny, const lapack_logical* select,
-                   lapack_int* n, const lapack_complex_double* a,
-                   lapack_int* lda, const lapack_complex_double* b,
-                   lapack_int* ldb, const lapack_complex_double* vl,
-                   lapack_int* ldvl, const lapack_complex_double* vr,
-                   lapack_int* ldvr, double* s, double* dif, lapack_int* mm,
-                   lapack_int* m, lapack_complex_double* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_sggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, float* a, lapack_int* lda,
-                   float* b, lapack_int* ldb, float* tola, float* tolb,
-                   lapack_int* k, lapack_int* l, float* u, lapack_int* ldu,
-                   float* v, lapack_int* ldv, float* q, lapack_int* ldq,
-                   lapack_int* iwork, float* tau, float* work,
-                   lapack_int *info );
-void LAPACK_dggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, double* a, lapack_int* lda,
-                   double* b, lapack_int* ldb, double* tola, double* tolb,
-                   lapack_int* k, lapack_int* l, double* u, lapack_int* ldu,
-                   double* v, lapack_int* ldv, double* q, lapack_int* ldq,
-                   lapack_int* iwork, double* tau, double* work,
-                   lapack_int *info );
-void LAPACK_cggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, lapack_complex_float* a,
-                   lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
-                   float* tola, float* tolb, lapack_int* k, lapack_int* l,
-                   lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* v, lapack_int* ldv,
-                   lapack_complex_float* q, lapack_int* ldq, lapack_int* iwork,
-                   float* rwork, lapack_complex_float* tau,
-                   lapack_complex_float* work, lapack_int *info );
-void LAPACK_zggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, lapack_complex_double* a,
-                   lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
-                   double* tola, double* tolb, lapack_int* k, lapack_int* l,
-                   lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* v, lapack_int* ldv,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_int* iwork, double* rwork,
-                   lapack_complex_double* tau, lapack_complex_double* work,
-                   lapack_int *info );
-void LAPACK_sggsvp3( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, float* a, lapack_int* lda,
-                   float* b, lapack_int* ldb, float* tola, float* tolb,
-                   lapack_int* k, lapack_int* l, float* u, lapack_int* ldu,
-                   float* v, lapack_int* ldv, float* q, lapack_int* ldq,
-                   lapack_int* iwork, float* tau, float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_dggsvp3( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, double* a, lapack_int* lda,
-                   double* b, lapack_int* ldb, double* tola, double* tolb,
-                   lapack_int* k, lapack_int* l, double* u, lapack_int* ldu,
-                   double* v, lapack_int* ldv, double* q, lapack_int* ldq,
-                   lapack_int* iwork, double* tau, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_cggsvp3( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, lapack_complex_float* a,
-                   lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
-                   float* tola, float* tolb, lapack_int* k, lapack_int* l,
-                   lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* v, lapack_int* ldv,
-                   lapack_complex_float* q, lapack_int* ldq, lapack_int* iwork,
-                   float* rwork, lapack_complex_float* tau,
-                   lapack_complex_float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_zggsvp3( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, lapack_complex_double* a,
-                   lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
-                   double* tola, double* tolb, lapack_int* k, lapack_int* l,
-                   lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* v, lapack_int* ldv,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_int* iwork, double* rwork,
-                   lapack_complex_double* tau, lapack_complex_double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_stgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
-                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
-                   float* tola, float* tolb, float* alpha, float* beta,
-                   float* u, lapack_int* ldu, float* v, lapack_int* ldv,
-                   float* q, lapack_int* ldq, float* work, lapack_int* ncycle,
-                   lapack_int *info );
-void LAPACK_dtgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
-                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
-                   double* tola, double* tolb, double* alpha, double* beta,
-                   double* u, lapack_int* ldu, double* v, lapack_int* ldv,
-                   double* q, lapack_int* ldq, double* work,
-                   lapack_int* ncycle, lapack_int *info );
-void LAPACK_ctgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, float* tola,
-                   float* tolb, float* alpha, float* beta,
-                   lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* v, lapack_int* ldv,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* work, lapack_int* ncycle,
-                   lapack_int *info );
-void LAPACK_ztgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, double* tola,
-                   double* tolb, double* alpha, double* beta,
-                   lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* v, lapack_int* ldv,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* work, lapack_int* ncycle,
-                   lapack_int *info );
-void LAPACK_sgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
-                   float* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_dgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
-                   double* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_cgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb,
-                   lapack_complex_float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_zgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_sgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
-                   lapack_int* lda, float* b, lapack_int* ldb,
-                   lapack_int* jpvt, float* rcond, lapack_int* rank,
-                   float* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_dgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
-                   lapack_int* lda, double* b, lapack_int* ldb,
-                   lapack_int* jpvt, double* rcond, lapack_int* rank,
-                   double* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_cgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, lapack_int* jpvt,
-                   float* rcond, lapack_int* rank, lapack_complex_float* work,
-                   lapack_int* lwork, float* rwork, lapack_int *info );
-void LAPACK_zgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, lapack_int* jpvt,
-                   double* rcond, lapack_int* rank,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int *info );
-void LAPACK_sgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
-                   lapack_int* lda, float* b, lapack_int* ldb, float* s,
-                   float* rcond, lapack_int* rank, float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_dgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
-                   lapack_int* lda, double* b, lapack_int* ldb, double* s,
-                   double* rcond, lapack_int* rank, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_cgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, float* s,
-                   float* rcond, lapack_int* rank, lapack_complex_float* work,
-                   lapack_int* lwork, float* rwork, lapack_int *info );
-void LAPACK_zgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, double* s,
-                   double* rcond, lapack_int* rank,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int *info );
-void LAPACK_sgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
-                   lapack_int* lda, float* b, lapack_int* ldb, float* s,
-                   float* rcond, lapack_int* rank, float* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
-                   lapack_int* lda, double* b, lapack_int* ldb, double* s,
-                   double* rcond, lapack_int* rank, double* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, float* s,
-                   float* rcond, lapack_int* rank, lapack_complex_float* work,
-                   lapack_int* lwork, float* rwork, lapack_int* iwork,
-                   lapack_int *info );
-void LAPACK_zgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, double* s,
-                   double* rcond, lapack_int* rank,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_sgglse( lapack_int* m, lapack_int* n, lapack_int* p, float* a,
-                   lapack_int* lda, float* b, lapack_int* ldb, float* c,
-                   float* d, float* x, float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_dgglse( lapack_int* m, lapack_int* n, lapack_int* p, double* a,
-                   lapack_int* lda, double* b, lapack_int* ldb, double* c,
-                   double* d, double* x, double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_cgglse( lapack_int* m, lapack_int* n, lapack_int* p,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb,
-                   lapack_complex_float* c, lapack_complex_float* d,
-                   lapack_complex_float* x, lapack_complex_float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_zgglse( lapack_int* m, lapack_int* n, lapack_int* p,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb,
-                   lapack_complex_double* c, lapack_complex_double* d,
-                   lapack_complex_double* x, lapack_complex_double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_sggglm( lapack_int* n, lapack_int* m, lapack_int* p, float* a,
-                   lapack_int* lda, float* b, lapack_int* ldb, float* d,
-                   float* x, float* y, float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_dggglm( lapack_int* n, lapack_int* m, lapack_int* p, double* a,
-                   lapack_int* lda, double* b, lapack_int* ldb, double* d,
-                   double* x, double* y, double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_cggglm( lapack_int* n, lapack_int* m, lapack_int* p,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb,
-                   lapack_complex_float* d, lapack_complex_float* x,
-                   lapack_complex_float* y, lapack_complex_float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_zggglm( lapack_int* n, lapack_int* m, lapack_int* p,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb,
-                   lapack_complex_double* d, lapack_complex_double* x,
-                   lapack_complex_double* y, lapack_complex_double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_ssyev( char* jobz, char* uplo, lapack_int* n, float* a,
-                   lapack_int* lda, float* w, float* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_dsyev( char* jobz, char* uplo, lapack_int* n, double* a,
-                   lapack_int* lda, double* w, double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_cheev( char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda, float* w,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int *info );
-void LAPACK_zheev( char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda, double* w,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int *info );
-void LAPACK_ssyevd( char* jobz, char* uplo, lapack_int* n, float* a,
-                   lapack_int* lda, float* w, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_dsyevd( char* jobz, char* uplo, lapack_int* n, double* a,
-                   lapack_int* lda, double* w, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_cheevd( char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda, float* w,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
-                   lapack_int *info );
-void LAPACK_zheevd( char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda, double* w,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int* lrwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_ssyevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   float* a, lapack_int* lda, float* vl, float* vu,
-                   lapack_int* il, lapack_int* iu, float* abstol,
-                   lapack_int* m, float* w, float* z, lapack_int* ldz,
-                   float* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_dsyevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   double* a, lapack_int* lda, double* vl, double* vu,
-                   lapack_int* il, lapack_int* iu, double* abstol,
-                   lapack_int* m, double* w, double* z, lapack_int* ldz,
-                   double* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_cheevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda, float* vl,
-                   float* vu, lapack_int* il, lapack_int* iu, float* abstol,
-                   lapack_int* m, float* w, lapack_complex_float* z,
-                   lapack_int* ldz, lapack_complex_float* work,
-                   lapack_int* lwork, float* rwork, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_zheevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda, double* vl,
-                   double* vu, lapack_int* il, lapack_int* iu, double* abstol,
-                   lapack_int* m, double* w, lapack_complex_double* z,
-                   lapack_int* ldz, lapack_complex_double* work,
-                   lapack_int* lwork, double* rwork, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_ssyevr( char* jobz, char* range, char* uplo, lapack_int* n,
-                   float* a, lapack_int* lda, float* vl, float* vu,
-                   lapack_int* il, lapack_int* iu, float* abstol,
-                   lapack_int* m, float* w, float* z, lapack_int* ldz,
-                   lapack_int* isuppz, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_dsyevr( char* jobz, char* range, char* uplo, lapack_int* n,
-                   double* a, lapack_int* lda, double* vl, double* vu,
-                   lapack_int* il, lapack_int* iu, double* abstol,
-                   lapack_int* m, double* w, double* z, lapack_int* ldz,
-                   lapack_int* isuppz, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_cheevr( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda, float* vl,
-                   float* vu, lapack_int* il, lapack_int* iu, float* abstol,
-                   lapack_int* m, float* w, lapack_complex_float* z,
-                   lapack_int* ldz, lapack_int* isuppz,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
-                   lapack_int *info );
-void LAPACK_zheevr( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda, double* vl,
-                   double* vu, lapack_int* il, lapack_int* iu, double* abstol,
-                   lapack_int* m, double* w, lapack_complex_double* z,
-                   lapack_int* ldz, lapack_int* isuppz,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int* lrwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_sspev( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,
-                   float* z, lapack_int* ldz, float* work, lapack_int *info );
-void LAPACK_dspev( char* jobz, char* uplo, lapack_int* n, double* ap, double* w,
-                   double* z, lapack_int* ldz, double* work, lapack_int *info );
-void LAPACK_chpev( char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_float* ap, float* w, lapack_complex_float* z,
-                   lapack_int* ldz, lapack_complex_float* work, float* rwork,
-                   lapack_int *info );
-void LAPACK_zhpev( char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_double* ap, double* w,
-                   lapack_complex_double* z, lapack_int* ldz,
-                   lapack_complex_double* work, double* rwork,
-                   lapack_int *info );
-void LAPACK_sspevd( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,
-                   float* z, lapack_int* ldz, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_dspevd( char* jobz, char* uplo, lapack_int* n, double* ap,
-                   double* w, double* z, lapack_int* ldz, double* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
-                   lapack_int *info );
-void LAPACK_chpevd( char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_float* ap, float* w, lapack_complex_float* z,
-                   lapack_int* ldz, lapack_complex_float* work,
-                   lapack_int* lwork, float* rwork, lapack_int* lrwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_zhpevd( char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_double* ap, double* w,
-                   lapack_complex_double* z, lapack_int* ldz,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int* lrwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_sspevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   float* ap, float* vl, float* vu, lapack_int* il,
-                   lapack_int* iu, float* abstol, lapack_int* m, float* w,
-                   float* z, lapack_int* ldz, float* work, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_dspevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   double* ap, double* vl, double* vu, lapack_int* il,
-                   lapack_int* iu, double* abstol, lapack_int* m, double* w,
-                   double* z, lapack_int* ldz, double* work, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_chpevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_complex_float* ap, float* vl, float* vu,
-                   lapack_int* il, lapack_int* iu, float* abstol,
-                   lapack_int* m, float* w, lapack_complex_float* z,
-                   lapack_int* ldz, lapack_complex_float* work, float* rwork,
-                   lapack_int* iwork, lapack_int* ifail, lapack_int *info );
-void LAPACK_zhpevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_complex_double* ap, double* vl, double* vu,
-                   lapack_int* il, lapack_int* iu, double* abstol,
-                   lapack_int* m, double* w, lapack_complex_double* z,
-                   lapack_int* ldz, lapack_complex_double* work, double* rwork,
-                   lapack_int* iwork, lapack_int* ifail, lapack_int *info );
-void LAPACK_ssbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
-                   float* ab, lapack_int* ldab, float* w, float* z,
-                   lapack_int* ldz, float* work, lapack_int *info );
-void LAPACK_dsbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
-                   double* ab, lapack_int* ldab, double* w, double* z,
-                   lapack_int* ldz, double* work, lapack_int *info );
-void LAPACK_chbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
-                   lapack_complex_float* ab, lapack_int* ldab, float* w,
-                   lapack_complex_float* z, lapack_int* ldz,
-                   lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zhbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
-                   lapack_complex_double* ab, lapack_int* ldab, double* w,
-                   lapack_complex_double* z, lapack_int* ldz,
-                   lapack_complex_double* work, double* rwork,
-                   lapack_int *info );
-void LAPACK_ssbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
-                   float* ab, lapack_int* ldab, float* w, float* z,
-                   lapack_int* ldz, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_dsbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
-                   double* ab, lapack_int* ldab, double* w, double* z,
-                   lapack_int* ldz, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_chbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
-                   lapack_complex_float* ab, lapack_int* ldab, float* w,
-                   lapack_complex_float* z, lapack_int* ldz,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
-                   lapack_int *info );
-void LAPACK_zhbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
-                   lapack_complex_double* ab, lapack_int* ldab, double* w,
-                   lapack_complex_double* z, lapack_int* ldz,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int* lrwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_ssbevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_int* kd, float* ab, lapack_int* ldab, float* q,
-                   lapack_int* ldq, float* vl, float* vu, lapack_int* il,
-                   lapack_int* iu, float* abstol, lapack_int* m, float* w,
-                   float* z, lapack_int* ldz, float* work,
-                   lapack_int* iwork, lapack_int* ifail, lapack_int *info );
-void LAPACK_dsbevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_int* kd, double* ab, lapack_int* ldab, double* q,
-                   lapack_int* ldq, double* vl, double* vu, lapack_int* il,
-                   lapack_int* iu, double* abstol, lapack_int* m, double* w,
-                   double* z, lapack_int* ldz, double* work,
-                   lapack_int* iwork, lapack_int* ifail, lapack_int *info );
-void LAPACK_chbevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_int* kd, lapack_complex_float* ab, lapack_int* ldab,
-                   lapack_complex_float* q, lapack_int* ldq, float* vl,
-                   float* vu, lapack_int* il, lapack_int* iu, float* abstol,
-                   lapack_int* m, float* w, lapack_complex_float* z,
-                   lapack_int* ldz, lapack_complex_float* work,
-                   float* rwork, lapack_int* iwork, lapack_int* ifail, lapack_int *info );
-void LAPACK_zhbevx( char* jobz, char* range, char* uplo, lapack_int* n,
-                   lapack_int* kd, lapack_complex_double* ab, lapack_int* ldab,
-                   lapack_complex_double* q, lapack_int* ldq, double* vl,
-                   double* vu, lapack_int* il, lapack_int* iu, double* abstol,
-                   lapack_int* m, double* w, lapack_complex_double* z,
-                   lapack_int* ldz, lapack_complex_double* work,
-                   double* rwork, lapack_int* iwork, lapack_int* ifail, lapack_int *info );
-void LAPACK_sstev( char* jobz, lapack_int* n, float* d, float* e, float* z,
-                   lapack_int* ldz, float* work, lapack_int *info );
-void LAPACK_dstev( char* jobz, lapack_int* n, double* d, double* e, double* z,
-                   lapack_int* ldz, double* work, lapack_int *info );
-void LAPACK_sstevd( char* jobz, lapack_int* n, float* d, float* e, float* z,
-                   lapack_int* ldz, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_dstevd( char* jobz, lapack_int* n, double* d, double* e, double* z,
-                   lapack_int* ldz, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_sstevx( char* jobz, char* range, lapack_int* n, float* d, float* e,
-                   float* vl, float* vu, lapack_int* il, lapack_int* iu,
-                   float* abstol, lapack_int* m, float* w, float* z,
-                   lapack_int* ldz, float* work, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_dstevx( char* jobz, char* range, lapack_int* n, double* d,
-                   double* e, double* vl, double* vu, lapack_int* il,
-                   lapack_int* iu, double* abstol, lapack_int* m, double* w,
-                   double* z, lapack_int* ldz, double* work, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_sstevr( char* jobz, char* range, lapack_int* n, float* d, float* e,
-                   float* vl, float* vu, lapack_int* il, lapack_int* iu,
-                   float* abstol, lapack_int* m, float* w, float* z,
-                   lapack_int* ldz, lapack_int* isuppz, float* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
-                   lapack_int *info );
-void LAPACK_dstevr( char* jobz, char* range, lapack_int* n, double* d,
-                   double* e, double* vl, double* vu, lapack_int* il,
-                   lapack_int* iu, double* abstol, lapack_int* m, double* w,
-                   double* z, lapack_int* ldz, lapack_int* isuppz,
-                   double* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_sgees( char* jobvs, char* sort, LAPACK_S_SELECT2 select,
-                   lapack_int* n, float* a, lapack_int* lda, lapack_int* sdim,
-                   float* wr, float* wi, float* vs, lapack_int* ldvs,
-                   float* work, lapack_int* lwork, lapack_logical* bwork,
-                   lapack_int *info );
-void LAPACK_dgees( char* jobvs, char* sort, LAPACK_D_SELECT2 select,
-                   lapack_int* n, double* a, lapack_int* lda, lapack_int* sdim,
-                   double* wr, double* wi, double* vs, lapack_int* ldvs,
-                   double* work, lapack_int* lwork, lapack_logical* bwork,
-                   lapack_int *info );
-void LAPACK_cgees( char* jobvs, char* sort, LAPACK_C_SELECT1 select,
-                   lapack_int* n, lapack_complex_float* a, lapack_int* lda,
-                   lapack_int* sdim, lapack_complex_float* w,
-                   lapack_complex_float* vs, lapack_int* ldvs,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_logical* bwork, lapack_int *info );
-void LAPACK_zgees( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,
-                   lapack_int* n, lapack_complex_double* a, lapack_int* lda,
-                   lapack_int* sdim, lapack_complex_double* w,
-                   lapack_complex_double* vs, lapack_int* ldvs,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_logical* bwork, lapack_int *info );
-void LAPACK_sgeesx( char* jobvs, char* sort, LAPACK_S_SELECT2 select,
-                   char* sense, lapack_int* n, float* a, lapack_int* lda,
-                   lapack_int* sdim, float* wr, float* wi, float* vs,
-                   lapack_int* ldvs, float* rconde, float* rcondv, float* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
-                   lapack_logical* bwork, lapack_int *info );
-void LAPACK_dgeesx( char* jobvs, char* sort, LAPACK_D_SELECT2 select,
-                   char* sense, lapack_int* n, double* a, lapack_int* lda,
-                   lapack_int* sdim, double* wr, double* wi, double* vs,
-                   lapack_int* ldvs, double* rconde, double* rcondv,
-                   double* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_logical* bwork,
-                   lapack_int *info );
-void LAPACK_cgeesx( char* jobvs, char* sort, LAPACK_C_SELECT1 select,
-                   char* sense, lapack_int* n, lapack_complex_float* a,
-                   lapack_int* lda, lapack_int* sdim, lapack_complex_float* w,
-                   lapack_complex_float* vs, lapack_int* ldvs, float* rconde,
-                   float* rcondv, lapack_complex_float* work,
-                   lapack_int* lwork, float* rwork, lapack_logical* bwork,
-                   lapack_int *info );
-void LAPACK_zgeesx( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,
-                   char* sense, lapack_int* n, lapack_complex_double* a,
-                   lapack_int* lda, lapack_int* sdim, lapack_complex_double* w,
-                   lapack_complex_double* vs, lapack_int* ldvs, double* rconde,
-                   double* rcondv, lapack_complex_double* work,
-                   lapack_int* lwork, double* rwork, lapack_logical* bwork,
-                   lapack_int *info );
-void LAPACK_sgeev( char* jobvl, char* jobvr, lapack_int* n, float* a,
-                   lapack_int* lda, float* wr, float* wi, float* vl,
-                   lapack_int* ldvl, float* vr, lapack_int* ldvr, float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_dgeev( char* jobvl, char* jobvr, lapack_int* n, double* a,
-                   lapack_int* lda, double* wr, double* wi, double* vl,
-                   lapack_int* ldvl, double* vr, lapack_int* ldvr, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_cgeev( char* jobvl, char* jobvr, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* w, lapack_complex_float* vl,
-                   lapack_int* ldvl, lapack_complex_float* vr, lapack_int* ldvr,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int *info );
-void LAPACK_zgeev( char* jobvl, char* jobvr, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* w, lapack_complex_double* vl,
-                   lapack_int* ldvl, lapack_complex_double* vr,
-                   lapack_int* ldvr, lapack_complex_double* work,
-                   lapack_int* lwork, double* rwork, lapack_int *info );
-void LAPACK_sgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
-                   lapack_int* n, float* a, lapack_int* lda, float* wr,
-                   float* wi, float* vl, lapack_int* ldvl, float* vr,
-                   lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
-                   float* scale, float* abnrm, float* rconde, float* rcondv,
-                   float* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int *info );
-void LAPACK_dgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
-                   lapack_int* n, double* a, lapack_int* lda, double* wr,
-                   double* wi, double* vl, lapack_int* ldvl, double* vr,
-                   lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
-                   double* scale, double* abnrm, double* rconde,
-                   double* rcondv, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_cgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
-                   lapack_int* n, lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* w, lapack_complex_float* vl,
-                   lapack_int* ldvl, lapack_complex_float* vr,
-                   lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
-                   float* scale, float* abnrm, float* rconde, float* rcondv,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int *info );
-void LAPACK_zgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
-                   lapack_int* n, lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* w, lapack_complex_double* vl,
-                   lapack_int* ldvl, lapack_complex_double* vr,
-                   lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
-                   double* scale, double* abnrm, double* rconde,
-                   double* rcondv, lapack_complex_double* work,
-                   lapack_int* lwork, double* rwork, lapack_int *info );
-void LAPACK_sgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
-                   float* a, lapack_int* lda, float* s, float* u,
-                   lapack_int* ldu, float* vt, lapack_int* ldvt, float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_dgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
-                   double* a, lapack_int* lda, double* s, double* u,
-                   lapack_int* ldu, double* vt, lapack_int* ldvt, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_cgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda, float* s,
-                   lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* vt, lapack_int* ldvt,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int *info );
-void LAPACK_zgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda, double* s,
-                   lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* vt, lapack_int* ldvt,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int *info );
-void LAPACK_sgesvdx( char* jobu, char* jobvt, char* range, lapack_int* m, lapack_int* n,
-                   float* a, lapack_int* lda, float* vl, float* vu,
-                   lapack_int* il, lapack_int* iu, lapack_int* ns, float* s, float* u,
-                   lapack_int* ldu, float* vt, lapack_int* ldvt, float* work,
-                   lapack_int* lwork, lapack_int *iwork, lapack_int *info );
-void LAPACK_dgesvdx( char* jobu, char* jobvt, char* range, lapack_int* m, lapack_int* n,
-                   double* a, lapack_int* lda, double* vl, double* vu,
-                   lapack_int* il, lapack_int* iu, lapack_int* ns, double* s, double* u,
-                   lapack_int* ldu, double* vt, lapack_int* ldvt, double* work,
-                   lapack_int* lwork, lapack_int *iwork, lapack_int *info );
-void LAPACK_cgesvdx( char* jobu, char* jobvt, char* range, lapack_int* m, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda, float* vl, float* vu,
-                   lapack_int* il, lapack_int* iu, lapack_int* ns, float* s,
-                   lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* vt, lapack_int* ldvt,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int *iwork, lapack_int *info );
-void LAPACK_zgesvdx( char* jobu, char* jobvt, char* range, lapack_int* m, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda, double* vl, double* vu,
-                   lapack_int* il, lapack_int* iu, lapack_int* ns, double* s,
-                   lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* vt, lapack_int* ldvt,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int *iwork, lapack_int *info );
-void LAPACK_sgesdd( char* jobz, lapack_int* m, lapack_int* n, float* a,
-                   lapack_int* lda, float* s, float* u, lapack_int* ldu,
-                   float* vt, lapack_int* ldvt, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_dgesdd( char* jobz, lapack_int* m, lapack_int* n, double* a,
-                   lapack_int* lda, double* s, double* u, lapack_int* ldu,
-                   double* vt, lapack_int* ldvt, double* work,
-                   lapack_int* lwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_cgesdd( char* jobz, lapack_int* m, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda, float* s,
-                   lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* vt, lapack_int* ldvt,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_zgesdd( char* jobz, lapack_int* m, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda, double* s,
-                   lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* vt, lapack_int* ldvt,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_dgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,
-                   char* jobp, lapack_int* m, lapack_int* n, double* a,
-                   lapack_int* lda, double* sva, double* u, lapack_int* ldu,
-                   double* v, lapack_int* ldv, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_sgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,
-                   char* jobp, lapack_int* m, lapack_int* n, float* a,
-                   lapack_int* lda, float* sva, float* u, lapack_int* ldu,
-                   float* v, lapack_int* ldv, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_cgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,
-                   char* jobp, lapack_int* m, lapack_int* n, lapack_complex_float* a,
-                   lapack_int* lda, float* sva, lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* v, lapack_int* ldv, lapack_complex_float* cwork,
-                   lapack_int* lwork, float* work, lapack_int* lrwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_zgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,
-                   char* jobp, lapack_int* m, lapack_int* n, lapack_complex_double* a,
-                   lapack_int* lda, double* sva, lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* v, lapack_int* ldv, lapack_complex_double* cwork,
-                   lapack_int* lwork, double* work, lapack_int* lrwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_dgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,
-                   lapack_int* n, double* a, lapack_int* lda, double* sva,
-                   lapack_int* mv, double* v, lapack_int* ldv, double* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_sgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,
-                   lapack_int* n, float* a, lapack_int* lda, float* sva,
-                   lapack_int* mv, float* v, lapack_int* ldv, float* work,
-                   lapack_int* lwork, lapack_int *info );
-void LAPACK_cgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,
-                   lapack_int* n, lapack_complex_float* a, lapack_int* lda, float* sva,
-                   lapack_int* mv, lapack_complex_float* v, lapack_int* ldv,
-                   lapack_complex_float* cwork, lapack_int* lwork, float* rwork,
-                   lapack_int* lrwork, lapack_int *info );
-void LAPACK_zgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,
-                   lapack_int* n, lapack_complex_double* a, lapack_int* lda, double* sva,
-                   lapack_int* mv, lapack_complex_double* v, lapack_int* ldv,
-                   lapack_complex_double* cwork, lapack_int* lwork, double* rwork,
-                   lapack_int* lrwork, lapack_int *info );
-void LAPACK_sggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
-                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
-                   float* alpha, float* beta, float* u, lapack_int* ldu,
-                   float* v, lapack_int* ldv, float* q, lapack_int* ldq,
-                   float* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_dggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
-                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
-                   double* alpha, double* beta, double* u, lapack_int* ldu,
-                   double* v, lapack_int* ldv, double* q, lapack_int* ldq,
-                   double* work, lapack_int* iwork, lapack_int *info );
-void LAPACK_cggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, float* alpha,
-                   float* beta, lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* v, lapack_int* ldv,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* work, float* rwork, lapack_int* iwork,
-                   lapack_int *info );
-void LAPACK_zggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, double* alpha,
-                   double* beta, lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* v, lapack_int* ldv,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* work, double* rwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_sggsvd3( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
-                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
-                   float* alpha, float* beta, float* u, lapack_int* ldu,
-                   float* v, lapack_int* ldv, float* q, lapack_int* ldq,
-                   float* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int *info );
-void LAPACK_dggsvd3( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
-                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
-                   double* alpha, double* beta, double* u, lapack_int* ldu,
-                   double* v, lapack_int* ldv, double* q, lapack_int* ldq,
-                   double* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int *info );
-void LAPACK_cggsvd3( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, float* alpha,
-                   float* beta, lapack_complex_float* u, lapack_int* ldu,
-                   lapack_complex_float* v, lapack_int* ldv,
-                   lapack_complex_float* q, lapack_int* ldq,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int* iwork, lapack_int *info );
-void LAPACK_zggsvd3( char* jobu, char* jobv, char* jobq, lapack_int* m,
-                   lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, double* alpha,
-                   double* beta, lapack_complex_double* u, lapack_int* ldu,
-                   lapack_complex_double* v, lapack_int* ldv,
-                   lapack_complex_double* q, lapack_int* ldq,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int* iwork, lapack_int *info );
-void LAPACK_ssygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
-                   float* w, float* work, lapack_int* lwork, lapack_int *info );
-void LAPACK_dsygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
-                   double* w, double* work, lapack_int* lwork,
-                   lapack_int *info );
-void LAPACK_chegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, float* w,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int *info );
-void LAPACK_zhegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, double* w,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int *info );
-void LAPACK_ssygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   float* a, lapack_int* lda, float* b, lapack_int* ldb,
-                   float* w, float* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_dsygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   double* a, lapack_int* lda, double* b, lapack_int* ldb,
-                   double* w, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );
-void LAPACK_chegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, float* w,
-                   lapack_complex_float* work, lapack_int* lwork, float* rwork,
-                   lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
-                   lapack_int *info );
-void LAPACK_zhegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, double* w,
-                   lapack_complex_double* work, lapack_int* lwork,
-                   double* rwork, lapack_int* lrwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_ssygvx( lapack_int* itype, char* jobz, char* range, char* uplo,
-                   lapack_int* n, float* a, lapack_int* lda, float* b,
-                   lapack_int* ldb, float* vl, float* vu, lapack_int* il,
-                   lapack_int* iu, float* abstol, lapack_int* m, float* w,
-                   float* z, lapack_int* ldz, float* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* ifail, lapack_int *info );
-void LAPACK_dsygvx( lapack_int* itype, char* jobz, char* range, char* uplo,
-                   lapack_int* n, double* a, lapack_int* lda, double* b,
-                   lapack_int* ldb, double* vl, double* vu, lapack_int* il,
-                   lapack_int* iu, double* abstol, lapack_int* m, double* w,
-                   double* z, lapack_int* ldz, double* work, lapack_int* lwork,
-                   lapack_int* iwork, lapack_int* ifail, lapack_int *info );
-void LAPACK_chegvx( lapack_int* itype, char* jobz, char* range, char* uplo,
-                   lapack_int* n, lapack_complex_float* a, lapack_int* lda,
-                   lapack_complex_float* b, lapack_int* ldb, float* vl,
-                   float* vu, lapack_int* il, lapack_int* iu, float* abstol,
-                   lapack_int* m, float* w, lapack_complex_float* z,
-                   lapack_int* ldz, lapack_complex_float* work,
-                   lapack_int* lwork, float* rwork, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_zhegvx( lapack_int* itype, char* jobz, char* range, char* uplo,
-                   lapack_int* n, lapack_complex_double* a, lapack_int* lda,
-                   lapack_complex_double* b, lapack_int* ldb, double* vl,
-                   double* vu, lapack_int* il, lapack_int* iu, double* abstol,
-                   lapack_int* m, double* w, lapack_complex_double* z,
-                   lapack_int* ldz, lapack_complex_double* work,
-                   lapack_int* lwork, double* rwork, lapack_int* iwork,
-                   lapack_int* ifail, lapack_int *info );
-void LAPACK_sspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   float* ap, float* bp, float* w, float* z, lapack_int* ldz,
-                   float* work, lapack_int *info );
-void LAPACK_dspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   double* ap, double* bp, double* w, double* z,
-                   lapack_int* ldz, double* work, lapack_int *info );
-void LAPACK_chpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_float* ap, lapack_complex_float* bp, float* w,
-                   lapack_complex_float* z, lapack_int* ldz,
-                   lapack_complex_float* work, float* rwork, lapack_int *info );
-void LAPACK_zhpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   lapack_complex_double* ap, lapack_complex_double* bp,
-                   double* w, lapack_complex_double* z, lapack_int* ldz,
-                   lapack_complex_double* work, double* rwork,
-                   lapack_int *info );
-void LAPACK_sspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   float* ap, float* bp, float* w, float* z, lapack_int* ldz,
-                   float* work, lapack_int* lwork, lapack_int* iwork,
-                   lapack_int* liwork, lapack_int *info );
-void LAPACK_dspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
-                   double* ap, double* bp, double* w, double* z,
-                   lapack_int* ldz, double* work, lapack_int*
lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_chpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n, - lapack_complex_float* ap, lapack_complex_float* bp, - float* w, lapack_complex_float* z, lapack_int* ldz, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork, - lapack_int *info ); -void LAPACK_zhpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n, - lapack_complex_double* ap, lapack_complex_double* bp, - double* w, lapack_complex_double* z, lapack_int* ldz, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int* lrwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_sspgvx( lapack_int* itype, char* jobz, char* range, char* uplo, - lapack_int* n, float* ap, float* bp, float* vl, float* vu, - lapack_int* il, lapack_int* iu, float* abstol, - lapack_int* m, float* w, float* z, lapack_int* ldz, - float* work, lapack_int* iwork, lapack_int* ifail, - lapack_int *info ); -void LAPACK_dspgvx( lapack_int* itype, char* jobz, char* range, char* uplo, - lapack_int* n, double* ap, double* bp, double* vl, - double* vu, lapack_int* il, lapack_int* iu, double* abstol, - lapack_int* m, double* w, double* z, lapack_int* ldz, - double* work, lapack_int* iwork, lapack_int* ifail, - lapack_int *info ); -void LAPACK_chpgvx( lapack_int* itype, char* jobz, char* range, char* uplo, - lapack_int* n, lapack_complex_float* ap, - lapack_complex_float* bp, float* vl, float* vu, - lapack_int* il, lapack_int* iu, float* abstol, - lapack_int* m, float* w, lapack_complex_float* z, - lapack_int* ldz, lapack_complex_float* work, float* rwork, - lapack_int* iwork, lapack_int* ifail, lapack_int *info ); -void LAPACK_zhpgvx( lapack_int* itype, char* jobz, char* range, char* uplo, - lapack_int* n, lapack_complex_double* ap, - lapack_complex_double* bp, double* vl, double* vu, - lapack_int* il, lapack_int* iu, double* abstol, - lapack_int* m, double* w, lapack_complex_double* z, - lapack_int* ldz, lapack_complex_double* work, double* rwork, - lapack_int* iwork, lapack_int* ifail, lapack_int *info ); -void LAPACK_ssbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, float* ab, lapack_int* ldab, float* bb, - lapack_int* ldbb, float* w, float* z, lapack_int* ldz, - float* work, lapack_int *info ); -void LAPACK_dsbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, double* ab, lapack_int* ldab, double* bb, - lapack_int* ldbb, double* w, double* z, lapack_int* ldz, - double* work, lapack_int *info ); -void LAPACK_chbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab, - lapack_complex_float* bb, lapack_int* ldbb, float* w, - lapack_complex_float* z, lapack_int* ldz, - lapack_complex_float* work, float* rwork, lapack_int *info ); -void LAPACK_zhbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab, - lapack_complex_double* bb, lapack_int* ldbb, double* w, - lapack_complex_double* z, lapack_int* ldz, - lapack_complex_double* work, double* rwork, - lapack_int *info ); -void LAPACK_ssbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, float* ab, lapack_int* ldab, float* bb, - lapack_int* ldbb, float* w, float* z, lapack_int* ldz, - float* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_dsbgvd( char* 
jobz, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, double* ab, lapack_int* ldab, double* bb, - lapack_int* ldbb, double* w, double* z, lapack_int* ldz, - double* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_chbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab, - lapack_complex_float* bb, lapack_int* ldbb, float* w, - lapack_complex_float* z, lapack_int* ldz, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork, - lapack_int *info ); -void LAPACK_zhbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka, - lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab, - lapack_complex_double* bb, lapack_int* ldbb, double* w, - lapack_complex_double* z, lapack_int* ldz, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int* lrwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_ssbgvx( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_int* ka, lapack_int* kb, float* ab, lapack_int* ldab, - float* bb, lapack_int* ldbb, float* q, lapack_int* ldq, - float* vl, float* vu, lapack_int* il, lapack_int* iu, - float* abstol, lapack_int* m, float* w, float* z, - lapack_int* ldz, float* work, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_dsbgvx( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_int* ka, lapack_int* kb, double* ab, - lapack_int* ldab, double* bb, lapack_int* ldbb, double* q, - lapack_int* ldq, double* vl, double* vu, lapack_int* il, - lapack_int* iu, double* abstol, lapack_int* m, double* w, - double* z, lapack_int* ldz, double* work, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_chbgvx( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_int* ka, lapack_int* kb, lapack_complex_float* ab, - lapack_int* ldab, lapack_complex_float* bb, - lapack_int* ldbb, lapack_complex_float* q, lapack_int* ldq, - float* vl, float* vu, lapack_int* il, lapack_int* iu, - float* abstol, lapack_int* m, float* w, - lapack_complex_float* z, lapack_int* ldz, - lapack_complex_float* work, float* rwork, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_zhbgvx( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_int* ka, lapack_int* kb, lapack_complex_double* ab, - lapack_int* ldab, lapack_complex_double* bb, - lapack_int* ldbb, lapack_complex_double* q, lapack_int* ldq, - double* vl, double* vu, lapack_int* il, lapack_int* iu, - double* abstol, lapack_int* m, double* w, - lapack_complex_double* z, lapack_int* ldz, - lapack_complex_double* work, double* rwork, - lapack_int* iwork, lapack_int* ifail, lapack_int *info ); -void LAPACK_sgges( char* jobvsl, char* jobvsr, char* sort, - LAPACK_S_SELECT3 selctg, lapack_int* n, float* a, - lapack_int* lda, float* b, lapack_int* ldb, lapack_int* sdim, - float* alphar, float* alphai, float* beta, float* vsl, - lapack_int* ldvsl, float* vsr, lapack_int* ldvsr, - float* work, lapack_int* lwork, lapack_logical* bwork, - lapack_int *info ); -void LAPACK_dgges( char* jobvsl, char* jobvsr, char* sort, - LAPACK_D_SELECT3 selctg, lapack_int* n, double* a, - lapack_int* lda, double* b, lapack_int* ldb, - lapack_int* sdim, double* alphar, double* alphai, - double* beta, double* vsl, lapack_int* ldvsl, double* vsr, - lapack_int* ldvsr, double* work, lapack_int* lwork, - lapack_logical* bwork, lapack_int *info ); 
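/* Usage sketch (editorial illustration, not part of the removed header text
   above): every routine declared here uses the Fortran calling convention,
   so all arguments -- scalars included -- are passed by pointer, and status
   comes back through the trailing *info. Nearly all of these routines also
   share the lwork = -1 workspace-query idiom sketched below for
   LAPACK_dgesvd; the same two-phase pattern applies to gges/ggev/sygv/etc.
   The function name full_svd, the matrix sizes, and the column-major layout
   of the caller's arrays are assumptions of this sketch, as is a lapacke.h
   that declares lapack_int and these Fortran-convention prototypes. */
#include <stdlib.h>
#include <lapacke.h>

static lapack_int full_svd( lapack_int m, lapack_int n, double* a,
                            double* s, double* u, double* vt )
{
    char jobu = 'A', jobvt = 'A';            /* compute full U and V^T      */
    lapack_int lda = m, ldu = m, ldvt = n;   /* column-major leading dims   */
    lapack_int lwork = -1, info = 0;
    double wkopt;

    /* Phase 1: lwork = -1 performs a workspace query only; the optimal
       workspace size is returned in wkopt. */
    LAPACK_dgesvd( &jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,
                   &wkopt, &lwork, &info );
    if( info != 0 ) return info;

    /* Phase 2: allocate the workspace and compute A = U * diag(S) * V^T. */
    lwork = (lapack_int)wkopt;
    double* work = (double*)malloc( (size_t)lwork * sizeof(double) );
    if( work == NULL ) return -1;            /* allocation failure (sketch-local code) */
    LAPACK_dgesvd( &jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,
                   work, &lwork, &info );
    free( work );
    return info;                             /* 0 on success, >0 if no convergence */
}

/* Scalar helpers listed further below keep the same by-pointer convention,
   e.g. querying machine epsilon via dlamch:
       char e = 'E';
       double eps = LAPACK_dlamch( &e );                                    */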
-void LAPACK_cgges( char* jobvsl, char* jobvsr, char* sort, - LAPACK_C_SELECT2 selctg, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim, - lapack_complex_float* alpha, lapack_complex_float* beta, - lapack_complex_float* vsl, lapack_int* ldvsl, - lapack_complex_float* vsr, lapack_int* ldvsr, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_logical* bwork, lapack_int *info ); -void LAPACK_zgges( char* jobvsl, char* jobvsr, char* sort, - LAPACK_Z_SELECT2 selctg, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim, - lapack_complex_double* alpha, lapack_complex_double* beta, - lapack_complex_double* vsl, lapack_int* ldvsl, - lapack_complex_double* vsr, lapack_int* ldvsr, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_logical* bwork, lapack_int *info ); -void LAPACK_sgges3( char* jobvsl, char* jobvsr, char* sort, - LAPACK_S_SELECT3 selctg, lapack_int* n, - float* a, lapack_int* lda, float* b, lapack_int* ldb, - lapack_int* sdim, float* alphar, float* alphai, - float* beta, float* vsl, lapack_int* ldvsl, - float* vsr, lapack_int* ldvsr, - float* work, lapack_int* lwork, lapack_logical* bwork, - lapack_int *info ); -void LAPACK_dgges3( char* jobvsl, char* jobvsr, char* sort, - LAPACK_D_SELECT3 selctg, lapack_int* n, double* a, - lapack_int* lda, double* b, lapack_int* ldb, - lapack_int* sdim, double* alphar, double* alphai, - double* beta, double* vsl, lapack_int* ldvsl, double* vsr, - lapack_int* ldvsr, double* work, lapack_int* lwork, - lapack_logical* bwork, lapack_int *info ); -void LAPACK_cgges3( char* jobvsl, char* jobvsr, char* sort, - LAPACK_C_SELECT2 selctg, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, - lapack_int* sdim, - lapack_complex_float* alpha, lapack_complex_float* beta, - lapack_complex_float* vsl, lapack_int* ldvsl, - lapack_complex_float* vsr, lapack_int* ldvsr, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_logical* bwork, lapack_int *info ); -void LAPACK_zgges3( char* jobvsl, char* jobvsr, char* sort, - LAPACK_Z_SELECT2 selctg, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim, - lapack_complex_double* alpha, lapack_complex_double* beta, - lapack_complex_double* vsl, lapack_int* ldvsl, - lapack_complex_double* vsr, lapack_int* ldvsr, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_logical* bwork, lapack_int *info ); -void LAPACK_sggesx( char* jobvsl, char* jobvsr, char* sort, - LAPACK_S_SELECT3 selctg, char* sense, lapack_int* n, - float* a, lapack_int* lda, float* b, lapack_int* ldb, - lapack_int* sdim, float* alphar, float* alphai, float* beta, - float* vsl, lapack_int* ldvsl, float* vsr, - lapack_int* ldvsr, float* rconde, float* rcondv, - float* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* liwork, lapack_logical* bwork, - lapack_int *info ); -void LAPACK_dggesx( char* jobvsl, char* jobvsr, char* sort, - LAPACK_D_SELECT3 selctg, char* sense, lapack_int* n, - double* a, lapack_int* lda, double* b, lapack_int* ldb, - lapack_int* sdim, double* alphar, double* alphai, - double* beta, double* vsl, lapack_int* ldvsl, double* vsr, - lapack_int* ldvsr, double* rconde, double* rcondv, - double* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* liwork, lapack_logical* bwork, - lapack_int *info 
); -void LAPACK_cggesx( char* jobvsl, char* jobvsr, char* sort, - LAPACK_C_SELECT2 selctg, char* sense, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim, - lapack_complex_float* alpha, lapack_complex_float* beta, - lapack_complex_float* vsl, lapack_int* ldvsl, - lapack_complex_float* vsr, lapack_int* ldvsr, float* rconde, - float* rcondv, lapack_complex_float* work, - lapack_int* lwork, float* rwork, lapack_int* iwork, - lapack_int* liwork, lapack_logical* bwork, - lapack_int *info ); -void LAPACK_zggesx( char* jobvsl, char* jobvsr, char* sort, - LAPACK_Z_SELECT2 selctg, char* sense, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim, - lapack_complex_double* alpha, lapack_complex_double* beta, - lapack_complex_double* vsl, lapack_int* ldvsl, - lapack_complex_double* vsr, lapack_int* ldvsr, - double* rconde, double* rcondv, lapack_complex_double* work, - lapack_int* lwork, double* rwork, lapack_int* iwork, - lapack_int* liwork, lapack_logical* bwork, - lapack_int *info ); -void LAPACK_sggev( char* jobvl, char* jobvr, lapack_int* n, float* a, - lapack_int* lda, float* b, lapack_int* ldb, float* alphar, - float* alphai, float* beta, float* vl, lapack_int* ldvl, - float* vr, lapack_int* ldvr, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dggev( char* jobvl, char* jobvr, lapack_int* n, double* a, - lapack_int* lda, double* b, lapack_int* ldb, double* alphar, - double* alphai, double* beta, double* vl, lapack_int* ldvl, - double* vr, lapack_int* ldvr, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cggev( char* jobvl, char* jobvr, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* alpha, lapack_complex_float* beta, - lapack_complex_float* vl, lapack_int* ldvl, - lapack_complex_float* vr, lapack_int* ldvr, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int *info ); -void LAPACK_zggev( char* jobvl, char* jobvr, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* alpha, lapack_complex_double* beta, - lapack_complex_double* vl, lapack_int* ldvl, - lapack_complex_double* vr, lapack_int* ldvr, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int *info ); -void LAPACK_sggev3( char* jobvl, char* jobvr, lapack_int* n, float* a, - lapack_int* lda, float* b, lapack_int* ldb, float* alphar, - float* alphai, float* beta, float* vl, lapack_int* ldvl, - float* vr, lapack_int* ldvr, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dggev3( char* jobvl, char* jobvr, lapack_int* n, double* a, - lapack_int* lda, double* b, lapack_int* ldb, double* alphar, - double* alphai, double* beta, double* vl, lapack_int* ldvl, - double* vr, lapack_int* ldvr, double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_cggev3( char* jobvl, char* jobvr, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* alpha, lapack_complex_float* beta, - lapack_complex_float* vl, lapack_int* ldvl, - lapack_complex_float* vr, lapack_int* ldvr, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int *info ); -void LAPACK_zggev3( char* jobvl, char* jobvr, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, 
lapack_int* ldb, - lapack_complex_double* alpha, lapack_complex_double* beta, - lapack_complex_double* vl, lapack_int* ldvl, - lapack_complex_double* vr, lapack_int* ldvr, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int *info ); -void LAPACK_sggevx( char* balanc, char* jobvl, char* jobvr, char* sense, - lapack_int* n, float* a, lapack_int* lda, float* b, - lapack_int* ldb, float* alphar, float* alphai, float* beta, - float* vl, lapack_int* ldvl, float* vr, lapack_int* ldvr, - lapack_int* ilo, lapack_int* ihi, float* lscale, - float* rscale, float* abnrm, float* bbnrm, float* rconde, - float* rcondv, float* work, lapack_int* lwork, - lapack_int* iwork, lapack_logical* bwork, - lapack_int *info ); -void LAPACK_dggevx( char* balanc, char* jobvl, char* jobvr, char* sense, - lapack_int* n, double* a, lapack_int* lda, double* b, - lapack_int* ldb, double* alphar, double* alphai, - double* beta, double* vl, lapack_int* ldvl, double* vr, - lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi, - double* lscale, double* rscale, double* abnrm, - double* bbnrm, double* rconde, double* rcondv, double* work, - lapack_int* lwork, lapack_int* iwork, lapack_logical* bwork, - lapack_int *info ); -void LAPACK_cggevx( char* balanc, char* jobvl, char* jobvr, char* sense, - lapack_int* n, lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* alpha, lapack_complex_float* beta, - lapack_complex_float* vl, lapack_int* ldvl, - lapack_complex_float* vr, lapack_int* ldvr, lapack_int* ilo, - lapack_int* ihi, float* lscale, float* rscale, float* abnrm, - float* bbnrm, float* rconde, float* rcondv, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int* iwork, lapack_logical* bwork, - lapack_int *info ); -void LAPACK_zggevx( char* balanc, char* jobvl, char* jobvr, char* sense, - lapack_int* n, lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* alpha, lapack_complex_double* beta, - lapack_complex_double* vl, lapack_int* ldvl, - lapack_complex_double* vr, lapack_int* ldvr, - lapack_int* ilo, lapack_int* ihi, double* lscale, - double* rscale, double* abnrm, double* bbnrm, - double* rconde, double* rcondv, lapack_complex_double* work, - lapack_int* lwork, double* rwork, lapack_int* iwork, - lapack_logical* bwork, lapack_int *info ); -void LAPACK_dsfrk( char* transr, char* uplo, char* trans, lapack_int* n, - lapack_int* k, double* alpha, const double* a, - lapack_int* lda, double* beta, double* c ); -void LAPACK_ssfrk( char* transr, char* uplo, char* trans, lapack_int* n, - lapack_int* k, float* alpha, const float* a, lapack_int* lda, - float* beta, float* c ); -void LAPACK_zhfrk( char* transr, char* uplo, char* trans, lapack_int* n, - lapack_int* k, double* alpha, const lapack_complex_double* a, - lapack_int* lda, double* beta, lapack_complex_double* c ); -void LAPACK_chfrk( char* transr, char* uplo, char* trans, lapack_int* n, - lapack_int* k, float* alpha, const lapack_complex_float* a, - lapack_int* lda, float* beta, lapack_complex_float* c ); -void LAPACK_dtfsm( char* transr, char* side, char* uplo, char* trans, - char* diag, lapack_int* m, lapack_int* n, double* alpha, - const double* a, double* b, lapack_int* ldb ); -void LAPACK_stfsm( char* transr, char* side, char* uplo, char* trans, - char* diag, lapack_int* m, lapack_int* n, float* alpha, - const float* a, float* b, lapack_int* ldb ); -void LAPACK_ztfsm( char* transr, char* side, char* uplo, char* 
trans, - char* diag, lapack_int* m, lapack_int* n, - lapack_complex_double* alpha, const lapack_complex_double* a, - lapack_complex_double* b, lapack_int* ldb ); -void LAPACK_ctfsm( char* transr, char* side, char* uplo, char* trans, - char* diag, lapack_int* m, lapack_int* n, - lapack_complex_float* alpha, const lapack_complex_float* a, - lapack_complex_float* b, lapack_int* ldb ); -void LAPACK_dtfttp( char* transr, char* uplo, lapack_int* n, const double* arf, - double* ap, lapack_int *info ); -void LAPACK_stfttp( char* transr, char* uplo, lapack_int* n, const float* arf, - float* ap, lapack_int *info ); -void LAPACK_ztfttp( char* transr, char* uplo, lapack_int* n, - const lapack_complex_double* arf, lapack_complex_double* ap, - lapack_int *info ); -void LAPACK_ctfttp( char* transr, char* uplo, lapack_int* n, - const lapack_complex_float* arf, lapack_complex_float* ap, - lapack_int *info ); -void LAPACK_dtfttr( char* transr, char* uplo, lapack_int* n, const double* arf, - double* a, lapack_int* lda, lapack_int *info ); -void LAPACK_stfttr( char* transr, char* uplo, lapack_int* n, const float* arf, - float* a, lapack_int* lda, lapack_int *info ); -void LAPACK_ztfttr( char* transr, char* uplo, lapack_int* n, - const lapack_complex_double* arf, lapack_complex_double* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_ctfttr( char* transr, char* uplo, lapack_int* n, - const lapack_complex_float* arf, lapack_complex_float* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_dtpttf( char* transr, char* uplo, lapack_int* n, const double* ap, - double* arf, lapack_int *info ); -void LAPACK_stpttf( char* transr, char* uplo, lapack_int* n, const float* ap, - float* arf, lapack_int *info ); -void LAPACK_ztpttf( char* transr, char* uplo, lapack_int* n, - const lapack_complex_double* ap, lapack_complex_double* arf, - lapack_int *info ); -void LAPACK_ctpttf( char* transr, char* uplo, lapack_int* n, - const lapack_complex_float* ap, lapack_complex_float* arf, - lapack_int *info ); -void LAPACK_dtpttr( char* uplo, lapack_int* n, const double* ap, double* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_stpttr( char* uplo, lapack_int* n, const float* ap, float* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_ztpttr( char* uplo, lapack_int* n, const lapack_complex_double* ap, - lapack_complex_double* a, lapack_int* lda, - lapack_int *info ); -void LAPACK_ctpttr( char* uplo, lapack_int* n, const lapack_complex_float* ap, - lapack_complex_float* a, lapack_int* lda, - lapack_int *info ); -void LAPACK_dtrttf( char* transr, char* uplo, lapack_int* n, const double* a, - lapack_int* lda, double* arf, lapack_int *info ); -void LAPACK_strttf( char* transr, char* uplo, lapack_int* n, const float* a, - lapack_int* lda, float* arf, lapack_int *info ); -void LAPACK_ztrttf( char* transr, char* uplo, lapack_int* n, - const lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* arf, lapack_int *info ); -void LAPACK_ctrttf( char* transr, char* uplo, lapack_int* n, - const lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* arf, lapack_int *info ); -void LAPACK_dtrttp( char* uplo, lapack_int* n, const double* a, lapack_int* lda, - double* ap, lapack_int *info ); -void LAPACK_strttp( char* uplo, lapack_int* n, const float* a, lapack_int* lda, - float* ap, lapack_int *info ); -void LAPACK_ztrttp( char* uplo, lapack_int* n, const lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* ap, - lapack_int *info ); -void LAPACK_ctrttp( char* uplo, lapack_int* n, const 
lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* ap, - lapack_int *info ); -void LAPACK_sgeqrfp( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* tau, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dgeqrfp( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* tau, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* tau, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* tau, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_clacgv( lapack_int* n, lapack_complex_float* x, lapack_int* incx ); -void LAPACK_zlacgv( lapack_int* n, lapack_complex_double* x, lapack_int* incx ); -void LAPACK_slarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n, - float* x ); -void LAPACK_dlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n, - double* x ); -void LAPACK_clarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n, - lapack_complex_float* x ); -void LAPACK_zlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n, - lapack_complex_double* x ); -void LAPACK_sgeqr2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* tau, float* work, lapack_int *info ); -void LAPACK_dgeqr2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* tau, double* work, lapack_int *info ); -void LAPACK_cgeqr2( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* tau, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zgeqr2( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* tau, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_slacn2( lapack_int* n, float* v, float* x, lapack_int* isgn, - float* est, lapack_int* kase, lapack_int* isave ); -void LAPACK_dlacn2( lapack_int* n, double* v, double* x, lapack_int* isgn, - double* est, lapack_int* kase, lapack_int* isave ); -void LAPACK_clacn2( lapack_int* n, lapack_complex_float* v, - lapack_complex_float* x, float* est, - lapack_int* kase, lapack_int* isave ); -void LAPACK_zlacn2( lapack_int* n, lapack_complex_double* v, - lapack_complex_double* x, double* est, - lapack_int* kase, lapack_int* isave ); -void LAPACK_slacpy( char* uplo, lapack_int* m, lapack_int* n, const float* a, - lapack_int* lda, float* b, lapack_int* ldb ); -void LAPACK_dlacpy( char* uplo, lapack_int* m, lapack_int* n, const double* a, - lapack_int* lda, double* b, lapack_int* ldb ); -void LAPACK_clacpy( char* uplo, lapack_int* m, lapack_int* n, - const lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb ); -void LAPACK_zlacpy( char* uplo, lapack_int* m, lapack_int* n, - const lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb ); - -void LAPACK_clacp2( char* uplo, lapack_int* m, lapack_int* n, const float* a, - lapack_int* lda, lapack_complex_float* b, lapack_int* ldb ); -void LAPACK_zlacp2( char* uplo, lapack_int* m, lapack_int* n, const double* a, - lapack_int* lda, lapack_complex_double* b, - lapack_int* ldb ); - -void LAPACK_sgetf2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - lapack_int* ipiv, lapack_int *info ); -void LAPACK_dgetf2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - lapack_int* ipiv, 
lapack_int *info ); -void LAPACK_cgetf2( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int* ipiv, lapack_int *info ); -void LAPACK_zgetf2( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* ipiv, lapack_int *info ); -void LAPACK_slaswp( lapack_int* n, float* a, lapack_int* lda, lapack_int* k1, - lapack_int* k2, const lapack_int* ipiv, lapack_int* incx ); -void LAPACK_dlaswp( lapack_int* n, double* a, lapack_int* lda, lapack_int* k1, - lapack_int* k2, const lapack_int* ipiv, lapack_int* incx ); -void LAPACK_claswp( lapack_int* n, lapack_complex_float* a, lapack_int* lda, - lapack_int* k1, lapack_int* k2, const lapack_int* ipiv, - lapack_int* incx ); -void LAPACK_zlaswp( lapack_int* n, lapack_complex_double* a, lapack_int* lda, - lapack_int* k1, lapack_int* k2, const lapack_int* ipiv, - lapack_int* incx ); -float LAPACK_slange( char* norm, lapack_int* m, lapack_int* n, const float* a, - lapack_int* lda, float* work ); -double LAPACK_dlange( char* norm, lapack_int* m, lapack_int* n, const double* a, - lapack_int* lda, double* work ); -float LAPACK_clange( char* norm, lapack_int* m, lapack_int* n, - const lapack_complex_float* a, lapack_int* lda, float* work ); -double LAPACK_zlange( char* norm, lapack_int* m, lapack_int* n, - const lapack_complex_double* a, lapack_int* lda, double* work ); -float LAPACK_clanhe( char* norm, char* uplo, lapack_int* n, - const lapack_complex_float* a, lapack_int* lda, float* work ); -double LAPACK_zlanhe( char* norm, char* uplo, lapack_int* n, - const lapack_complex_double* a, lapack_int* lda, double* work ); -void LAPACK_clarcm( lapack_int* m, lapack_int* n, const float* a, - lapack_int* lda, const lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* c, - lapack_int* ldc, float* work ); -void LAPACK_zlarcm( lapack_int* m, lapack_int* n, const double* a, - lapack_int* lda, const lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* c, - lapack_int* ldc, double* work ); -void LAPACK_clacrm( lapack_int* m, lapack_int* n, const lapack_complex_float* a, - lapack_int* lda, const float* b, - lapack_int* ldb, lapack_complex_float* c, - lapack_int* ldc, float* work ); -void LAPACK_zlacrm( lapack_int* m, lapack_int* n, const lapack_complex_double* a, - lapack_int* lda, const double* b, - lapack_int* ldb, lapack_complex_double* c, - lapack_int* ldc, double* work ); -float LAPACK_slansy( char* norm, char* uplo, lapack_int* n, const float* a, - lapack_int* lda, float* work ); -double LAPACK_dlansy( char* norm, char* uplo, lapack_int* n, const double* a, - lapack_int* lda, double* work ); -float LAPACK_clansy( char* norm, char* uplo, lapack_int* n, - const lapack_complex_float* a, lapack_int* lda, float* work ); -double LAPACK_zlansy( char* norm, char* uplo, lapack_int* n, - const lapack_complex_double* a, lapack_int* lda, double* work ); -float LAPACK_slantr( char* norm, char* uplo, char* diag, lapack_int* m, - lapack_int* n, const float* a, lapack_int* lda, float* work ); -double LAPACK_dlantr( char* norm, char* uplo, char* diag, lapack_int* m, - lapack_int* n, const double* a, lapack_int* lda, double* work ); -float LAPACK_clantr( char* norm, char* uplo, char* diag, lapack_int* m, - lapack_int* n, const lapack_complex_float* a, lapack_int* lda, - float* work ); -double LAPACK_zlantr( char* norm, char* uplo, char* diag, lapack_int* m, - lapack_int* n, const lapack_complex_double* a, lapack_int* lda, - double* work ); -float LAPACK_slamch( char* cmach ); -double LAPACK_dlamch( 
char* cmach ); -void LAPACK_sgelq2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* tau, float* work, lapack_int *info ); -void LAPACK_dgelq2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* tau, double* work, lapack_int *info ); -void LAPACK_cgelq2( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* tau, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zgelq2( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* tau, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_slarfb( char* side, char* trans, char* direct, char* storev, - lapack_int* m, lapack_int* n, lapack_int* k, const float* v, - lapack_int* ldv, const float* t, lapack_int* ldt, float* c, - lapack_int* ldc, float* work, lapack_int* ldwork ); -void LAPACK_dlarfb( char* side, char* trans, char* direct, char* storev, - lapack_int* m, lapack_int* n, lapack_int* k, - const double* v, lapack_int* ldv, const double* t, - lapack_int* ldt, double* c, lapack_int* ldc, double* work, - lapack_int* ldwork ); -void LAPACK_clarfb( char* side, char* trans, char* direct, char* storev, - lapack_int* m, lapack_int* n, lapack_int* k, - const lapack_complex_float* v, lapack_int* ldv, - const lapack_complex_float* t, lapack_int* ldt, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* ldwork ); -void LAPACK_zlarfb( char* side, char* trans, char* direct, char* storev, - lapack_int* m, lapack_int* n, lapack_int* k, - const lapack_complex_double* v, lapack_int* ldv, - const lapack_complex_double* t, lapack_int* ldt, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int* ldwork ); -void LAPACK_slarfg( lapack_int* n, float* alpha, float* x, lapack_int* incx, - float* tau ); -void LAPACK_dlarfg( lapack_int* n, double* alpha, double* x, lapack_int* incx, - double* tau ); -void LAPACK_clarfg( lapack_int* n, lapack_complex_float* alpha, - lapack_complex_float* x, lapack_int* incx, - lapack_complex_float* tau ); -void LAPACK_zlarfg( lapack_int* n, lapack_complex_double* alpha, - lapack_complex_double* x, lapack_int* incx, - lapack_complex_double* tau ); -void LAPACK_slassq( lapack_int *n, float* x, lapack_int *incx, float* scale, float* sumsq ); -void LAPACK_dlassq( lapack_int *n, double* x, lapack_int *incx, double* scale, double* sumsq ); -void LAPACK_classq( lapack_int *n, lapack_complex_float* x, lapack_int *incx, float* scale, float* sumsq ); -void LAPACK_zlassq( lapack_int *n, lapack_complex_double* x, lapack_int *incx, double* scale, double* sumsq ); -void LAPACK_slarft( char* direct, char* storev, lapack_int* n, lapack_int* k, - const float* v, lapack_int* ldv, const float* tau, float* t, - lapack_int* ldt ); -void LAPACK_dlarft( char* direct, char* storev, lapack_int* n, lapack_int* k, - const double* v, lapack_int* ldv, const double* tau, - double* t, lapack_int* ldt ); -void LAPACK_clarft( char* direct, char* storev, lapack_int* n, lapack_int* k, - const lapack_complex_float* v, lapack_int* ldv, - const lapack_complex_float* tau, lapack_complex_float* t, - lapack_int* ldt ); -void LAPACK_zlarft( char* direct, char* storev, lapack_int* n, lapack_int* k, - const lapack_complex_double* v, lapack_int* ldv, - const lapack_complex_double* tau, lapack_complex_double* t, - lapack_int* ldt ); -void LAPACK_slarfx( char* side, lapack_int* m, lapack_int* n, const float* v, - float* tau, float* c, lapack_int* ldc, float* work ); -void 
LAPACK_dlarfx( char* side, lapack_int* m, lapack_int* n, const double* v, - double* tau, double* c, lapack_int* ldc, double* work ); -void LAPACK_clarfx( char* side, lapack_int* m, lapack_int* n, - const lapack_complex_float* v, lapack_complex_float* tau, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work ); -void LAPACK_zlarfx( char* side, lapack_int* m, lapack_int* n, - const lapack_complex_double* v, lapack_complex_double* tau, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work ); -void LAPACK_slatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed, - char* sym, float* d, lapack_int* mode, float* cond, - float* dmax, lapack_int* kl, lapack_int* ku, char* pack, - float* a, lapack_int* lda, float* work, lapack_int *info ); -void LAPACK_dlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed, - char* sym, double* d, lapack_int* mode, double* cond, - double* dmax, lapack_int* kl, lapack_int* ku, char* pack, - double* a, lapack_int* lda, double* work, - lapack_int *info ); -void LAPACK_clatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed, - char* sym, float* d, lapack_int* mode, float* cond, - float* dmax, lapack_int* kl, lapack_int* ku, char* pack, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed, - char* sym, double* d, lapack_int* mode, double* cond, - double* dmax, lapack_int* kl, lapack_int* ku, char* pack, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_slag2d( lapack_int* m, lapack_int* n, const float* sa, - lapack_int* ldsa, double* a, lapack_int* lda, - lapack_int *info ); -void LAPACK_dlag2s( lapack_int* m, lapack_int* n, const double* a, - lapack_int* lda, float* sa, lapack_int* ldsa, - lapack_int *info ); -void LAPACK_clag2z( lapack_int* m, lapack_int* n, - const lapack_complex_float* sa, lapack_int* ldsa, - lapack_complex_double* a, lapack_int* lda, - lapack_int *info ); -void LAPACK_zlag2c( lapack_int* m, lapack_int* n, - const lapack_complex_double* a, lapack_int* lda, - lapack_complex_float* sa, lapack_int* ldsa, - lapack_int *info ); -void LAPACK_slauum( char* uplo, lapack_int* n, float* a, lapack_int* lda, - lapack_int *info ); -void LAPACK_dlauum( char* uplo, lapack_int* n, double* a, lapack_int* lda, - lapack_int *info ); -void LAPACK_clauum( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_zlauum( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_slagge( lapack_int* m, lapack_int* n, lapack_int* kl, - lapack_int* ku, const float* d, float* a, lapack_int* lda, - lapack_int* iseed, float* work, lapack_int *info ); -void LAPACK_dlagge( lapack_int* m, lapack_int* n, lapack_int* kl, - lapack_int* ku, const double* d, double* a, lapack_int* lda, - lapack_int* iseed, double* work, lapack_int *info ); -void LAPACK_clagge( lapack_int* m, lapack_int* n, lapack_int* kl, - lapack_int* ku, const float* d, lapack_complex_float* a, - lapack_int* lda, lapack_int* iseed, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zlagge( lapack_int* m, lapack_int* n, lapack_int* kl, - lapack_int* ku, const double* d, lapack_complex_double* a, - lapack_int* lda, lapack_int* iseed, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_slascl( char* type, lapack_int* kl, lapack_int* ku, float* cfrom, - 
float* cto, lapack_int* m, lapack_int* n, float* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_dlascl( char* type, lapack_int* kl, lapack_int* ku, double* cfrom, - double* cto, lapack_int* m, lapack_int* n, double* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_clascl( char* type, lapack_int* kl, lapack_int* ku, float* cfrom, - float* cto, lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_zlascl( char* type, lapack_int* kl, lapack_int* ku, double* cfrom, - double* cto, lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int *info ); -void LAPACK_slaset( char* uplo, lapack_int* m, lapack_int* n, float* alpha, - float* beta, float* a, lapack_int* lda ); -void LAPACK_dlaset( char* uplo, lapack_int* m, lapack_int* n, double* alpha, - double* beta, double* a, lapack_int* lda ); -void LAPACK_claset( char* uplo, lapack_int* m, lapack_int* n, - lapack_complex_float* alpha, lapack_complex_float* beta, - lapack_complex_float* a, lapack_int* lda ); -void LAPACK_zlaset( char* uplo, lapack_int* m, lapack_int* n, - lapack_complex_double* alpha, lapack_complex_double* beta, - lapack_complex_double* a, lapack_int* lda ); -void LAPACK_slasrt( char* id, lapack_int* n, float* d, lapack_int *info ); -void LAPACK_dlasrt( char* id, lapack_int* n, double* d, lapack_int *info ); -void LAPACK_claghe( lapack_int* n, lapack_int* k, const float* d, - lapack_complex_float* a, lapack_int* lda, lapack_int* iseed, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zlaghe( lapack_int* n, lapack_int* k, const double* d, - lapack_complex_double* a, lapack_int* lda, - lapack_int* iseed, lapack_complex_double* work, - lapack_int *info ); -void LAPACK_slagsy( lapack_int* n, lapack_int* k, const float* d, float* a, - lapack_int* lda, lapack_int* iseed, float* work, - lapack_int *info ); -void LAPACK_dlagsy( lapack_int* n, lapack_int* k, const double* d, double* a, - lapack_int* lda, lapack_int* iseed, double* work, - lapack_int *info ); -void LAPACK_clagsy( lapack_int* n, lapack_int* k, const float* d, - lapack_complex_float* a, lapack_int* lda, lapack_int* iseed, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zlagsy( lapack_int* n, lapack_int* k, const double* d, - lapack_complex_double* a, lapack_int* lda, - lapack_int* iseed, lapack_complex_double* work, - lapack_int *info ); -void LAPACK_slapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n, - float* x, lapack_int* ldx, lapack_int* k ); -void LAPACK_dlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n, - double* x, lapack_int* ldx, lapack_int* k ); -void LAPACK_clapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n, - lapack_complex_float* x, lapack_int* ldx, lapack_int* k ); -void LAPACK_zlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n, - lapack_complex_double* x, lapack_int* ldx, lapack_int* k ); -void LAPACK_slapmt( lapack_logical* forwrd, lapack_int* m, lapack_int* n, - float* x, lapack_int* ldx, lapack_int* k ); -void LAPACK_dlapmt( lapack_logical* forwrd, lapack_int* m, lapack_int* n, - double* x, lapack_int* ldx, lapack_int* k ); -void LAPACK_clapmt( lapack_logical* forwrd, lapack_int* m, lapack_int* n, - lapack_complex_float* x, lapack_int* ldx, lapack_int* k ); -void LAPACK_zlapmt( lapack_logical* forwrd, lapack_int* m, lapack_int* n, - lapack_complex_double* x, lapack_int* ldx, lapack_int* k ); -float LAPACK_slapy2( float* x, float* y ); -double LAPACK_dlapy2( double* x, double* y ); -float 
LAPACK_slapy3( float* x, float* y, float* z ); -double LAPACK_dlapy3( double* x, double* y, double* z ); -void LAPACK_slartgp( float* f, float* g, float* cs, float* sn, float* r ); -void LAPACK_dlartgp( double* f, double* g, double* cs, double* sn, double* r ); -void LAPACK_slartgs( float* x, float* y, float* sigma, float* cs, float* sn ); -void LAPACK_dlartgs( double* x, double* y, double* sigma, double* cs, - double* sn ); -// LAPACK 3.3.0 -void LAPACK_cbbcsd( char* jobu1, char* jobu2, - char* jobv1t, char* jobv2t, char* trans, - lapack_int* m, lapack_int* p, lapack_int* q, - float* theta, float* phi, - lapack_complex_float* u1, lapack_int* ldu1, - lapack_complex_float* u2, lapack_int* ldu2, - lapack_complex_float* v1t, lapack_int* ldv1t, - lapack_complex_float* v2t, lapack_int* ldv2t, - float* b11d, float* b11e, float* b12d, - float* b12e, float* b21d, float* b21e, - float* b22d, float* b22e, float* rwork, - lapack_int* lrwork , lapack_int *info ); -void LAPACK_cheswapr( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int* i1, lapack_int* i2 ); -void LAPACK_chetri2( char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_chetri2x( char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_float* work, lapack_int* nb , lapack_int *info ); -void LAPACK_chetrs2( char* uplo, lapack_int* n, - lapack_int* nrhs, const lapack_complex_float* a, - lapack_int* lda, const lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work , lapack_int *info ); -void LAPACK_csyconv( char* uplo, char* way, - lapack_int* n, lapack_complex_float* a, - lapack_int* lda, const lapack_int* ipiv, - lapack_complex_float* e , lapack_int *info ); -void LAPACK_csyswapr( char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_int* i1, lapack_int* i2 ); -void LAPACK_csytri2( char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_csytri2x( char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_float* work, lapack_int* nb , lapack_int *info ); -void LAPACK_csytrs2( char* uplo, lapack_int* n, - lapack_int* nrhs, const lapack_complex_float* a, - lapack_int* lda, const lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work , lapack_int *info ); -void LAPACK_cunbdb( char* trans, char* signs, - lapack_int* m, lapack_int* p, lapack_int* q, - lapack_complex_float* x11, lapack_int* ldx11, - lapack_complex_float* x12, lapack_int* ldx12, - lapack_complex_float* x21, lapack_int* ldx21, - lapack_complex_float* x22, lapack_int* ldx22, - float* theta, float* phi, - lapack_complex_float* taup1, - lapack_complex_float* taup2, - lapack_complex_float* tauq1, - lapack_complex_float* tauq2, - lapack_complex_float* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_cuncsd( char* jobu1, char* jobu2, - char* jobv1t, char* jobv2t, char* trans, - char* signs, lapack_int* m, lapack_int* p, - lapack_int* q, lapack_complex_float* x11, - lapack_int* ldx11, lapack_complex_float* x12, - lapack_int* ldx12, lapack_complex_float* x21, - lapack_int* ldx21, lapack_complex_float* x22, - lapack_int* ldx22, float* theta, - lapack_complex_float* u1, lapack_int* ldu1, - 
lapack_complex_float* u2, lapack_int* ldu2, - lapack_complex_float* v1t, lapack_int* ldv1t, - lapack_complex_float* v2t, lapack_int* ldv2t, - lapack_complex_float* work, lapack_int* lwork, - float* rwork, lapack_int* lrwork, - lapack_int* iwork , lapack_int *info ); -void LAPACK_cuncsd2by1( char* jobu1, char* jobu2, - char* jobv1t, lapack_int* m, lapack_int* p, - lapack_int* q, lapack_complex_float* x11, - lapack_int* ldx11, lapack_complex_float* x21, - lapack_int* ldx21, float* theta, - lapack_complex_float* u1, lapack_int* ldu1, - lapack_complex_float* u2, lapack_int* ldu2, - lapack_complex_float* v1t, lapack_int* ldv1t, - lapack_complex_float* work, lapack_int* lwork, - float* rwork, lapack_int* lrwork, - lapack_int* iwork , lapack_int *info ); -void LAPACK_dbbcsd( char* jobu1, char* jobu2, - char* jobv1t, char* jobv2t, char* trans, - lapack_int* m, lapack_int* p, lapack_int* q, - double* theta, double* phi, double* u1, - lapack_int* ldu1, double* u2, lapack_int* ldu2, - double* v1t, lapack_int* ldv1t, double* v2t, - lapack_int* ldv2t, double* b11d, double* b11e, - double* b12d, double* b12e, double* b21d, - double* b21e, double* b22d, double* b22e, - double* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_dorbdb( char* trans, char* signs, - lapack_int* m, lapack_int* p, lapack_int* q, - double* x11, lapack_int* ldx11, double* x12, - lapack_int* ldx12, double* x21, lapack_int* ldx21, - double* x22, lapack_int* ldx22, double* theta, - double* phi, double* taup1, double* taup2, - double* tauq1, double* tauq2, double* work, - lapack_int* lwork , lapack_int *info ); -void LAPACK_dorcsd( char* jobu1, char* jobu2, - char* jobv1t, char* jobv2t, char* trans, - char* signs, lapack_int* m, lapack_int* p, - lapack_int* q, double* x11, lapack_int* ldx11, - double* x12, lapack_int* ldx12, double* x21, - lapack_int* ldx21, double* x22, lapack_int* ldx22, - double* theta, double* u1, lapack_int* ldu1, - double* u2, lapack_int* ldu2, double* v1t, - lapack_int* ldv1t, double* v2t, lapack_int* ldv2t, - double* work, lapack_int* lwork, - lapack_int* iwork , lapack_int *info ); -void LAPACK_dorcsd2by1( char* jobu1, char* jobu2, - char* jobv1t, lapack_int* m, lapack_int* p, - lapack_int* q, double* x11, lapack_int* ldx11, - double* x21, lapack_int* ldx21, - double* theta, double* u1, lapack_int* ldu1, - double* u2, lapack_int* ldu2, double* v1t, - lapack_int* ldv1t, double* work, lapack_int* lwork, - lapack_int* iwork , lapack_int *info ); -void LAPACK_dsyconv( char* uplo, char* way, - lapack_int* n, double* a, lapack_int* lda, - const lapack_int* ipiv, double* e , lapack_int *info ); -void LAPACK_dsyswapr( char* uplo, lapack_int* n, double* a, - lapack_int* lda, lapack_int* i1, lapack_int* i2 ); -void LAPACK_dsytri2( char* uplo, lapack_int* n, - double* a, lapack_int* lda, - const lapack_int* ipiv, - double* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_dsytri2x( char* uplo, lapack_int* n, - double* a, lapack_int* lda, - const lapack_int* ipiv, double* work, - lapack_int* nb , lapack_int *info ); -void LAPACK_dsytrs2( char* uplo, lapack_int* n, - lapack_int* nrhs, const double* a, - lapack_int* lda, const lapack_int* ipiv, - double* b, lapack_int* ldb, double* work , lapack_int *info ); -void LAPACK_sbbcsd( char* jobu1, char* jobu2, - char* jobv1t, char* jobv2t, char* trans, - lapack_int* m, lapack_int* p, lapack_int* q, - float* theta, float* phi, float* u1, - lapack_int* ldu1, float* u2, lapack_int* ldu2, - float* v1t, lapack_int* ldv1t, float* v2t, - lapack_int* ldv2t, float* 
b11d, float* b11e, - float* b12d, float* b12e, float* b21d, - float* b21e, float* b22d, float* b22e, - float* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_sorbdb( char* trans, char* signs, - lapack_int* m, lapack_int* p, lapack_int* q, - float* x11, lapack_int* ldx11, float* x12, - lapack_int* ldx12, float* x21, lapack_int* ldx21, - float* x22, lapack_int* ldx22, float* theta, - float* phi, float* taup1, float* taup2, - float* tauq1, float* tauq2, float* work, - lapack_int* lwork , lapack_int *info ); -void LAPACK_sorcsd( char* jobu1, char* jobu2, - char* jobv1t, char* jobv2t, char* trans, - char* signs, lapack_int* m, lapack_int* p, - lapack_int* q, float* x11, lapack_int* ldx11, - float* x12, lapack_int* ldx12, float* x21, - lapack_int* ldx21, float* x22, lapack_int* ldx22, - float* theta, float* u1, lapack_int* ldu1, - float* u2, lapack_int* ldu2, float* v1t, - lapack_int* ldv1t, float* v2t, lapack_int* ldv2t, - float* work, lapack_int* lwork, - lapack_int* iwork , lapack_int *info ); -void LAPACK_sorcsd2by1( char* jobu1, char* jobu2, - char* jobv1t, lapack_int* m, lapack_int* p, - lapack_int* q, float* x11, lapack_int* ldx11, - float* x21, lapack_int* ldx21, - float* theta, float* u1, lapack_int* ldu1, - float* u2, lapack_int* ldu2, float* v1t, - lapack_int* ldv1t, float* work, lapack_int* lwork, - lapack_int* iwork , lapack_int *info ); -void LAPACK_ssyconv( char* uplo, char* way, - lapack_int* n, float* a, lapack_int* lda, - const lapack_int* ipiv, float* e , lapack_int *info ); -void LAPACK_ssyswapr( char* uplo, lapack_int* n, float* a, - lapack_int* lda, lapack_int* i1, lapack_int* i2 ); -void LAPACK_ssytri2( char* uplo, lapack_int* n, - float* a, lapack_int* lda, - const lapack_int* ipiv, - float* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_ssytri2x( char* uplo, lapack_int* n, - float* a, lapack_int* lda, - const lapack_int* ipiv, float* work, - lapack_int* nb , lapack_int *info ); -void LAPACK_ssytrs2( char* uplo, lapack_int* n, - lapack_int* nrhs, const float* a, - lapack_int* lda, const lapack_int* ipiv, - float* b, lapack_int* ldb, float* work , lapack_int *info ); -void LAPACK_zbbcsd( char* jobu1, char* jobu2, - char* jobv1t, char* jobv2t, char* trans, - lapack_int* m, lapack_int* p, lapack_int* q, - double* theta, double* phi, - lapack_complex_double* u1, lapack_int* ldu1, - lapack_complex_double* u2, lapack_int* ldu2, - lapack_complex_double* v1t, lapack_int* ldv1t, - lapack_complex_double* v2t, lapack_int* ldv2t, - double* b11d, double* b11e, double* b12d, - double* b12e, double* b21d, double* b21e, - double* b22d, double* b22e, double* rwork, - lapack_int* lrwork , lapack_int *info ); -void LAPACK_zheswapr( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* i1, lapack_int* i2 ); -void LAPACK_zhetri2( char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_zhetri2x( char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_double* work, lapack_int* nb , lapack_int *info ); -void LAPACK_zhetrs2( char* uplo, lapack_int* n, - lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work , lapack_int *info ); -void LAPACK_zsyconv( char* uplo, char* way, - lapack_int* n, lapack_complex_double* a, - lapack_int* lda, const 
lapack_int* ipiv, - lapack_complex_double* e , lapack_int *info ); -void LAPACK_zsyswapr( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* i1, - lapack_int* i2 ); -void LAPACK_zsytri2( char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_zsytri2x( char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_double* work, lapack_int* nb , lapack_int *info ); -void LAPACK_zsytrs2( char* uplo, lapack_int* n, - lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work , lapack_int *info ); -void LAPACK_zunbdb( char* trans, char* signs, - lapack_int* m, lapack_int* p, lapack_int* q, - lapack_complex_double* x11, lapack_int* ldx11, - lapack_complex_double* x12, lapack_int* ldx12, - lapack_complex_double* x21, lapack_int* ldx21, - lapack_complex_double* x22, lapack_int* ldx22, - double* theta, double* phi, - lapack_complex_double* taup1, - lapack_complex_double* taup2, - lapack_complex_double* tauq1, - lapack_complex_double* tauq2, - lapack_complex_double* work, lapack_int* lwork , lapack_int *info ); -void LAPACK_zuncsd( char* jobu1, char* jobu2, - char* jobv1t, char* jobv2t, char* trans, - char* signs, lapack_int* m, lapack_int* p, - lapack_int* q, lapack_complex_double* x11, - lapack_int* ldx11, lapack_complex_double* x12, - lapack_int* ldx12, lapack_complex_double* x21, - lapack_int* ldx21, lapack_complex_double* x22, - lapack_int* ldx22, double* theta, - lapack_complex_double* u1, lapack_int* ldu1, - lapack_complex_double* u2, lapack_int* ldu2, - lapack_complex_double* v1t, lapack_int* ldv1t, - lapack_complex_double* v2t, lapack_int* ldv2t, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int* lrwork, - lapack_int* iwork , lapack_int *info ); -void LAPACK_zuncsd2by1( char* jobu1, char* jobu2, - char* jobv1t, lapack_int* m, lapack_int* p, - lapack_int* q, lapack_complex_double* x11, - lapack_int* ldx11, lapack_complex_double* x21, - lapack_int* ldx21, double* theta, - lapack_complex_double* u1, lapack_int* ldu1, - lapack_complex_double* u2, lapack_int* ldu2, - lapack_complex_double* v1t, lapack_int* ldv1t, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int* lrwork, - lapack_int* iwork , lapack_int *info ); -// LAPACK 3.4.0 -void LAPACK_sgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* nb, const float* v, - lapack_int* ldv, const float* t, lapack_int* ldt, float* c, - lapack_int* ldc, float* work, lapack_int *info ); -void LAPACK_dgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* nb, const double* v, - lapack_int* ldv, const double* t, lapack_int* ldt, - double* c, lapack_int* ldc, double* work, - lapack_int *info ); -void LAPACK_cgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* nb, - const lapack_complex_float* v, lapack_int* ldv, - const lapack_complex_float* t, lapack_int* ldt, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* nb, - const lapack_complex_double* v, lapack_int* ldv, - const lapack_complex_double* t, lapack_int* ldt, - 
lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_sgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, float* a, - lapack_int* lda, float* t, lapack_int* ldt, float* work, - lapack_int *info ); -void LAPACK_dgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, double* a, - lapack_int* lda, double* t, lapack_int* ldt, double* work, - lapack_int *info ); -void LAPACK_cgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* t, lapack_int* ldt, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_zgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* t, lapack_int* ldt, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_sgeqrt2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* t, lapack_int* ldt, lapack_int *info ); -void LAPACK_dgeqrt2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* t, lapack_int* ldt, lapack_int *info ); -void LAPACK_cgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* t, lapack_int* ldt, - lapack_int *info ); -void LAPACK_zgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* t, lapack_int* ldt, - lapack_int *info ); -void LAPACK_sgeqrt3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* t, lapack_int* ldt, lapack_int *info ); -void LAPACK_dgeqrt3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* t, lapack_int* ldt, lapack_int *info ); -void LAPACK_cgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* t, lapack_int* ldt, - lapack_int *info ); -void LAPACK_zgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* t, lapack_int* ldt, - lapack_int *info ); -void LAPACK_stpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* l, lapack_int* nb, - const float* v, lapack_int* ldv, const float* t, - lapack_int* ldt, float* a, lapack_int* lda, float* b, - lapack_int* ldb, float* work, lapack_int *info ); -void LAPACK_dtpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* l, lapack_int* nb, - const double* v, lapack_int* ldv, const double* t, - lapack_int* ldt, double* a, lapack_int* lda, double* b, - lapack_int* ldb, double* work, lapack_int *info ); -void LAPACK_ctpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* l, lapack_int* nb, - const lapack_complex_float* v, lapack_int* ldv, - const lapack_complex_float* t, lapack_int* ldt, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_ztpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n, - lapack_int* k, lapack_int* l, lapack_int* nb, - const lapack_complex_double* v, lapack_int* ldv, - const lapack_complex_double* t, lapack_int* ldt, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_stpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb, - float* a, lapack_int* lda, float* b, lapack_int* ldb, - float* t, lapack_int* ldt, float* work, lapack_int *info ); -void LAPACK_dtpqrt( lapack_int* m, lapack_int* n, lapack_int* 
l, lapack_int* nb, - double* a, lapack_int* lda, double* b, lapack_int* ldb, - double* t, lapack_int* ldt, double* work, - lapack_int *info ); -void LAPACK_ctpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* t, lapack_int* ldt, - lapack_complex_float* work, lapack_int *info ); -void LAPACK_ztpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* t, lapack_int* ldt, - lapack_complex_double* work, lapack_int *info ); -void LAPACK_stpqrt2( lapack_int* m, lapack_int* n, lapack_int* l, - float* a, lapack_int* lda, - float* b, lapack_int* ldb, - float* t, lapack_int* ldt, - lapack_int *info ); -void LAPACK_dtpqrt2( lapack_int* m, lapack_int* n, lapack_int* l, - double* a, lapack_int* lda, - double* b, lapack_int* ldb, - double* t, lapack_int* ldt, - lapack_int *info ); -void LAPACK_ctpqrt2( lapack_int* m, lapack_int* n, lapack_int* l, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* t, lapack_int* ldt, - lapack_int *info ); -void LAPACK_ztpqrt2( lapack_int* m, lapack_int* n, lapack_int* l, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* t, lapack_int* ldt, - lapack_int *info ); -void LAPACK_stprfb( char* side, char* trans, char* direct, char* storev, - lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l, - const float* v, lapack_int* ldv, const float* t, - lapack_int* ldt, float* a, lapack_int* lda, float* b, - lapack_int* ldb, const float* work, - lapack_int* ldwork ); -void LAPACK_dtprfb( char* side, char* trans, char* direct, char* storev, - lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l, - const double* v, lapack_int* ldv, const double* t, - lapack_int* ldt, double* a, lapack_int* lda, double* b, - lapack_int* ldb, const double* work, - lapack_int* ldwork ); -void LAPACK_ctprfb( char* side, char* trans, char* direct, char* storev, - lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l, - const lapack_complex_float* v, lapack_int* ldv, - const lapack_complex_float* t, lapack_int* ldt, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* ldwork ); -void LAPACK_ztprfb( char* side, char* trans, char* direct, char* storev, - lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l, - const lapack_complex_double* v, lapack_int* ldv, - const lapack_complex_double* t, lapack_int* ldt, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* ldwork ); -// LAPACK 3.5.0 -void LAPACK_ssysv_rook( char* uplo, lapack_int* n, lapack_int* nrhs, float* a, - lapack_int* lda, lapack_int* ipiv, float* b, - lapack_int* ldb, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_ssytrf_rook( char* uplo, lapack_int* n, float* a, lapack_int* lda, - lapack_int* ipiv, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dsysv_rook( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, - lapack_int* lda, lapack_int* ipiv, double* b, - lapack_int* ldb, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dsytrf_rook( char* uplo, lapack_int* n, double* a, lapack_int* lda, - lapack_int* ipiv, double* work, lapack_int* lwork, 
- lapack_int *info ); -void LAPACK_csysv_rook( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_int* ipiv, lapack_complex_float* b, - lapack_int* ldb, lapack_complex_float* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_csytrf_rook( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zsysv_rook( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_int* ipiv, lapack_complex_double* b, - lapack_int* ldb, lapack_complex_double* work, - lapack_int* lwork, lapack_int *info ); -void LAPACK_zsytrf_rook( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_ssytrs_rook( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a, - lapack_int* lda, const lapack_int* ipiv, float* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_dsytrs_rook( char* uplo, lapack_int* n, lapack_int* nrhs, - const double* a, lapack_int* lda, const lapack_int* ipiv, - double* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_csytrs_rook( char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_float* a, lapack_int* lda, - const lapack_int* ipiv, lapack_complex_float* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_zsytrs_rook( char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, lapack_complex_double* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_chetrf_rook( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zhetrf_rook( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chetrs_rook( char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_float* a, lapack_int* lda, - const lapack_int* ipiv, lapack_complex_float* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_zhetrs_rook( char* uplo, lapack_int* n, lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, lapack_complex_double* b, - lapack_int* ldb, lapack_int *info ); - -void LAPACK_csyr( char* uplo, lapack_int* n, lapack_complex_float* alpha, - const lapack_complex_float* x, lapack_int* incx, - lapack_complex_float* a, lapack_int* lda ); -void LAPACK_zsyr( char* uplo, lapack_int* n, lapack_complex_double* alpha, - const lapack_complex_double* x, lapack_int* incx, - lapack_complex_double* a, lapack_int* lda ); -void LAPACK_ilaver( const lapack_int* vers_major, const lapack_int* vers_minor, - const lapack_int* vers_patch ); - -// LAPACK 3.7.0 -void LAPACK_ssysv_aa( char* uplo, lapack_int* n, lapack_int* nrhs, float* a, - lapack_int* lda, lapack_int* ipiv, float* b, lapack_int* ldb, - float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsysv_aa( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, - lapack_int* lda, lapack_int* ipiv, double* b, - lapack_int* ldb, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_csysv_aa( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* 
work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zsysv_aa( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chesv_aa( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zhesv_aa( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); - -void LAPACK_ssytrf_aa( char* uplo, lapack_int* n, float* a, lapack_int* lda, - lapack_int* ipiv, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dsytrf_aa( char* uplo, lapack_int* n, double* a, lapack_int* lda, - lapack_int* ipiv, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_csytrf_aa( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zsytrf_aa( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chetrf_aa( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zhetrf_aa( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); - -void LAPACK_ssytrs_aa( char* uplo, lapack_int* n, - lapack_int* nrhs, const float* a, - lapack_int* lda, const lapack_int* ipiv, - float* b, lapack_int* ldb, float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsytrs_aa( char* uplo, lapack_int* n, - lapack_int* nrhs, const double* a, - lapack_int* lda, const lapack_int* ipiv, - double* b, lapack_int* ldb, double* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_csytrs_aa( char* uplo, lapack_int* n, - lapack_int* nrhs, const lapack_complex_float* a, - lapack_int* lda, const lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work , lapack_int* lwork, lapack_int *info ); -void LAPACK_zsytrs_aa( char* uplo, lapack_int* n, - lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_chetrs_aa( char* uplo, lapack_int* n, - lapack_int* nrhs, const lapack_complex_float* a, - lapack_int* lda, const lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work , lapack_int* lwork, lapack_int *info ); -void LAPACK_zhetrs_aa( char* uplo, lapack_int* n, - lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - const lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, lapack_int *info ); - -void LAPACK_ssysv_rk( char* uplo, lapack_int* n, lapack_int* nrhs, float* a, - lapack_int* lda, float* e, lapack_int* ipiv, float* b, lapack_int* ldb, - float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsysv_rk( char* uplo, 
lapack_int* n, lapack_int* nrhs, double* a, - lapack_int* lda, double* e, lapack_int* ipiv, double* b, - lapack_int* ldb, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_csysv_rk( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* e, lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zsysv_rk( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* e, lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chesv_rk( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* e, lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zhesv_rk( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* e, lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); - -void LAPACK_ssytrf_rk( char* uplo, lapack_int* n, float* a, lapack_int* lda, - float* e, lapack_int* ipiv, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dsytrf_rk( char* uplo, lapack_int* n, double* a, lapack_int* lda, - double* e, lapack_int* ipiv, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_csytrf_rk( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* e, lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zsytrf_rk( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* e, lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chetrf_rk( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, lapack_complex_float* e, lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zhetrf_rk( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, lapack_complex_double* e, lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); - -void LAPACK_ssytrs_3( char* uplo, lapack_int* n, - lapack_int* nrhs, const float* a, - lapack_int* lda, const float* e, const lapack_int* ipiv, - float* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_dsytrs_3( char* uplo, lapack_int* n, - lapack_int* nrhs, const double* a, - lapack_int* lda, const double* e, const lapack_int* ipiv, - double* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_csytrs_3( char* uplo, lapack_int* n, - lapack_int* nrhs, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* e, - const lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_zsytrs_3( char* uplo, lapack_int* n, - lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* e, const lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, lapack_int *info ); -void LAPACK_chetrs_3( char* uplo, lapack_int* n, - lapack_int* nrhs, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* e, - const lapack_int* ipiv, - lapack_complex_float* b, lapack_int* ldb, lapack_int 
*info ); -void LAPACK_zhetrs_3( char* uplo, lapack_int* n, - lapack_int* nrhs, - const lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* e, - const lapack_int* ipiv, - lapack_complex_double* b, lapack_int* ldb, lapack_int *info ); - -void LAPACK_ssytri_3( char* uplo, lapack_int* n, float* a, lapack_int* lda, const float* e, - const lapack_int* ipiv, float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsytri_3( char* uplo, lapack_int* n, double* a, lapack_int* lda, const double* e, - const lapack_int* ipiv, double* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_csytri_3( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* e, const lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_zsytri_3( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* e, const lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_chetri_3( char* uplo, lapack_int* n, lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* e, const lapack_int* ipiv, - lapack_complex_float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_zhetri_3( char* uplo, lapack_int* n, lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* e, const lapack_int* ipiv, - lapack_complex_double* work, lapack_int* lwork, lapack_int *info ); - -void LAPACK_ssycon_3( char* uplo, lapack_int* n, const float* a, lapack_int* lda, const float* e, - const lapack_int* ipiv, float* anorm, float* rcond, - float* work, lapack_int* iwork, lapack_int *info ); -void LAPACK_dsycon_3( char* uplo, lapack_int* n, const double* a, lapack_int* lda, const double* e, - const lapack_int* ipiv, double* anorm, double* rcond, - double* work, lapack_int* iwork, lapack_int *info ); -void LAPACK_csycon_3( char* uplo, lapack_int* n, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* e, const lapack_int* ipiv, float* anorm, - float* rcond, lapack_complex_float* work, - lapack_int *info ); -void LAPACK_zsycon_3( char* uplo, lapack_int* n, const lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* e, const lapack_int* ipiv, double* anorm, - double* rcond, lapack_complex_double* work, - lapack_int *info ); -void LAPACK_checon_3( char* uplo, lapack_int* n, const lapack_complex_float* a, - lapack_int* lda, const lapack_complex_float* e, const lapack_int* ipiv, float* anorm, - float* rcond, lapack_complex_float* work, - lapack_int *info ); -void LAPACK_zhecon_3( char* uplo, lapack_int* n, const lapack_complex_double* a, - lapack_int* lda, const lapack_complex_double* e, const lapack_int* ipiv, double* anorm, - double* rcond, lapack_complex_double* work, - lapack_int *info ); - -void LAPACK_sgelq( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* t, lapack_int* tsize, float* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_dgelq( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* t, lapack_int* tsize, double* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_cgelq( lapack_int* m, lapack_int* n, lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* t, lapack_int* tsize, lapack_complex_float* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_zgelq( lapack_int* m, lapack_int* n, lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* t, lapack_int* tsize, lapack_complex_double* 
work, lapack_int* lwork, - lapack_int* info ); - -void LAPACK_sgemlq( char* side, char* trans, lapack_int* m, lapack_int* n, lapack_int* k, - const float* a, lapack_int* lda, - const float* t, lapack_int* tsize, - float* c, lapack_int* ldc, - float* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_dgemlq( char* side, char* trans, lapack_int* m, lapack_int* n, lapack_int* k, - const double* a, lapack_int* lda, - const double* t, lapack_int* tsize, - double* c, lapack_int* ldc, - double* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_cgemlq( char* side, char* trans, lapack_int* m, lapack_int* n, lapack_int* k, - const lapack_complex_float* a, lapack_int* lda, - const lapack_complex_float* t, lapack_int* tsize, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_zgemlq( char* side, char* trans, lapack_int* m, lapack_int* n, lapack_int* k, - const lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* t, lapack_int* tsize, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int* lwork, - lapack_int* info ); - -void LAPACK_sgeqr( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, - float* t, lapack_int* tsize, float* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_dgeqr( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, - double* t, lapack_int* tsize, double* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_cgeqr( lapack_int* m, lapack_int* n, lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* t, lapack_int* tsize, lapack_complex_float* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_zgeqr( lapack_int* m, lapack_int* n, lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* t, lapack_int* tsize, lapack_complex_double* work, lapack_int* lwork, - lapack_int* info ); - -void LAPACK_sgemqr( char* side, char* trans, lapack_int* m, lapack_int* n, lapack_int* k, - const float* a, lapack_int* lda, - const float* t, lapack_int* tsize, - float* c, lapack_int* ldc, - float* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_dgemqr( char* side, char* trans, lapack_int* m, lapack_int* n, lapack_int* k, - const double* a, lapack_int* lda, - const double* t, lapack_int* tsize, - double* c, lapack_int* ldc, - double* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_cgemqr( char* side, char* trans, lapack_int* m, lapack_int* n, lapack_int* k, - const lapack_complex_float* a, lapack_int* lda, - const lapack_complex_float* t, lapack_int* tsize, - lapack_complex_float* c, lapack_int* ldc, - lapack_complex_float* work, lapack_int* lwork, - lapack_int* info ); -void LAPACK_zgemqr( char* side, char* trans, lapack_int* m, lapack_int* n, lapack_int* k, - const lapack_complex_double* a, lapack_int* lda, - const lapack_complex_double* t, lapack_int* tsize, - lapack_complex_double* c, lapack_int* ldc, - lapack_complex_double* work, lapack_int* lwork, - lapack_int* info ); - -void LAPACK_sgetsls( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs, - float* a, lapack_int* lda, float* b, lapack_int* ldb, - float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dgetsls( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs, - double* a, lapack_int* lda, double* b, lapack_int* ldb, - double* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_cgetsls( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, 
- lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zgetsls( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); - -void LAPACK_ssyev_2stage( char* jobz, char* uplo, lapack_int* n, float* a, - lapack_int* lda, float* w, float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_dsyev_2stage( char* jobz, char* uplo, lapack_int* n, double* a, - lapack_int* lda, double* w, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_cheev_2stage( char* jobz, char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, float* w, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int *info ); -void LAPACK_zheev_2stage( char* jobz, char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, double* w, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int *info ); -void LAPACK_ssyevd_2stage( char* jobz, char* uplo, lapack_int* n, float* a, - lapack_int* lda, float* w, float* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_dsyevd_2stage( char* jobz, char* uplo, lapack_int* n, double* a, - lapack_int* lda, double* w, double* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_cheevd_2stage( char* jobz, char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, float* w, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork, - lapack_int *info ); -void LAPACK_zheevd_2stage( char* jobz, char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, double* w, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int* lrwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_ssyevx_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - float* a, lapack_int* lda, float* vl, float* vu, - lapack_int* il, lapack_int* iu, float* abstol, - lapack_int* m, float* w, float* z, lapack_int* ldz, - float* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_dsyevx_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - double* a, lapack_int* lda, double* vl, double* vu, - lapack_int* il, lapack_int* iu, double* abstol, - lapack_int* m, double* w, double* z, lapack_int* ldz, - double* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_cheevx_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, float* vl, - float* vu, lapack_int* il, lapack_int* iu, float* abstol, - lapack_int* m, float* w, lapack_complex_float* z, - lapack_int* ldz, lapack_complex_float* work, - lapack_int* lwork, float* rwork, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_zheevx_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, double* vl, - double* vu, lapack_int* il, lapack_int* iu, double* abstol, - lapack_int* m, double* w, lapack_complex_double* z, - lapack_int* ldz, lapack_complex_double* work, - lapack_int* lwork, double* rwork, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_ssyevr_2stage( char* jobz, char* range, char* uplo, 
lapack_int* n, - float* a, lapack_int* lda, float* vl, float* vu, - lapack_int* il, lapack_int* iu, float* abstol, - lapack_int* m, float* w, float* z, lapack_int* ldz, - lapack_int* isuppz, float* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_dsyevr_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - double* a, lapack_int* lda, double* vl, double* vu, - lapack_int* il, lapack_int* iu, double* abstol, - lapack_int* m, double* w, double* z, lapack_int* ldz, - lapack_int* isuppz, double* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_cheevr_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, float* vl, - float* vu, lapack_int* il, lapack_int* iu, float* abstol, - lapack_int* m, float* w, lapack_complex_float* z, - lapack_int* ldz, lapack_int* isuppz, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork, - lapack_int *info ); -void LAPACK_zheevr_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, double* vl, - double* vu, lapack_int* il, lapack_int* iu, double* abstol, - lapack_int* m, double* w, lapack_complex_double* z, - lapack_int* ldz, lapack_int* isuppz, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int* lrwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_ssbev_2stage( char* jobz, char* uplo, lapack_int* n, lapack_int* kd, - float* ab, lapack_int* ldab, float* w, float* z, - lapack_int* ldz, float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsbev_2stage( char* jobz, char* uplo, lapack_int* n, lapack_int* kd, - double* ab, lapack_int* ldab, double* w, double* z, - lapack_int* ldz, double* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_chbev_2stage( char* jobz, char* uplo, lapack_int* n, lapack_int* kd, - lapack_complex_float* ab, lapack_int* ldab, float* w, - lapack_complex_float* z, lapack_int* ldz, - lapack_complex_float* work, lapack_int* lwork, float* rwork, lapack_int *info ); -void LAPACK_zhbev_2stage( char* jobz, char* uplo, lapack_int* n, lapack_int* kd, - lapack_complex_double* ab, lapack_int* ldab, double* w, - lapack_complex_double* z, lapack_int* ldz, - lapack_complex_double* work, lapack_int* lwork, double* rwork, - lapack_int *info ); -void LAPACK_ssbevd_2stage( char* jobz, char* uplo, lapack_int* n, lapack_int* kd, - float* ab, lapack_int* ldab, float* w, float* z, - lapack_int* ldz, float* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_dsbevd_2stage( char* jobz, char* uplo, lapack_int* n, lapack_int* kd, - double* ab, lapack_int* ldab, double* w, double* z, - lapack_int* ldz, double* work, lapack_int* lwork, - lapack_int* iwork, lapack_int* liwork, lapack_int *info ); -void LAPACK_chbevd_2stage( char* jobz, char* uplo, lapack_int* n, lapack_int* kd, - lapack_complex_float* ab, lapack_int* ldab, float* w, - lapack_complex_float* z, lapack_int* ldz, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork, - lapack_int *info ); -void LAPACK_zhbevd_2stage( char* jobz, char* uplo, lapack_int* n, lapack_int* kd, - lapack_complex_double* ab, lapack_int* ldab, double* w, - lapack_complex_double* z, lapack_int* ldz, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int* 
lrwork, lapack_int* iwork, - lapack_int* liwork, lapack_int *info ); -void LAPACK_ssbevx_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_int* kd, float* ab, lapack_int* ldab, float* q, - lapack_int* ldq, float* vl, float* vu, lapack_int* il, - lapack_int* iu, float* abstol, lapack_int* m, float* w, - float* z, lapack_int* ldz, float* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_dsbevx_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_int* kd, double* ab, lapack_int* ldab, double* q, - lapack_int* ldq, double* vl, double* vu, lapack_int* il, - lapack_int* iu, double* abstol, lapack_int* m, double* w, - double* z, lapack_int* ldz, double* work, lapack_int* lwork, lapack_int* iwork, - lapack_int* ifail, lapack_int *info ); -void LAPACK_chbevx_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_int* kd, lapack_complex_float* ab, lapack_int* ldab, - lapack_complex_float* q, lapack_int* ldq, float* vl, - float* vu, lapack_int* il, lapack_int* iu, float* abstol, - lapack_int* m, float* w, lapack_complex_float* z, - lapack_int* ldz, lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int* iwork, lapack_int* ifail, lapack_int *info ); -void LAPACK_zhbevx_2stage( char* jobz, char* range, char* uplo, lapack_int* n, - lapack_int* kd, lapack_complex_double* ab, lapack_int* ldab, - lapack_complex_double* q, lapack_int* ldq, double* vl, - double* vu, lapack_int* il, lapack_int* iu, double* abstol, - lapack_int* m, double* w, lapack_complex_double* z, - lapack_int* ldz, lapack_complex_double* work, lapack_int* lwork, double* rwork, - lapack_int* iwork, lapack_int* ifail, lapack_int *info ); -void LAPACK_ssygv_2stage( lapack_int* itype, char* jobz, char* uplo, lapack_int* n, - float* a, lapack_int* lda, float* b, lapack_int* ldb, - float* w, float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsygv_2stage( lapack_int* itype, char* jobz, char* uplo, lapack_int* n, - double* a, lapack_int* lda, double* b, lapack_int* ldb, - double* w, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chegv_2stage( lapack_int* itype, char* jobz, char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* b, lapack_int* ldb, float* w, - lapack_complex_float* work, lapack_int* lwork, float* rwork, - lapack_int *info ); -void LAPACK_zhegv_2stage( lapack_int* itype, char* jobz, char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* b, lapack_int* ldb, double* w, - lapack_complex_double* work, lapack_int* lwork, - double* rwork, lapack_int *info ); - -//LAPACK 3.8.0 - -void LAPACK_ssysv_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - float* a, lapack_int* lda, float* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, float* b, lapack_int* ldb, - float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsysv_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, - lapack_int* lda, double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, double* b, - lapack_int* ldb, double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_csysv_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void 
LAPACK_zsysv_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chesv_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_float* b, lapack_int* ldb, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zhesv_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_double* b, lapack_int* ldb, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); - -void LAPACK_ssytrf_aa_2stage( char* uplo, lapack_int* n, - float* a, lapack_int* lda, float* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - float* work, lapack_int* lwork, lapack_int *info ); -void LAPACK_dsytrf_aa_2stage( char* uplo, lapack_int* n, double* a, - lapack_int* lda, double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_csytrf_aa_2stage( char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zsytrf_aa_2stage( char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_chetrf_aa_2stage( char* uplo, lapack_int* n, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_float* work, lapack_int* lwork, - lapack_int *info ); -void LAPACK_zhetrf_aa_2stage( char* uplo, lapack_int* n, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_double* work, lapack_int* lwork, - lapack_int *info ); - -void LAPACK_ssytrs_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - float* a, lapack_int* lda, float* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, float* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_dsytrs_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, double* a, - lapack_int* lda, double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, double* b, - lapack_int* ldb, lapack_int *info ); -void LAPACK_csytrs_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_float* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_zsytrs_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_double* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_chetrs_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_float* a, lapack_int* lda, - lapack_complex_float* tb, 
lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_float* b, lapack_int* ldb, - lapack_int *info ); -void LAPACK_zhetrs_aa_2stage( char* uplo, lapack_int* n, lapack_int* nrhs, - lapack_complex_double* a, lapack_int* lda, - lapack_complex_double* tb, lapack_int* ltb, - lapack_int* ipiv, lapack_int* ipiv2, - lapack_complex_double* b, lapack_int* ldb, - lapack_int *info ); /* APIs for set/get nancheck flags */ void LAPACKE_set_nancheck( int flag ); -int LAPACKE_get_nancheck( ); +int LAPACKE_get_nancheck( void ); #ifdef __cplusplus } diff --git a/lapack-netlib/LAPACKE/src/CMakeLists.txt b/lapack-netlib/LAPACKE/src/CMakeLists.txt index 26e52acfa..4c13dce0b 100644 --- a/lapack-netlib/LAPACKE/src/CMakeLists.txt +++ b/lapack-netlib/LAPACKE/src/CMakeLists.txt @@ -1,4 +1,4 @@ -set(SOURCES +set(SOURCES_COMPLEX lapacke_cbbcsd.c lapacke_cbbcsd_work.c lapacke_cbdsqr.c @@ -78,11 +78,11 @@ lapacke_cgeqrf_work.c lapacke_cgeqrfp.c lapacke_cgeqrfp_work.c lapacke_cgeqrt.c +lapacke_cgeqrt_work.c lapacke_cgeqrt2.c lapacke_cgeqrt2_work.c lapacke_cgeqrt3.c lapacke_cgeqrt3_work.c -lapacke_cgeqrt_work.c lapacke_cgerfs.c lapacke_cgerfs_work.c lapacke_cgerqf.c @@ -93,6 +93,8 @@ lapacke_cgesv.c lapacke_cgesv_work.c lapacke_cgesvd.c lapacke_cgesvd_work.c +lapacke_cgesvdq.c +lapacke_cgesvdq_work.c lapacke_cgesvdx.c lapacke_cgesvdx_work.c lapacke_cgesvj.c @@ -129,10 +131,10 @@ lapacke_cggevx.c lapacke_cggevx_work.c lapacke_cggglm.c lapacke_cggglm_work.c -lapacke_cgghrd.c -lapacke_cgghrd_work.c lapacke_cgghd3.c lapacke_cgghd3_work.c +lapacke_cgghrd.c +lapacke_cgghrd_work.c lapacke_cgglse.c lapacke_cgglse_work.c lapacke_cggqrf.c @@ -157,14 +159,14 @@ lapacke_cgttrs.c lapacke_cgttrs_work.c lapacke_chbev.c lapacke_chbev_work.c -lapacke_chbevd.c -lapacke_chbevd_work.c -lapacke_chbevx.c -lapacke_chbevx_work.c lapacke_chbev_2stage.c lapacke_chbev_2stage_work.c +lapacke_chbevd.c +lapacke_chbevd_work.c lapacke_chbevd_2stage.c lapacke_chbevd_2stage_work.c +lapacke_chbevx.c +lapacke_chbevx_work.c lapacke_chbevx_2stage.c lapacke_chbevx_2stage_work.c lapacke_chbgst.c @@ -185,18 +187,18 @@ lapacke_cheequb.c lapacke_cheequb_work.c lapacke_cheev.c lapacke_cheev_work.c -lapacke_cheevd.c -lapacke_cheevd_work.c -lapacke_cheevr.c -lapacke_cheevr_work.c -lapacke_cheevx.c -lapacke_cheevx_work.c lapacke_cheev_2stage.c lapacke_cheev_2stage_work.c +lapacke_cheevd.c +lapacke_cheevd_work.c lapacke_cheevd_2stage.c lapacke_cheevd_2stage_work.c +lapacke_cheevr.c +lapacke_cheevr_work.c lapacke_cheevr_2stage.c lapacke_cheevr_2stage_work.c +lapacke_cheevx.c +lapacke_cheevx_work.c lapacke_cheevx_2stage.c lapacke_cheevx_2stage_work.c lapacke_chegst.c @@ -214,8 +216,8 @@ lapacke_cherfs_work.c lapacke_chesv.c lapacke_chesv_work.c lapacke_chesv_aa.c -lapacke_chesv_aa_2stage.c lapacke_chesv_aa_work.c +lapacke_chesv_aa_2stage.c lapacke_chesv_aa_2stage_work.c lapacke_chesv_rk.c lapacke_chesv_rk_work.c @@ -226,35 +228,35 @@ lapacke_cheswapr_work.c lapacke_chetrd.c lapacke_chetrd_work.c lapacke_chetrf.c -lapacke_chetrf_rook.c lapacke_chetrf_work.c -lapacke_chetrf_rook_work.c lapacke_chetrf_aa.c -lapacke_chetrf_aa_2stage.c lapacke_chetrf_aa_work.c +lapacke_chetrf_aa_2stage.c lapacke_chetrf_aa_2stage_work.c lapacke_chetrf_rk.c lapacke_chetrf_rk_work.c +lapacke_chetrf_rook.c +lapacke_chetrf_rook_work.c lapacke_chetri.c +lapacke_chetri_work.c lapacke_chetri2.c lapacke_chetri2_work.c -lapacke_chetri_3.c -lapacke_chetri_3_work.c lapacke_chetri2x.c lapacke_chetri2x_work.c -lapacke_chetri_work.c +lapacke_chetri_3.c +lapacke_chetri_3_work.c 
lapacke_chetrs.c -lapacke_chetrs_rook.c +lapacke_chetrs_work.c lapacke_chetrs2.c lapacke_chetrs2_work.c -lapacke_chetrs_work.c -lapacke_chetrs_rook_work.c +lapacke_chetrs_3.c +lapacke_chetrs_3_work.c lapacke_chetrs_aa.c -lapacke_chetrs_aa_2stage.c lapacke_chetrs_aa_work.c +lapacke_chetrs_aa_2stage.c lapacke_chetrs_aa_2stage_work.c -lapacke_chetrs_3.c -lapacke_chetrs_3_work.c +lapacke_chetrs_rook.c +lapacke_chetrs_rook_work.c lapacke_chfrk.c lapacke_chfrk_work.c lapacke_chgeqz.c @@ -445,52 +447,54 @@ lapacke_csyconv.c lapacke_csyconv_work.c lapacke_csyequb.c lapacke_csyequb_work.c +lapacke_csyr.c +lapacke_csyr_work.c lapacke_csyrfs.c lapacke_csyrfs_work.c lapacke_csysv.c -lapacke_csysv_rook.c -lapacke_csysv_rook_work.c lapacke_csysv_work.c lapacke_csysv_aa.c -lapacke_csysv_aa_2stage.c lapacke_csysv_aa_work.c +lapacke_csysv_aa_2stage.c lapacke_csysv_aa_2stage_work.c lapacke_csysv_rk.c lapacke_csysv_rk_work.c +lapacke_csysv_rook.c +lapacke_csysv_rook_work.c lapacke_csysvx.c lapacke_csysvx_work.c lapacke_csyswapr.c lapacke_csyswapr_work.c lapacke_csytrf.c lapacke_csytrf_work.c -lapacke_csytrf_rook.c -lapacke_csytrf_rook_work.c lapacke_csytrf_aa.c -lapacke_csytrf_aa_2stage.c lapacke_csytrf_aa_work.c +lapacke_csytrf_aa_2stage.c lapacke_csytrf_aa_2stage_work.c lapacke_csytrf_rk.c lapacke_csytrf_rk_work.c +lapacke_csytrf_rook.c +lapacke_csytrf_rook_work.c lapacke_csytri.c +lapacke_csytri_work.c lapacke_csytri2.c lapacke_csytri2_work.c -lapacke_csytri_3.c -lapacke_csytri_3_work.c lapacke_csytri2x.c lapacke_csytri2x_work.c -lapacke_csytri_work.c +lapacke_csytri_3.c +lapacke_csytri_3_work.c lapacke_csytrs.c -lapacke_csytrs_rook.c +lapacke_csytrs_work.c lapacke_csytrs2.c lapacke_csytrs2_work.c -lapacke_csytrs_work.c -lapacke_csytrs_rook_work.c +lapacke_csytrs_3.c +lapacke_csytrs_3_work.c lapacke_csytrs_aa.c -lapacke_csytrs_aa_2stage.c lapacke_csytrs_aa_work.c +lapacke_csytrs_aa_2stage.c lapacke_csytrs_aa_2stage_work.c -lapacke_csytrs_3.c -lapacke_csytrs_3_work.c +lapacke_csytrs_rook.c +lapacke_csytrs_rook_work.c lapacke_ctbcon.c lapacke_ctbcon_work.c lapacke_ctbrfs.c @@ -522,9 +526,9 @@ lapacke_ctpcon_work.c lapacke_ctpmqrt.c lapacke_ctpmqrt_work.c lapacke_ctpqrt.c +lapacke_ctpqrt_work.c lapacke_ctpqrt2.c lapacke_ctpqrt2_work.c -lapacke_ctpqrt_work.c lapacke_ctprfb.c lapacke_ctprfb_work.c lapacke_ctprfs.c @@ -601,14 +605,16 @@ lapacke_cupgtr.c lapacke_cupgtr_work.c lapacke_cupmtr.c lapacke_cupmtr_work.c +) +set(SOURCES_DOUBLE lapacke_dbbcsd.c lapacke_dbbcsd_work.c lapacke_dbdsdc.c lapacke_dbdsdc_work.c -lapacke_dbdsvdx.c -lapacke_dbdsvdx_work.c lapacke_dbdsqr.c lapacke_dbdsqr_work.c +lapacke_dbdsvdx.c +lapacke_dbdsvdx_work.c lapacke_ddisna.c lapacke_ddisna_work.c lapacke_dgbbrd.c @@ -686,11 +692,11 @@ lapacke_dgeqrf_work.c lapacke_dgeqrfp.c lapacke_dgeqrfp_work.c lapacke_dgeqrt.c +lapacke_dgeqrt_work.c lapacke_dgeqrt2.c lapacke_dgeqrt2_work.c lapacke_dgeqrt3.c lapacke_dgeqrt3_work.c -lapacke_dgeqrt_work.c lapacke_dgerfs.c lapacke_dgerfs_work.c lapacke_dgerqf.c @@ -701,6 +707,8 @@ lapacke_dgesv.c lapacke_dgesv_work.c lapacke_dgesvd.c lapacke_dgesvd_work.c +lapacke_dgesvdq.c +lapacke_dgesvdq_work.c lapacke_dgesvdx.c lapacke_dgesvdx_work.c lapacke_dgesvj.c @@ -737,10 +745,10 @@ lapacke_dggevx.c lapacke_dggevx_work.c lapacke_dggglm.c lapacke_dggglm_work.c -lapacke_dgghrd.c -lapacke_dgghrd_work.c lapacke_dgghd3.c lapacke_dgghd3_work.c +lapacke_dgghrd.c +lapacke_dgghrd_work.c lapacke_dgglse.c lapacke_dgglse_work.c lapacke_dggqrf.c @@ -823,10 +831,10 @@ lapacke_dopmtr.c lapacke_dopmtr_work.c lapacke_dorbdb.c 
lapacke_dorbdb_work.c -lapacke_dorcsd2by1.c -lapacke_dorcsd2by1_work.c lapacke_dorcsd.c lapacke_dorcsd_work.c +lapacke_dorcsd2by1.c +lapacke_dorcsd2by1_work.c lapacke_dorgbr.c lapacke_dorgbr_work.c lapacke_dorghr.c @@ -933,14 +941,14 @@ lapacke_dpttrs.c lapacke_dpttrs_work.c lapacke_dsbev.c lapacke_dsbev_work.c -lapacke_dsbevd.c -lapacke_dsbevd_work.c -lapacke_dsbevx.c -lapacke_dsbevx_work.c lapacke_dsbev_2stage.c lapacke_dsbev_2stage_work.c +lapacke_dsbevd.c +lapacke_dsbevd_work.c lapacke_dsbevd_2stage.c lapacke_dsbevd_2stage_work.c +lapacke_dsbevx.c +lapacke_dsbevx_work.c lapacke_dsbevx_2stage.c lapacke_dsbevx_2stage_work.c lapacke_dsbgst.c @@ -1021,18 +1029,18 @@ lapacke_dsyequb.c lapacke_dsyequb_work.c lapacke_dsyev.c lapacke_dsyev_work.c -lapacke_dsyevd.c -lapacke_dsyevd_work.c -lapacke_dsyevr.c -lapacke_dsyevr_work.c -lapacke_dsyevx.c -lapacke_dsyevx_work.c lapacke_dsyev_2stage.c lapacke_dsyev_2stage_work.c +lapacke_dsyevd.c +lapacke_dsyevd_work.c lapacke_dsyevd_2stage.c lapacke_dsyevd_2stage_work.c +lapacke_dsyevr.c +lapacke_dsyevr_work.c lapacke_dsyevr_2stage.c lapacke_dsyevr_2stage_work.c +lapacke_dsyevx.c +lapacke_dsyevx_work.c lapacke_dsyevx_2stage.c lapacke_dsyevx_2stage_work.c lapacke_dsygst.c @@ -1048,15 +1056,15 @@ lapacke_dsygvx_work.c lapacke_dsyrfs.c lapacke_dsyrfs_work.c lapacke_dsysv.c -lapacke_dsysv_rook.c -lapacke_dsysv_rook_work.c lapacke_dsysv_work.c lapacke_dsysv_aa.c -lapacke_dsysv_aa_2stage.c lapacke_dsysv_aa_work.c +lapacke_dsysv_aa_2stage.c lapacke_dsysv_aa_2stage_work.c lapacke_dsysv_rk.c lapacke_dsysv_rk_work.c +lapacke_dsysv_rook.c +lapacke_dsysv_rook_work.c lapacke_dsysvx.c lapacke_dsysvx_work.c lapacke_dsyswapr.c @@ -1065,33 +1073,33 @@ lapacke_dsytrd.c lapacke_dsytrd_work.c lapacke_dsytrf.c lapacke_dsytrf_work.c -lapacke_dsytrf_rook.c -lapacke_dsytrf_rook_work.c lapacke_dsytrf_aa.c -lapacke_dsytrf_aa_2stage.c lapacke_dsytrf_aa_work.c +lapacke_dsytrf_aa_2stage.c lapacke_dsytrf_aa_2stage_work.c lapacke_dsytrf_rk.c lapacke_dsytrf_rk_work.c +lapacke_dsytrf_rook.c +lapacke_dsytrf_rook_work.c lapacke_dsytri.c +lapacke_dsytri_work.c lapacke_dsytri2.c lapacke_dsytri2_work.c -lapacke_dsytri_3.c -lapacke_dsytri_3_work.c lapacke_dsytri2x.c lapacke_dsytri2x_work.c -lapacke_dsytri_work.c +lapacke_dsytri_3.c +lapacke_dsytri_3_work.c lapacke_dsytrs.c -lapacke_dsytrs_rook.c +lapacke_dsytrs_work.c lapacke_dsytrs2.c lapacke_dsytrs2_work.c +lapacke_dsytrs_3.c +lapacke_dsytrs_3_work.c lapacke_dsytrs_aa.c -lapacke_dsytrs_aa_2stage.c lapacke_dsytrs_aa_work.c +lapacke_dsytrs_aa_2stage.c lapacke_dsytrs_aa_2stage_work.c -lapacke_dsytrs_3.c -lapacke_dsytrs_3_work.c -lapacke_dsytrs_work.c +lapacke_dsytrs_rook.c lapacke_dsytrs_rook_work.c lapacke_dtbcon.c lapacke_dtbcon_work.c @@ -1124,9 +1132,9 @@ lapacke_dtpcon_work.c lapacke_dtpmqrt.c lapacke_dtpmqrt_work.c lapacke_dtpqrt.c +lapacke_dtpqrt_work.c lapacke_dtpqrt2.c lapacke_dtpqrt2_work.c -lapacke_dtpqrt_work.c lapacke_dtprfb.c lapacke_dtprfb_work.c lapacke_dtprfs.c @@ -1163,15 +1171,21 @@ lapacke_dtrttp.c lapacke_dtrttp_work.c lapacke_dtzrzf.c lapacke_dtzrzf_work.c +) + +set(SOURCES lapacke_nancheck.c +lapacke_ilaver.c +) +set(SOURCES_SINGLE lapacke_sbbcsd.c lapacke_sbbcsd_work.c lapacke_sbdsdc.c lapacke_sbdsdc_work.c -lapacke_sbdsvdx.c -lapacke_sbdsvdx_work.c lapacke_sbdsqr.c lapacke_sbdsqr_work.c +lapacke_sbdsvdx.c +lapacke_sbdsvdx_work.c lapacke_sdisna.c lapacke_sdisna_work.c lapacke_sgbbrd.c @@ -1249,11 +1263,11 @@ lapacke_sgeqrf_work.c lapacke_sgeqrfp.c lapacke_sgeqrfp_work.c lapacke_sgeqrt.c +lapacke_sgeqrt_work.c 
lapacke_sgeqrt2.c lapacke_sgeqrt2_work.c lapacke_sgeqrt3.c lapacke_sgeqrt3_work.c -lapacke_sgeqrt_work.c lapacke_sgerfs.c lapacke_sgerfs_work.c lapacke_sgerqf.c @@ -1264,6 +1278,8 @@ lapacke_sgesv.c lapacke_sgesv_work.c lapacke_sgesvd.c lapacke_sgesvd_work.c +lapacke_sgesvdq.c +lapacke_sgesvdq_work.c lapacke_sgesvdx.c lapacke_sgesvdx_work.c lapacke_sgesvj.c @@ -1300,10 +1316,10 @@ lapacke_sggevx.c lapacke_sggevx_work.c lapacke_sggglm.c lapacke_sggglm_work.c -lapacke_sgghrd.c -lapacke_sgghrd_work.c lapacke_sgghd3.c lapacke_sgghd3_work.c +lapacke_sgghrd.c +lapacke_sgghrd_work.c lapacke_sgglse.c lapacke_sgglse_work.c lapacke_sggqrf.c @@ -1496,14 +1512,14 @@ lapacke_spttrs.c lapacke_spttrs_work.c lapacke_ssbev.c lapacke_ssbev_work.c -lapacke_ssbevd.c -lapacke_ssbevd_work.c -lapacke_ssbevx.c -lapacke_ssbevx_work.c lapacke_ssbev_2stage.c lapacke_ssbev_2stage_work.c +lapacke_ssbevd.c +lapacke_ssbevd_work.c lapacke_ssbevd_2stage.c lapacke_ssbevd_2stage_work.c +lapacke_ssbevx.c +lapacke_ssbevx_work.c lapacke_ssbevx_2stage.c lapacke_ssbevx_2stage_work.c lapacke_ssbgst.c @@ -1580,18 +1596,18 @@ lapacke_ssyequb.c lapacke_ssyequb_work.c lapacke_ssyev.c lapacke_ssyev_work.c -lapacke_ssyevd.c -lapacke_ssyevd_work.c -lapacke_ssyevr.c -lapacke_ssyevr_work.c -lapacke_ssyevx.c -lapacke_ssyevx_work.c lapacke_ssyev_2stage.c lapacke_ssyev_2stage_work.c +lapacke_ssyevd.c +lapacke_ssyevd_work.c lapacke_ssyevd_2stage.c lapacke_ssyevd_2stage_work.c +lapacke_ssyevr.c +lapacke_ssyevr_work.c lapacke_ssyevr_2stage.c lapacke_ssyevr_2stage_work.c +lapacke_ssyevx.c +lapacke_ssyevx_work.c lapacke_ssyevx_2stage.c lapacke_ssyevx_2stage_work.c lapacke_ssygst.c @@ -1607,8 +1623,6 @@ lapacke_ssygvx_work.c lapacke_ssyrfs.c lapacke_ssyrfs_work.c lapacke_ssysv.c -lapacke_ssysv_rook.c -lapacke_ssysv_rook_work.c lapacke_ssysv_work.c lapacke_ssysv_aa.c lapacke_ssysv_aa_work.c @@ -1616,6 +1630,8 @@ lapacke_ssysv_aa_2stage.c lapacke_ssysv_aa_2stage_work.c lapacke_ssysv_rk.c lapacke_ssysv_rk_work.c +lapacke_ssysv_rook.c +lapacke_ssysv_rook_work.c lapacke_ssysvx.c lapacke_ssysvx_work.c lapacke_ssyswapr.c @@ -1624,33 +1640,33 @@ lapacke_ssytrd.c lapacke_ssytrd_work.c lapacke_ssytrf.c lapacke_ssytrf_work.c -lapacke_ssytrf_rook.c -lapacke_ssytrf_rook_work.c lapacke_ssytrf_aa.c -lapacke_ssytrf_aa_2stage.c lapacke_ssytrf_aa_work.c +lapacke_ssytrf_aa_2stage.c lapacke_ssytrf_aa_2stage_work.c lapacke_ssytrf_rk.c lapacke_ssytrf_rk_work.c +lapacke_ssytrf_rook.c +lapacke_ssytrf_rook_work.c lapacke_ssytri.c +lapacke_ssytri_work.c lapacke_ssytri2.c lapacke_ssytri2_work.c -lapacke_ssytri_3.c -lapacke_ssytri_3_work.c lapacke_ssytri2x.c lapacke_ssytri2x_work.c -lapacke_ssytri_work.c +lapacke_ssytri_3.c +lapacke_ssytri_3_work.c lapacke_ssytrs.c -lapacke_ssytrs_rook.c +lapacke_ssytrs_work.c lapacke_ssytrs2.c lapacke_ssytrs2_work.c +lapacke_ssytrs_3.c +lapacke_ssytrs_3_work.c lapacke_ssytrs_aa.c -lapacke_ssytrs_aa_2stage.c lapacke_ssytrs_aa_work.c +lapacke_ssytrs_aa_2stage.c lapacke_ssytrs_aa_2stage_work.c -lapacke_ssytrs_3.c -lapacke_ssytrs_3_work.c -lapacke_ssytrs_work.c +lapacke_ssytrs_rook.c lapacke_ssytrs_rook_work.c lapacke_stbcon.c lapacke_stbcon_work.c @@ -1722,6 +1738,8 @@ lapacke_strttp.c lapacke_strttp_work.c lapacke_stzrzf.c lapacke_stzrzf_work.c +) +set(SOURCES_COMPLEX16 lapacke_zbbcsd.c lapacke_zbbcsd_work.c lapacke_zbdsqr.c @@ -1805,11 +1823,11 @@ lapacke_zgeqrf_work.c lapacke_zgeqrfp.c lapacke_zgeqrfp_work.c lapacke_zgeqrt.c +lapacke_zgeqrt_work.c lapacke_zgeqrt2.c lapacke_zgeqrt2_work.c lapacke_zgeqrt3.c lapacke_zgeqrt3_work.c 
-lapacke_zgeqrt_work.c lapacke_zgerfs.c lapacke_zgerfs_work.c lapacke_zgerqf.c @@ -1820,6 +1838,8 @@ lapacke_zgesv.c lapacke_zgesv_work.c lapacke_zgesvd.c lapacke_zgesvd_work.c +lapacke_zgesvdq.c +lapacke_zgesvdq_work.c lapacke_zgesvdx.c lapacke_zgesvdx_work.c lapacke_zgesvj.c @@ -1856,10 +1876,10 @@ lapacke_zggevx.c lapacke_zggevx_work.c lapacke_zggglm.c lapacke_zggglm_work.c -lapacke_zgghrd.c -lapacke_zgghrd_work.c lapacke_zgghd3.c lapacke_zgghd3_work.c +lapacke_zgghrd.c +lapacke_zgghrd_work.c lapacke_zgglse.c lapacke_zgglse_work.c lapacke_zggqrf.c @@ -1884,14 +1904,14 @@ lapacke_zgttrs.c lapacke_zgttrs_work.c lapacke_zhbev.c lapacke_zhbev_work.c -lapacke_zhbevd.c -lapacke_zhbevd_work.c -lapacke_zhbevx.c -lapacke_zhbevx_work.c lapacke_zhbev_2stage.c lapacke_zhbev_2stage_work.c +lapacke_zhbevd.c +lapacke_zhbevd_work.c lapacke_zhbevd_2stage.c lapacke_zhbevd_2stage_work.c +lapacke_zhbevx.c +lapacke_zhbevx_work.c lapacke_zhbevx_2stage.c lapacke_zhbevx_2stage_work.c lapacke_zhbgst.c @@ -1912,18 +1932,18 @@ lapacke_zheequb.c lapacke_zheequb_work.c lapacke_zheev.c lapacke_zheev_work.c -lapacke_zheevd.c -lapacke_zheevd_work.c -lapacke_zheevr.c -lapacke_zheevr_work.c -lapacke_zheevx.c -lapacke_zheevx_work.c lapacke_zheev_2stage.c lapacke_zheev_2stage_work.c +lapacke_zheevd.c +lapacke_zheevd_work.c lapacke_zheevd_2stage.c lapacke_zheevd_2stage_work.c +lapacke_zheevr.c +lapacke_zheevr_work.c lapacke_zheevr_2stage.c lapacke_zheevr_2stage_work.c +lapacke_zheevx.c +lapacke_zheevx_work.c lapacke_zheevx_2stage.c lapacke_zheevx_2stage_work.c lapacke_zhegst.c @@ -1941,8 +1961,8 @@ lapacke_zherfs_work.c lapacke_zhesv.c lapacke_zhesv_work.c lapacke_zhesv_aa.c -lapacke_zhesv_aa_2stage.c lapacke_zhesv_aa_work.c +lapacke_zhesv_aa_2stage.c lapacke_zhesv_aa_2stage_work.c lapacke_zhesv_rk.c lapacke_zhesv_rk_work.c @@ -1953,34 +1973,34 @@ lapacke_zheswapr_work.c lapacke_zhetrd.c lapacke_zhetrd_work.c lapacke_zhetrf.c -lapacke_zhetrf_rook.c lapacke_zhetrf_work.c -lapacke_zhetrf_rook_work.c lapacke_zhetrf_aa.c -lapacke_zhetrf_aa_2stage.c lapacke_zhetrf_aa_work.c +lapacke_zhetrf_aa_2stage.c lapacke_zhetrf_aa_2stage_work.c lapacke_zhetrf_rk.c lapacke_zhetrf_rk_work.c +lapacke_zhetrf_rook.c +lapacke_zhetrf_rook_work.c lapacke_zhetri.c +lapacke_zhetri_work.c lapacke_zhetri2.c lapacke_zhetri2_work.c -lapacke_zhetri_3.c -lapacke_zhetri_3_work.c lapacke_zhetri2x.c lapacke_zhetri2x_work.c -lapacke_zhetri_work.c +lapacke_zhetri_3.c +lapacke_zhetri_3_work.c lapacke_zhetrs.c -lapacke_zhetrs_rook.c +lapacke_zhetrs_work.c lapacke_zhetrs2.c lapacke_zhetrs2_work.c -lapacke_zhetrs_work.c +lapacke_zhetrs_3.c +lapacke_zhetrs_3_work.c lapacke_zhetrs_aa.c -lapacke_zhetrs_aa_2stage.c lapacke_zhetrs_aa_work.c +lapacke_zhetrs_aa_2stage.c lapacke_zhetrs_aa_2stage_work.c -lapacke_zhetrs_3.c -lapacke_zhetrs_3_work.c +lapacke_zhetrs_rook.c lapacke_zhetrs_rook_work.c lapacke_zhfrk.c lapacke_zhfrk_work.c @@ -2172,52 +2192,54 @@ lapacke_zsyconv.c lapacke_zsyconv_work.c lapacke_zsyequb.c lapacke_zsyequb_work.c +lapacke_zsyr.c +lapacke_zsyr_work.c lapacke_zsyrfs.c lapacke_zsyrfs_work.c lapacke_zsysv.c -lapacke_zsysv_rook.c -lapacke_zsysv_rook_work.c lapacke_zsysv_work.c lapacke_zsysv_aa.c -lapacke_zsysv_aa_2stage.c lapacke_zsysv_aa_work.c +lapacke_zsysv_aa_2stage.c lapacke_zsysv_aa_2stage_work.c lapacke_zsysv_rk.c lapacke_zsysv_rk_work.c +lapacke_zsysv_rook.c +lapacke_zsysv_rook_work.c lapacke_zsysvx.c lapacke_zsysvx_work.c lapacke_zsyswapr.c lapacke_zsyswapr_work.c lapacke_zsytrf.c lapacke_zsytrf_work.c -lapacke_zsytrf_rook.c 
-lapacke_zsytrf_rook_work.c lapacke_zsytrf_aa.c -lapacke_zsytrf_aa_2stage.c lapacke_zsytrf_aa_work.c +lapacke_zsytrf_aa_2stage.c lapacke_zsytrf_aa_2stage_work.c lapacke_zsytrf_rk.c lapacke_zsytrf_rk_work.c +lapacke_zsytrf_rook.c +lapacke_zsytrf_rook_work.c lapacke_zsytri.c +lapacke_zsytri_work.c lapacke_zsytri2.c lapacke_zsytri2_work.c -lapacke_zsytri_3.c -lapacke_zsytri_3_work.c lapacke_zsytri2x.c lapacke_zsytri2x_work.c -lapacke_zsytri_work.c +lapacke_zsytri_3.c +lapacke_zsytri_3_work.c lapacke_zsytrs.c -lapacke_zsytrs_rook.c +lapacke_zsytrs_work.c lapacke_zsytrs2.c lapacke_zsytrs2_work.c -lapacke_zsytrs_work.c -lapacke_zsytrs_rook_work.c +lapacke_zsytrs_3.c +lapacke_zsytrs_3_work.c lapacke_zsytrs_aa.c -lapacke_zsytrs_aa_2stage.c lapacke_zsytrs_aa_work.c +lapacke_zsytrs_aa_2stage.c lapacke_zsytrs_aa_2stage_work.c -lapacke_zsytrs_3.c -lapacke_zsytrs_3_work.c +lapacke_zsytrs_rook.c +lapacke_zsytrs_rook_work.c lapacke_ztbcon.c lapacke_ztbcon_work.c lapacke_ztbrfs.c @@ -2249,9 +2271,9 @@ lapacke_ztpcon_work.c lapacke_ztpmqrt.c lapacke_ztpmqrt_work.c lapacke_ztpqrt.c +lapacke_ztpqrt_work.c lapacke_ztpqrt2.c lapacke_ztpqrt2_work.c -lapacke_ztpqrt_work.c lapacke_ztprfb.c lapacke_ztprfb_work.c lapacke_ztprfs.c @@ -2328,11 +2350,6 @@ lapacke_zupgtr.c lapacke_zupgtr_work.c lapacke_zupmtr.c lapacke_zupmtr_work.c -lapacke_zsyr.c -lapacke_csyr.c -lapacke_zsyr_work.c -lapacke_csyr_work.c -lapacke_ilaver.c ) set(DEPRECATED diff --git a/lapack-netlib/LAPACKE/src/Makefile b/lapack-netlib/LAPACKE/src/Makefile index 7672f9f73..a602dd7a0 100644 --- a/lapack-netlib/LAPACKE/src/Makefile +++ b/lapack-netlib/LAPACKE/src/Makefile @@ -32,12 +32,22 @@ ############################################################################## # makefile for LAPACKE, used to build lapacke binary. # -# Note: we use multiple OBJ_A, OBJ_B, etc, instead of a single OBJ +# Note: we use multiple OBJ_S, OBJ_C, etc, instead of a single OBJ # to allow build with mingw (argument list too long for the msys ar) # -include ../../make.inc +TOPSRCDIR = ../.. 
+include $(TOPSRCDIR)/make.inc -OBJ_A = \ +.SUFFIXES: .c .o +.c.o: + $(CC) $(CFLAGS) -I../include -c -o $@ $< + +OBJ = \ +lapacke_ilaver.o \ +lapacke_nancheck.o + +ifeq ($(BUILD_COMPLEX),1) +OBJ_C = \ lapacke_cbbcsd.o \ lapacke_cbbcsd_work.o \ lapacke_cbdsqr.o \ @@ -82,12 +92,12 @@ lapacke_cgeevx.o \ lapacke_cgeevx_work.o \ lapacke_cgehrd.o \ lapacke_cgehrd_work.o \ +lapacke_cgejsv.o \ +lapacke_cgejsv_work.o \ lapacke_cgelq.o \ lapacke_cgelq_work.o \ lapacke_cgelq2.o \ lapacke_cgelq2_work.o \ -lapacke_cgejsv.o \ -lapacke_cgejsv_work.o \ lapacke_cgelqf.o \ lapacke_cgelqf_work.o \ lapacke_cgels.o \ @@ -117,11 +127,11 @@ lapacke_cgeqrf_work.o \ lapacke_cgeqrfp.o \ lapacke_cgeqrfp_work.o \ lapacke_cgeqrt.o \ +lapacke_cgeqrt_work.o \ lapacke_cgeqrt2.o \ lapacke_cgeqrt2_work.o \ lapacke_cgeqrt3.o \ lapacke_cgeqrt3_work.o \ -lapacke_cgeqrt_work.o \ lapacke_cgerfs.o \ lapacke_cgerfs_work.o \ lapacke_cgerqf.o \ @@ -132,6 +142,8 @@ lapacke_cgesv.o \ lapacke_cgesv_work.o \ lapacke_cgesvd.o \ lapacke_cgesvd_work.o \ +lapacke_cgesvdq.o \ +lapacke_cgesvdq_work.o \ lapacke_cgesvdx.o \ lapacke_cgesvdx_work.o \ lapacke_cgesvj.o \ @@ -168,10 +180,10 @@ lapacke_cggevx.o \ lapacke_cggevx_work.o \ lapacke_cggglm.o \ lapacke_cggglm_work.o \ -lapacke_cgghrd.o \ -lapacke_cgghrd_work.o \ lapacke_cgghd3.o \ lapacke_cgghd3_work.o \ +lapacke_cgghrd.o \ +lapacke_cgghrd_work.o \ lapacke_cgglse.o \ lapacke_cgglse_work.o \ lapacke_cggqrf.o \ @@ -196,14 +208,14 @@ lapacke_cgttrs.o \ lapacke_cgttrs_work.o \ lapacke_chbev.o \ lapacke_chbev_work.o \ -lapacke_chbevd.o \ -lapacke_chbevd_work.o \ -lapacke_chbevx.o \ -lapacke_chbevx_work.o \ lapacke_chbev_2stage.o \ lapacke_chbev_2stage_work.o \ +lapacke_chbevd.o \ +lapacke_chbevd_work.o \ lapacke_chbevd_2stage.o \ lapacke_chbevd_2stage_work.o \ +lapacke_chbevx.o \ +lapacke_chbevx_work.o \ lapacke_chbevx_2stage.o \ lapacke_chbevx_2stage_work.o \ lapacke_chbgst.o \ @@ -224,18 +236,18 @@ lapacke_cheequb.o \ lapacke_cheequb_work.o \ lapacke_cheev.o \ lapacke_cheev_work.o \ -lapacke_cheevd.o \ -lapacke_cheevd_work.o \ -lapacke_cheevr.o \ -lapacke_cheevr_work.o \ -lapacke_cheevx.o \ -lapacke_cheevx_work.o \ lapacke_cheev_2stage.o \ lapacke_cheev_2stage_work.o \ +lapacke_cheevd.o \ +lapacke_cheevd_work.o \ lapacke_cheevd_2stage.o \ lapacke_cheevd_2stage_work.o \ +lapacke_cheevr.o \ +lapacke_cheevr_work.o \ lapacke_cheevr_2stage.o \ lapacke_cheevr_2stage_work.o \ +lapacke_cheevx.o \ +lapacke_cheevx_work.o \ lapacke_cheevx_2stage.o \ lapacke_cheevx_2stage_work.o \ lapacke_chegst.o \ @@ -265,35 +277,35 @@ lapacke_cheswapr_work.o \ lapacke_chetrd.o \ lapacke_chetrd_work.o \ lapacke_chetrf.o \ -lapacke_chetrf_rook.o \ lapacke_chetrf_work.o \ -lapacke_chetrf_rook_work.o \ lapacke_chetrf_aa.o \ -lapacke_chetrf_aa_2stage.o \ lapacke_chetrf_aa_work.o \ +lapacke_chetrf_aa_2stage.o \ lapacke_chetrf_aa_2stage_work.o \ lapacke_chetrf_rk.o \ lapacke_chetrf_rk_work.o \ +lapacke_chetrf_rook.o \ +lapacke_chetrf_rook_work.o \ lapacke_chetri.o \ +lapacke_chetri_work.o \ lapacke_chetri2.o \ lapacke_chetri2_work.o \ -lapacke_chetri_3.o \ -lapacke_chetri_3_work.o \ lapacke_chetri2x.o \ lapacke_chetri2x_work.o \ -lapacke_chetri_work.o \ +lapacke_chetri_3.o \ +lapacke_chetri_3_work.o \ lapacke_chetrs.o \ -lapacke_chetrs_rook.o \ +lapacke_chetrs_work.o \ lapacke_chetrs2.o \ lapacke_chetrs2_work.o \ -lapacke_chetrs_work.o \ -lapacke_chetrs_rook_work.o \ +lapacke_chetrs_3.o \ +lapacke_chetrs_3_work.o \ lapacke_chetrs_aa.o \ -lapacke_chetrs_aa_2stage.o \ lapacke_chetrs_aa_work.o \ +lapacke_chetrs_aa_2stage.o \ 
lapacke_chetrs_aa_2stage_work.o \ -lapacke_chetrs_3.o \ -lapacke_chetrs_3_work.o \ +lapacke_chetrs_rook.o \ +lapacke_chetrs_rook_work.o \ lapacke_chfrk.o \ lapacke_chfrk_work.o \ lapacke_chgeqz.o \ @@ -484,11 +496,11 @@ lapacke_csyconv.o \ lapacke_csyconv_work.o \ lapacke_csyequb.o \ lapacke_csyequb_work.o \ +lapacke_csyr.o \ +lapacke_csyr_work.o \ lapacke_csyrfs.o \ lapacke_csyrfs_work.o \ lapacke_csysv.o \ -lapacke_csysv_rook.o \ -lapacke_csysv_rook_work.o \ lapacke_csysv_work.o \ lapacke_csysv_aa.o \ lapacke_csysv_aa_work.o \ @@ -496,40 +508,42 @@ lapacke_csysv_aa_2stage.o \ lapacke_csysv_aa_2stage_work.o \ lapacke_csysv_rk.o \ lapacke_csysv_rk_work.o \ +lapacke_csysv_rook.o \ +lapacke_csysv_rook_work.o \ lapacke_csysvx.o \ lapacke_csysvx_work.o \ lapacke_csyswapr.o \ lapacke_csyswapr_work.o \ lapacke_csytrf.o \ lapacke_csytrf_work.o \ -lapacke_csytrf_rook.o \ -lapacke_csytrf_rook_work.o \ lapacke_csytrf_aa.o \ -lapacke_csytrf_aa_2stage.o \ lapacke_csytrf_aa_work.o \ +lapacke_csytrf_aa_2stage.o \ lapacke_csytrf_aa_2stage_work.o \ lapacke_csytrf_rk.o \ lapacke_csytrf_rk_work.o \ +lapacke_csytrf_rook.o \ +lapacke_csytrf_rook_work.o \ lapacke_csytri.o \ +lapacke_csytri_work.o \ lapacke_csytri2.o \ lapacke_csytri2_work.o \ -lapacke_csytri_3.o \ -lapacke_csytri_3_work.o \ lapacke_csytri2x.o \ lapacke_csytri2x_work.o \ -lapacke_csytri_work.o \ +lapacke_csytri_3.o \ +lapacke_csytri_3_work.o \ lapacke_csytrs.o \ -lapacke_csytrs_rook.o \ +lapacke_csytrs_work.o \ lapacke_csytrs2.o \ lapacke_csytrs2_work.o \ -lapacke_csytrs_work.o \ -lapacke_csytrs_rook_work.o \ +lapacke_csytrs_3.o \ +lapacke_csytrs_3_work.o \ lapacke_csytrs_aa.o \ -lapacke_csytrs_aa_2stage.o \ lapacke_csytrs_aa_work.o \ +lapacke_csytrs_aa_2stage.o \ lapacke_csytrs_aa_2stage_work.o \ -lapacke_csytrs_3.o \ -lapacke_csytrs_3_work.o \ +lapacke_csytrs_rook.o \ +lapacke_csytrs_rook_work.o \ lapacke_ctbcon.o \ lapacke_ctbcon_work.o \ lapacke_ctbrfs.o \ @@ -561,9 +575,9 @@ lapacke_ctpcon_work.o \ lapacke_ctpmqrt.o \ lapacke_ctpmqrt_work.o \ lapacke_ctpqrt.o \ +lapacke_ctpqrt_work.o \ lapacke_ctpqrt2.o \ lapacke_ctpqrt2_work.o \ -lapacke_ctpqrt_work.o \ lapacke_ctprfb.o \ lapacke_ctprfb_work.o \ lapacke_ctprfs.o \ @@ -639,15 +653,19 @@ lapacke_cunmtr_work.o \ lapacke_cupgtr.o \ lapacke_cupgtr_work.o \ lapacke_cupmtr.o \ -lapacke_cupmtr_work.o \ +lapacke_cupmtr_work.o +endif + +ifeq ($(BUILD_DOUBLE),1) +OBJ_D = \ lapacke_dbbcsd.o \ lapacke_dbbcsd_work.o \ lapacke_dbdsdc.o \ lapacke_dbdsdc_work.o \ -lapacke_dbdsvdx.o \ -lapacke_dbdsvdx_work.o \ lapacke_dbdsqr.o \ lapacke_dbdsqr_work.o \ +lapacke_dbdsvdx.o \ +lapacke_dbdsvdx_work.o \ lapacke_ddisna.o \ lapacke_ddisna_work.o \ lapacke_dgbbrd.o \ @@ -725,11 +743,11 @@ lapacke_dgeqrf_work.o \ lapacke_dgeqrfp.o \ lapacke_dgeqrfp_work.o \ lapacke_dgeqrt.o \ +lapacke_dgeqrt_work.o \ lapacke_dgeqrt2.o \ lapacke_dgeqrt2_work.o \ lapacke_dgeqrt3.o \ lapacke_dgeqrt3_work.o \ -lapacke_dgeqrt_work.o \ lapacke_dgerfs.o \ lapacke_dgerfs_work.o \ lapacke_dgerqf.o \ @@ -740,6 +758,8 @@ lapacke_dgesv.o \ lapacke_dgesv_work.o \ lapacke_dgesvd.o \ lapacke_dgesvd_work.o \ +lapacke_dgesvdq.o \ +lapacke_dgesvdq_work.o \ lapacke_dgesvdx.o \ lapacke_dgesvdx_work.o \ lapacke_dgesvj.o \ @@ -776,10 +796,10 @@ lapacke_dggevx.o \ lapacke_dggevx_work.o \ lapacke_dggglm.o \ lapacke_dggglm_work.o \ -lapacke_dgghrd.o \ -lapacke_dgghrd_work.o \ lapacke_dgghd3.o \ lapacke_dgghd3_work.o \ +lapacke_dgghrd.o \ +lapacke_dgghrd_work.o \ lapacke_dgglse.o \ lapacke_dgglse_work.o \ lapacke_dggqrf.o \ @@ -972,14 +992,14 @@ 
lapacke_dpttrs.o \ lapacke_dpttrs_work.o \ lapacke_dsbev.o \ lapacke_dsbev_work.o \ -lapacke_dsbevd.o \ -lapacke_dsbevd_work.o \ -lapacke_dsbevx.o \ -lapacke_dsbevx_work.o \ lapacke_dsbev_2stage.o \ lapacke_dsbev_2stage_work.o \ +lapacke_dsbevd.o \ +lapacke_dsbevd_work.o \ lapacke_dsbevd_2stage.o \ lapacke_dsbevd_2stage_work.o \ +lapacke_dsbevx.o \ +lapacke_dsbevx_work.o \ lapacke_dsbevx_2stage.o \ lapacke_dsbevx_2stage_work.o \ lapacke_dsbgst.o \ @@ -1060,18 +1080,18 @@ lapacke_dsyequb.o \ lapacke_dsyequb_work.o \ lapacke_dsyev.o \ lapacke_dsyev_work.o \ -lapacke_dsyevd.o \ -lapacke_dsyevd_work.o \ -lapacke_dsyevr.o \ -lapacke_dsyevr_work.o \ -lapacke_dsyevx.o \ -lapacke_dsyevx_work.o \ lapacke_dsyev_2stage.o \ lapacke_dsyev_2stage_work.o \ +lapacke_dsyevd.o \ +lapacke_dsyevd_work.o \ lapacke_dsyevd_2stage.o \ lapacke_dsyevd_2stage_work.o \ +lapacke_dsyevr.o \ +lapacke_dsyevr_work.o \ lapacke_dsyevr_2stage.o \ lapacke_dsyevr_2stage_work.o \ +lapacke_dsyevx.o \ +lapacke_dsyevx_work.o \ lapacke_dsyevx_2stage.o \ lapacke_dsyevx_2stage_work.o \ lapacke_dsygst.o \ @@ -1087,8 +1107,6 @@ lapacke_dsygvx_work.o \ lapacke_dsyrfs.o \ lapacke_dsyrfs_work.o \ lapacke_dsysv.o \ -lapacke_dsysv_rook.o \ -lapacke_dsysv_rook_work.o \ lapacke_dsysv_work.o \ lapacke_dsysv_aa.o \ lapacke_dsysv_aa_work.o \ @@ -1096,6 +1114,8 @@ lapacke_dsysv_aa_2stage.o \ lapacke_dsysv_aa_2stage_work.o \ lapacke_dsysv_rk.o \ lapacke_dsysv_rk_work.o \ +lapacke_dsysv_rook.o \ +lapacke_dsysv_rook_work.o \ lapacke_dsysvx.o \ lapacke_dsysvx_work.o \ lapacke_dsyswapr.o \ @@ -1104,36 +1124,34 @@ lapacke_dsytrd.o \ lapacke_dsytrd_work.o \ lapacke_dsytrf.o \ lapacke_dsytrf_work.o \ -lapacke_dsytrf_rook.o \ -lapacke_dsytrf_rook_work.o \ lapacke_dsytrf_aa.o \ lapacke_dsytrf_aa_work.o \ lapacke_dsytrf_aa_2stage.o \ lapacke_dsytrf_aa_2stage_work.o \ lapacke_dsytrf_rk.o \ lapacke_dsytrf_rk_work.o \ +lapacke_dsytrf_rook.o \ +lapacke_dsytrf_rook_work.o \ lapacke_dsytri.o \ +lapacke_dsytri_work.o \ lapacke_dsytri2.o \ lapacke_dsytri2_work.o \ -lapacke_dsytri_3.o \ -lapacke_dsytri_3_work.o \ lapacke_dsytri2x.o \ lapacke_dsytri2x_work.o \ -lapacke_dsytri_work.o - -OBJ_B = \ +lapacke_dsytri_3.o \ +lapacke_dsytri_3_work.o \ lapacke_dsytrs.o \ -lapacke_dsytrs_rook.o \ +lapacke_dsytrs_work.o \ lapacke_dsytrs2.o \ lapacke_dsytrs2_work.o \ -lapacke_dsytrs_work.o \ -lapacke_dsytrs_rook_work.o \ +lapacke_dsytrs_3.o \ +lapacke_dsytrs_3_work.o \ lapacke_dsytrs_aa.o \ -lapacke_dsytrs_aa_2stage.o \ lapacke_dsytrs_aa_work.o \ +lapacke_dsytrs_aa_2stage.o \ lapacke_dsytrs_aa_2stage_work.o \ -lapacke_dsytrs_3.o \ -lapacke_dsytrs_3_work.o \ +lapacke_dsytrs_rook.o \ +lapacke_dsytrs_rook_work.o \ lapacke_dtbcon.o \ lapacke_dtbcon_work.o \ lapacke_dtbrfs.o \ @@ -1165,9 +1183,9 @@ lapacke_dtpcon_work.o \ lapacke_dtpmqrt.o \ lapacke_dtpmqrt_work.o \ lapacke_dtpqrt.o \ +lapacke_dtpqrt_work.o \ lapacke_dtpqrt2.o \ lapacke_dtpqrt2_work.o \ -lapacke_dtpqrt_work.o \ lapacke_dtprfb.o \ lapacke_dtprfb_work.o \ lapacke_dtprfs.o \ @@ -1204,15 +1222,20 @@ lapacke_dtrttp.o \ lapacke_dtrttp_work.o \ lapacke_dtzrzf.o \ lapacke_dtzrzf_work.o \ -lapacke_nancheck.o \ +lapacke_slag2d.o \ +lapacke_slag2d_work.o +endif + +ifeq ($(BUILD_SINGLE),1) +OBJ_S = \ lapacke_sbbcsd.o \ lapacke_sbbcsd_work.o \ lapacke_sbdsdc.o \ lapacke_sbdsdc_work.o \ -lapacke_sbdsvdx.o \ -lapacke_sbdsvdx_work.o \ lapacke_sbdsqr.o \ lapacke_sbdsqr_work.o \ +lapacke_sbdsvdx.o \ +lapacke_sbdsvdx_work.o \ lapacke_sdisna.o \ lapacke_sdisna_work.o \ lapacke_sgbbrd.o \ @@ -1290,11 +1313,11 @@ lapacke_sgeqrf_work.o \ 
lapacke_sgeqrfp.o \ lapacke_sgeqrfp_work.o \ lapacke_sgeqrt.o \ +lapacke_sgeqrt_work.o \ lapacke_sgeqrt2.o \ lapacke_sgeqrt2_work.o \ lapacke_sgeqrt3.o \ lapacke_sgeqrt3_work.o \ -lapacke_sgeqrt_work.o \ lapacke_sgerfs.o \ lapacke_sgerfs_work.o \ lapacke_sgerqf.o \ @@ -1305,6 +1328,8 @@ lapacke_sgesv.o \ lapacke_sgesv_work.o \ lapacke_sgesvd.o \ lapacke_sgesvd_work.o \ +lapacke_sgesvdq.o \ +lapacke_sgesvdq_work.o \ lapacke_sgesvdx.o \ lapacke_sgesvdx_work.o \ lapacke_sgesvj.o \ @@ -1341,10 +1366,10 @@ lapacke_sggevx.o \ lapacke_sggevx_work.o \ lapacke_sggglm.o \ lapacke_sggglm_work.o \ -lapacke_sgghrd.o \ -lapacke_sgghrd_work.o \ lapacke_sgghd3.o \ lapacke_sgghd3_work.o \ +lapacke_sgghrd.o \ +lapacke_sgghrd_work.o \ lapacke_sgglse.o \ lapacke_sgglse_work.o \ lapacke_sggqrf.o \ @@ -1377,8 +1402,6 @@ lapacke_slacn2.o \ lapacke_slacn2_work.o \ lapacke_slacpy.o \ lapacke_slacpy_work.o \ -lapacke_slag2d.o \ -lapacke_slag2d_work.o \ lapacke_slamch.o \ lapacke_slamch_work.o \ lapacke_slange.o \ @@ -1537,14 +1560,14 @@ lapacke_spttrs.o \ lapacke_spttrs_work.o \ lapacke_ssbev.o \ lapacke_ssbev_work.o \ -lapacke_ssbevd.o \ -lapacke_ssbevd_work.o \ -lapacke_ssbevx.o \ -lapacke_ssbevx_work.o \ lapacke_ssbev_2stage.o \ lapacke_ssbev_2stage_work.o \ +lapacke_ssbevd.o \ +lapacke_ssbevd_work.o \ lapacke_ssbevd_2stage.o \ lapacke_ssbevd_2stage_work.o \ +lapacke_ssbevx.o \ +lapacke_ssbevx_work.o \ lapacke_ssbevx_2stage.o \ lapacke_ssbevx_2stage_work.o \ lapacke_ssbgst.o \ @@ -1621,18 +1644,18 @@ lapacke_ssyequb.o \ lapacke_ssyequb_work.o \ lapacke_ssyev.o \ lapacke_ssyev_work.o \ -lapacke_ssyevd.o \ -lapacke_ssyevd_work.o \ -lapacke_ssyevr.o \ -lapacke_ssyevr_work.o \ -lapacke_ssyevx.o \ -lapacke_ssyevx_work.o \ lapacke_ssyev_2stage.o \ lapacke_ssyev_2stage_work.o \ +lapacke_ssyevd.o \ +lapacke_ssyevd_work.o \ lapacke_ssyevd_2stage.o \ lapacke_ssyevd_2stage_work.o \ +lapacke_ssyevr.o \ +lapacke_ssyevr_work.o \ lapacke_ssyevr_2stage.o \ lapacke_ssyevr_2stage_work.o \ +lapacke_ssyevx.o \ +lapacke_ssyevx_work.o \ lapacke_ssyevx_2stage.o \ lapacke_ssyevx_2stage_work.o \ lapacke_ssygst.o \ @@ -1648,8 +1671,6 @@ lapacke_ssygvx_work.o \ lapacke_ssyrfs.o \ lapacke_ssyrfs_work.o \ lapacke_ssysv.o \ -lapacke_ssysv_rook.o \ -lapacke_ssysv_rook_work.o \ lapacke_ssysv_work.o \ lapacke_ssysv_aa.o \ lapacke_ssysv_aa_work.o \ @@ -1657,6 +1678,8 @@ lapacke_ssysv_aa_2stage.o \ lapacke_ssysv_aa_2stage_work.o \ lapacke_ssysv_rk.o \ lapacke_ssysv_rk_work.o \ +lapacke_ssysv_rook.o \ +lapacke_ssysv_rook_work.o \ lapacke_ssysvx.o \ lapacke_ssysvx_work.o \ lapacke_ssyswapr.o \ @@ -1665,34 +1688,34 @@ lapacke_ssytrd.o \ lapacke_ssytrd_work.o \ lapacke_ssytrf.o \ lapacke_ssytrf_work.o \ -lapacke_ssytrf_rook.o \ -lapacke_ssytrf_rook_work.o \ lapacke_ssytrf_aa.o \ lapacke_ssytrf_aa_work.o \ lapacke_ssytrf_aa_2stage.o \ lapacke_ssytrf_aa_2stage_work.o \ lapacke_ssytrf_rk.o \ lapacke_ssytrf_rk_work.o \ +lapacke_ssytrf_rook.o \ +lapacke_ssytrf_rook_work.o \ lapacke_ssytri.o \ +lapacke_ssytri_work.o \ lapacke_ssytri2.o \ lapacke_ssytri2_work.o \ -lapacke_ssytri_3.o \ -lapacke_ssytri_3_work.o \ lapacke_ssytri2x.o \ lapacke_ssytri2x_work.o \ -lapacke_ssytri_work.o \ +lapacke_ssytri_3.o \ +lapacke_ssytri_3_work.o \ lapacke_ssytrs.o \ -lapacke_ssytrs_rook.o \ +lapacke_ssytrs_work.o \ lapacke_ssytrs2.o \ lapacke_ssytrs2_work.o \ -lapacke_ssytrs_work.o \ -lapacke_ssytrs_rook_work.o \ +lapacke_ssytrs_3.o \ +lapacke_ssytrs_3_work.o \ lapacke_ssytrs_aa.o \ -lapacke_ssytrs_aa_2stage.o \ lapacke_ssytrs_aa_work.o \ +lapacke_ssytrs_aa_2stage.o \ 
lapacke_ssytrs_aa_2stage_work.o \ -lapacke_ssytrs_3.o \ -lapacke_ssytrs_3_work.o \ +lapacke_ssytrs_rook.o \ +lapacke_ssytrs_rook_work.o \ lapacke_stbcon.o \ lapacke_stbcon_work.o \ lapacke_stbrfs.o \ @@ -1762,7 +1785,11 @@ lapacke_strttf_work.o \ lapacke_strttp.o \ lapacke_strttp_work.o \ lapacke_stzrzf.o \ -lapacke_stzrzf_work.o \ +lapacke_stzrzf_work.o +endif + +ifeq ($(BUILD_COMPLEX16),1) +OBJ_Z = \ lapacke_zbbcsd.o \ lapacke_zbbcsd_work.o \ lapacke_zbdsqr.o \ @@ -1846,11 +1873,11 @@ lapacke_zgeqrf_work.o \ lapacke_zgeqrfp.o \ lapacke_zgeqrfp_work.o \ lapacke_zgeqrt.o \ +lapacke_zgeqrt_work.o \ lapacke_zgeqrt2.o \ lapacke_zgeqrt2_work.o \ lapacke_zgeqrt3.o \ lapacke_zgeqrt3_work.o \ -lapacke_zgeqrt_work.o \ lapacke_zgerfs.o \ lapacke_zgerfs_work.o \ lapacke_zgerqf.o \ @@ -1861,6 +1888,8 @@ lapacke_zgesv.o \ lapacke_zgesv_work.o \ lapacke_zgesvd.o \ lapacke_zgesvd_work.o \ +lapacke_zgesvdq.o \ +lapacke_zgesvdq_work.o \ lapacke_zgesvdx.o \ lapacke_zgesvdx_work.o \ lapacke_zgesvj.o \ @@ -1897,10 +1926,10 @@ lapacke_zggevx.o \ lapacke_zggevx_work.o \ lapacke_zggglm.o \ lapacke_zggglm_work.o \ -lapacke_zgghrd.o \ -lapacke_zgghrd_work.o \ lapacke_zgghd3.o \ lapacke_zgghd3_work.o \ +lapacke_zgghrd.o \ +lapacke_zgghrd_work.o \ lapacke_zgglse.o \ lapacke_zgglse_work.o \ lapacke_zggqrf.o \ @@ -1925,14 +1954,14 @@ lapacke_zgttrs.o \ lapacke_zgttrs_work.o \ lapacke_zhbev.o \ lapacke_zhbev_work.o \ -lapacke_zhbevd.o \ -lapacke_zhbevd_work.o \ -lapacke_zhbevx.o \ -lapacke_zhbevx_work.o \ lapacke_zhbev_2stage.o \ lapacke_zhbev_2stage_work.o \ +lapacke_zhbevd.o \ +lapacke_zhbevd_work.o \ lapacke_zhbevd_2stage.o \ lapacke_zhbevd_2stage_work.o \ +lapacke_zhbevx.o \ +lapacke_zhbevx_work.o \ lapacke_zhbevx_2stage.o \ lapacke_zhbevx_2stage_work.o \ lapacke_zhbgst.o \ @@ -1953,18 +1982,18 @@ lapacke_zheequb.o \ lapacke_zheequb_work.o \ lapacke_zheev.o \ lapacke_zheev_work.o \ -lapacke_zheevd.o \ -lapacke_zheevd_work.o \ -lapacke_zheevr.o \ -lapacke_zheevr_work.o \ -lapacke_zheevx.o \ -lapacke_zheevx_work.o \ lapacke_zheev_2stage.o \ lapacke_zheev_2stage_work.o \ +lapacke_zheevd.o \ +lapacke_zheevd_work.o \ lapacke_zheevd_2stage.o \ lapacke_zheevd_2stage_work.o \ +lapacke_zheevr.o \ +lapacke_zheevr_work.o \ lapacke_zheevr_2stage.o \ lapacke_zheevr_2stage_work.o \ +lapacke_zheevx.o \ +lapacke_zheevx_work.o \ lapacke_zheevx_2stage.o \ lapacke_zheevx_2stage_work.o \ lapacke_zhegst.o \ @@ -1994,35 +2023,35 @@ lapacke_zheswapr_work.o \ lapacke_zhetrd.o \ lapacke_zhetrd_work.o \ lapacke_zhetrf.o \ -lapacke_zhetrf_rook.o \ lapacke_zhetrf_work.o \ -lapacke_zhetrf_rook_work.o \ lapacke_zhetrf_aa.o \ -lapacke_zhetrf_aa_2stage.o \ lapacke_zhetrf_aa_work.o \ +lapacke_zhetrf_aa_2stage.o \ lapacke_zhetrf_aa_2stage_work.o \ lapacke_zhetrf_rk.o \ lapacke_zhetrf_rk_work.o \ +lapacke_zhetrf_rook.o \ +lapacke_zhetrf_rook_work.o \ lapacke_zhetri.o \ +lapacke_zhetri_work.o \ lapacke_zhetri2.o \ lapacke_zhetri2_work.o \ -lapacke_zhetri_3.o \ -lapacke_zhetri_3_work.o \ lapacke_zhetri2x.o \ lapacke_zhetri2x_work.o \ -lapacke_zhetri_work.o \ +lapacke_zhetri_3.o \ +lapacke_zhetri_3_work.o \ lapacke_zhetrs.o \ -lapacke_zhetrs_rook.o \ +lapacke_zhetrs_work.o \ lapacke_zhetrs2.o \ lapacke_zhetrs2_work.o \ -lapacke_zhetrs_work.o \ -lapacke_zhetrs_rook_work.o \ +lapacke_zhetrs_3.o \ +lapacke_zhetrs_3_work.o \ lapacke_zhetrs_aa.o \ -lapacke_zhetrs_aa_2stage.o \ lapacke_zhetrs_aa_work.o \ +lapacke_zhetrs_aa_2stage.o \ lapacke_zhetrs_aa_2stage_work.o \ -lapacke_zhetrs_3.o \ -lapacke_zhetrs_3_work.o \ +lapacke_zhetrs_rook.o \ 
+lapacke_zhetrs_rook_work.o \ lapacke_zhfrk.o \ lapacke_zhfrk_work.o \ lapacke_zhgeqz.o \ @@ -2213,11 +2242,11 @@ lapacke_zsyconv.o \ lapacke_zsyconv_work.o \ lapacke_zsyequb.o \ lapacke_zsyequb_work.o \ +lapacke_zsyr.o \ +lapacke_zsyr_work.o \ lapacke_zsyrfs.o \ lapacke_zsyrfs_work.o \ lapacke_zsysv.o \ -lapacke_zsysv_rook.o \ -lapacke_zsysv_rook_work.o \ lapacke_zsysv_work.o \ lapacke_zsysv_aa.o \ lapacke_zsysv_aa_work.o \ @@ -2225,40 +2254,42 @@ lapacke_zsysv_aa_2stage.o \ lapacke_zsysv_aa_2stage_work.o \ lapacke_zsysv_rk.o \ lapacke_zsysv_rk_work.o \ +lapacke_zsysv_rook.o \ +lapacke_zsysv_rook_work.o \ lapacke_zsysvx.o \ lapacke_zsysvx_work.o \ lapacke_zsyswapr.o \ lapacke_zsyswapr_work.o \ lapacke_zsytrf.o \ lapacke_zsytrf_work.o \ -lapacke_zsytrf_rook.o \ -lapacke_zsytrf_rook_work.o \ lapacke_zsytrf_aa.o \ -lapacke_zsytrf_aa_2stage.o \ lapacke_zsytrf_aa_work.o \ +lapacke_zsytrf_aa_2stage.o \ lapacke_zsytrf_aa_2stage_work.o \ lapacke_zsytrf_rk.o \ lapacke_zsytrf_rk_work.o \ +lapacke_zsytrf_rook.o \ +lapacke_zsytrf_rook_work.o \ lapacke_zsytri.o \ +lapacke_zsytri_work.o \ lapacke_zsytri2.o \ lapacke_zsytri2_work.o \ -lapacke_zsytri_3.o \ -lapacke_zsytri_3_work.o \ lapacke_zsytri2x.o \ lapacke_zsytri2x_work.o \ -lapacke_zsytri_work.o \ +lapacke_zsytri_3.o \ +lapacke_zsytri_3_work.o \ lapacke_zsytrs.o \ -lapacke_zsytrs_rook.o \ +lapacke_zsytrs_work.o \ lapacke_zsytrs2.o \ lapacke_zsytrs2_work.o \ -lapacke_zsytrs_work.o \ -lapacke_zsytrs_rook_work.o \ +lapacke_zsytrs_3.o \ +lapacke_zsytrs_3_work.o \ lapacke_zsytrs_aa.o \ -lapacke_zsytrs_aa_2stage.o \ lapacke_zsytrs_aa_work.o \ +lapacke_zsytrs_aa_2stage.o \ lapacke_zsytrs_aa_2stage_work.o \ -lapacke_zsytrs_3.o \ -lapacke_zsytrs_3_work.o \ +lapacke_zsytrs_rook.o \ +lapacke_zsytrs_rook_work.o \ lapacke_ztbcon.o \ lapacke_ztbcon_work.o \ lapacke_ztbrfs.o \ @@ -2290,9 +2321,9 @@ lapacke_ztpcon_work.o \ lapacke_ztpmqrt.o \ lapacke_ztpmqrt_work.o \ lapacke_ztpqrt.o \ +lapacke_ztpqrt_work.o \ lapacke_ztpqrt2.o \ lapacke_ztpqrt2_work.o \ -lapacke_ztpqrt_work.o \ lapacke_ztprfb.o \ lapacke_ztprfb_work.o \ lapacke_ztprfs.o \ @@ -2368,41 +2399,53 @@ lapacke_zunmtr_work.o \ lapacke_zupgtr.o \ lapacke_zupgtr_work.o \ lapacke_zupmtr.o \ -lapacke_zupmtr_work.o \ -lapacke_zsyr.o \ -lapacke_csyr.o \ -lapacke_zsyr_work.o \ -lapacke_csyr_work.o \ -lapacke_ilaver.o +lapacke_zupmtr_work.o +endif ifdef BUILD_DEPRECATED -DEPRECATED = \ +ifeq ($(BUILD_COMPLEX),1) +DEPRECATEDC = \ lapacke_cggsvp.o \ lapacke_cggsvp_work.o \ -lapacke_dggsvp.o \ -lapacke_dggsvp_work.o \ -lapacke_sggsvp.o \ -lapacke_sggsvp_work.o \ -lapacke_zggsvp.o \ -lapacke_zggsvp_work.o \ lapacke_cggsvd.o \ lapacke_cggsvd_work.o \ +lapacke_cgeqpf.o \ +lapacke_cgeqpf_work.o +endif + +ifeq ($(BUILD_DOUBLE),1) +DEPRECATEDD = \ +lapacke_dggsvp.o \ +lapacke_dggsvp_work.o \ lapacke_dggsvd.o \ lapacke_dggsvd_work.o \ +lapacke_dgeqpf.o \ +lapacke_dgeqpf_work.o +endif + +ifeq ($(BUILD_SINGLE),1) +DEPRECATEDS = \ +lapacke_sggsvp.o \ +lapacke_sggsvp_work.o \ lapacke_sggsvd.o \ lapacke_sggsvd_work.o \ +lapacke_sgeqpf.o \ +lapacke_sgeqpf_work.o +endif + +ifeq ($(BUILD_COMPLEX16),1) +DEPRECATEDZ = \ +lapacke_zggsvp.o \ +lapacke_zggsvp_work.o \ lapacke_zggsvd.o \ lapacke_zggsvd_work.o \ -lapacke_cgeqpf.o \ -lapacke_cgeqpf_work.o \ -lapacke_dgeqpf.o \ -lapacke_dgeqpf_work.o \ -lapacke_sgeqpf.o \ -lapacke_sgeqpf_work.o \ lapacke_zgeqpf.o \ lapacke_zgeqpf_work.o endif +DEPRECATED = $(DEPRECATEDS) $(DEPRECATEDD) $(DEPRECATEDC) $(DEPRECATEDZ) +endif + ifdef USEXBLAS EXTENDED = \ lapacke_cgbrfsx.o lapacke_cporfsx.o 
lapacke_dgerfsx.o lapacke_sgbrfsx.o lapacke_ssyrfsx.o lapacke_zherfsx.o \ @@ -2421,58 +2464,73 @@ endif ifdef LAPACKE_WITH_TMG # FILE PARTS OF TMGLIB -MATGEN = \ +ifeq ($(BUILD_COMPLEX),1) +MATGENC = \ lapacke_clatms.o \ lapacke_clatms_work.o \ -lapacke_dlatms.o \ -lapacke_dlatms_work.o \ -lapacke_slatms.o \ -lapacke_slatms_work.o \ -lapacke_zlatms.o \ -lapacke_zlatms_work.o \ lapacke_clagge.o \ lapacke_clagge_work.o \ +lapacke_claghe.o \ +lapacke_claghe_work.o \ +lapacke_clagsy.o \ +lapacke_clagsy_work.o +endif +ifeq ($(BUILD_DOUBLE),1) +MATGEND = \ +lapacke_dlatms.o \ +lapacke_dlatms_work.o \ lapacke_dlagge.o \ lapacke_dlagge_work.o \ +lapacke_dlagsy.o \ +lapacke_dlagsy_work.o +endif +ifeq ($(BUILD_SINGLE),1) +MATGENS = \ +lapacke_slatms.o \ +lapacke_slatms_work.o \ lapacke_slagge.o \ lapacke_slagge_work.o \ +lapacke_slagsy.o \ +lapacke_slagsy_work.o +endif +ifeq ($(BUILD_COMPLEX16),1) +MATGENZ = \ +lapacke_zlatms.o \ +lapacke_zlatms_work.o \ lapacke_zlagge.o \ lapacke_zlagge_work.o \ -lapacke_claghe.o \ -lapacke_claghe_work.o \ lapacke_zlaghe.o \ lapacke_zlaghe_work.o \ -lapacke_clagsy.o \ -lapacke_clagsy_work.o \ -lapacke_dlagsy.o \ -lapacke_dlagsy_work.o \ -lapacke_slagsy.o \ -lapacke_slagsy_work.o \ lapacke_zlagsy.o \ lapacke_zlagsy_work.o endif -all: ../../$(LAPACKELIB) +MATGEN = $(MATGENS) $(MATGEND) $(MATGENC) $(MATGENZ) +endif -.PHONY: ../../$(LAPACKELIB) +.PHONY: all +all: $(LAPACKELIB) -../../$(LAPACKELIB): $(OBJ_A) $(OBJ_B) $(DEPRECATED) $(EXTENDED) $(MATGEN) - $(ARCH) $(ARCHFLAGS) $@ $(OBJ_A) - $(ARCH) $(ARCHFLAGS) $@ $(OBJ_B) +$(LAPACKELIB): $(OBJ) $(OBJ_S) $(OBJ_C) $(OBJ_D) $(OBJ_Z) $(DEPRECATED) $(EXTENDED) $(MATGEN) + $(AR) $(ARFLAGS) $@ $(OBJ) + $(AR) $(ARFLAGS) $@ $(OBJ_S) + $(AR) $(ARFLAGS) $@ $(OBJ_C) + $(AR) $(ARFLAGS) $@ $(OBJ_D) + $(AR) $(ARFLAGS) $@ $(OBJ_Z) ifdef BUILD_DEPRECATED - $(ARCH) $(ARCHFLAGS) $@ $(DEPRECATED) + $(AR) $(ARFLAGS) $@ $(DEPRECATED) endif ifdef (USEXBLAS) - $(ARCH) $(ARCHFLAGS) $@ $(EXTENDED) + $(AR) $(ARFLAGS) $@ $(EXTENDED) endif ifdef LAPACKE_WITH_TMG - $(ARCH) $(ARCHFLAGS) $@ $(MATGEN) + $(AR) $(ARFLAGS) $@ $(MATGEN) endif $(RANLIB) $@ -clean: cleanobj +.PHONY: clean cleanobj cleanlib +clean: cleanobj cleanlib cleanobj: rm -f *.o - -.c.o: - $(CC) $(CFLAGS) -I../include -c -o $@ $< +cleanlib: + rm -f $(LAPACKELIB) diff --git a/lapack-netlib/LAPACKE/src/lapacke_cgejsv.c b/lapack-netlib/LAPACKE/src/lapacke_cgejsv.c index 7d371f660..41278428b 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cgejsv.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cgejsv.c @@ -124,7 +124,6 @@ lapack_int LAPACKE_cgejsv( int matrix_layout, char joba, char jobu, char jobv, float* rwork = NULL; lapack_complex_float* cwork = NULL; lapack_int i; - lapack_int nu, nv; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_cgejsv", -1 ); return -1; @@ -132,8 +131,6 @@ lapack_int LAPACKE_cgejsv( int matrix_layout, char joba, char jobu, char jobv, #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - nu = LAPACKE_lsame( jobu, 'n' ) ? 1 : m; - nv = LAPACKE_lsame( jobv, 'n' ) ? 
1 : n; if( LAPACKE_cge_nancheck( matrix_layout, m, n, a, lda ) ) { return -10; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_cgelsd.c b/lapack-netlib/LAPACKE/src/lapacke_cgelsd.c index 2ee891977..9d022dae6 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cgelsd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cgelsd.c @@ -75,7 +75,7 @@ lapack_int LAPACKE_cgelsd( int matrix_layout, lapack_int m, lapack_int n, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_cgesvdq.c b/lapack-netlib/LAPACKE/src/lapacke_cgesvdq.c new file mode 100644 index 000000000..c5eca535e --- /dev/null +++ b/lapack-netlib/LAPACKE/src/lapacke_cgesvdq.c @@ -0,0 +1,106 @@ +/***************************************************************************** + Copyright (c) 2014, Intel Corp. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. 
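Aside: the recurring hunks of the form "- liwork = (lapack_int)iwork_query; + liwork = iwork_query;" (cgelsd above, and many more below) all touch LAPACKE's standard two-pass workspace query: the routine is first called with lwork/liwork set to -1, the optimal sizes come back through work_query/iwork_query, the work arrays are allocated, and the routine is called again for the real computation. A minimal sketch of that idiom against LAPACK_dsyevd follows (illustrative only, not part of this patch; the helper name dsyevd_eigenvalues is hypothetical). It also shows why the removed casts were redundant: the integer-workspace query already returns a lapack_int, while the real-valued work query still needs a cast.

    #include <stdlib.h>
    #include <lapacke.h>

    /* Sketch: two-pass workspace query, the pattern used throughout these files. */
    lapack_int dsyevd_eigenvalues( lapack_int n, double* a, lapack_int lda, double* w )
    {
        char jobz = 'N', uplo = 'U';
        double work_query;
        lapack_int iwork_query;
        lapack_int lwork = -1, liwork = -1, info = 0;
        /* Pass 1: lwork/liwork = -1 asks dsyevd only for its optimal sizes. */
        LAPACK_dsyevd( &jobz, &uplo, &n, a, &lda, w, &work_query, &lwork,
                       &iwork_query, &liwork, &info );
        if( info != 0 ) return info;
        lwork  = (lapack_int)work_query;   /* size comes back in a double: cast */
        liwork = iwork_query;              /* already a lapack_int: no cast needed */
        double* work = (double*)malloc( sizeof(double) * lwork );
        lapack_int* iwork = (lapack_int*)malloc( sizeof(lapack_int) * liwork );
        if( work == NULL || iwork == NULL ) {
            free( work );
            free( iwork );
            return LAPACK_WORK_MEMORY_ERROR;
        }
        /* Pass 2: the actual computation. */
        LAPACK_dsyevd( &jobz, &uplo, &n, a, &lda, w, work, &lwork,
                       iwork, &liwork, &info );
        free( work );
        free( iwork );
        return info;
    }
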
+***************************************************************************** +* Contents: Native high-level C interface to LAPACK function cgesvdq +* Author: Intel Corporation +* Generated November 2018 +*****************************************************************************/ + +#include "lapacke_utils.h" + +lapack_int LAPACKE_cgesvdq( int matrix_layout, char joba, char jobp, + char jobr, char jobu, char jobv, + lapack_int m, lapack_int n, lapack_complex_float* a, + lapack_int lda, float* s, lapack_complex_float* u, lapack_int ldu, + lapack_complex_float* v, lapack_int ldv, lapack_int* numrank) +{ + lapack_int info = 0; + lapack_int liwork = -1; + lapack_int* iwork = NULL; + lapack_int iwork_query; + lapack_int lcwork = -1; + lapack_complex_float* cwork = NULL; + lapack_complex_float cwork_query; + lapack_int lrwork = -1; + float* rwork = NULL; + float rwork_query; + lapack_int i; + if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { + LAPACKE_xerbla( "LAPACKE_cgesvdq", -1 ); + return -1; + } +#ifndef LAPACK_DISABLE_NAN_CHECK + if( LAPACKE_get_nancheck() ) { + /* Optionally check input matrices for NaNs */ + if( LAPACKE_cge_nancheck( matrix_layout, m, n, a, lda ) ) { + return -6; + } + } +#endif + /* Query optimal working array(s) size */ + info = LAPACKE_cgesvdq_work( matrix_layout, joba, jobp, jobr, jobu, jobv, + m, n, a, lda, s, u, ldu, v, ldv, numrank, + &iwork_query, liwork, &cwork_query, lcwork, + &rwork_query, lrwork ); + if( info != 0 ) { + goto exit_level_0; + } + liwork = iwork_query; + lcwork = LAPACK_C2INT(cwork_query); + lrwork = (lapack_int)rwork_query; + /* Allocate memory for work arrays */ + iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); + if( iwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + cwork = (lapack_complex_float*)LAPACKE_malloc( sizeof(lapack_complex_float) * lcwork ); + if( cwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + rwork = (float*)LAPACKE_malloc( sizeof(float) * lrwork ); + if( rwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + /* Call middle-level interface */ + info = LAPACKE_cgesvdq_work( matrix_layout, joba, jobp, jobr, jobu, jobv, + m, n, a, lda, s, u, ldu, v, ldv, numrank, + iwork, liwork, cwork, lcwork, rwork, lrwork ); + + /* Release memory and exit */ + LAPACKE_free( iwork ); + LAPACKE_free( cwork ); + LAPACKE_free( rwork ); +exit_level_0: + if( info == LAPACK_WORK_MEMORY_ERROR ) { + LAPACKE_xerbla( "LAPACKE_cgesvdq", info ); + } + return info; +} diff --git a/lapack-netlib/LAPACKE/src/lapacke_cgesvdq_work.c b/lapack-netlib/LAPACKE/src/lapacke_cgesvdq_work.c new file mode 100644 index 000000000..e86f76e4b --- /dev/null +++ b/lapack-netlib/LAPACKE/src/lapacke_cgesvdq_work.c @@ -0,0 +1,149 @@ +/***************************************************************************** + Copyright (c) 2014, Intel Corp. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
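Aside: the wrapper above is the entire new high-level entry point; workspace handling is delegated to the middle-level LAPACKE_cgesvdq_work that follows. A minimal caller might look like this -- a sketch, not part of the patch; the job flags follow my reading of the netlib ?GESVDQ documentation ('H' high accuracy, 'P' row pivoting, 'N' no transpose test, 'S' economy-size U, 'A' full V), and the matrix values are illustrative:

    #include <stdio.h>
    #include <lapacke.h>

    int main(void)
    {
        /* 3x2 column-major complex matrix */
        lapack_complex_float a[6] = {
            lapack_make_complex_float( 1.0f, 0.0f ),
            lapack_make_complex_float( 0.0f, 2.0f ),
            lapack_make_complex_float( 3.0f, 0.0f ),
            lapack_make_complex_float( 0.0f, 1.0f ),
            lapack_make_complex_float( 2.0f, 0.0f ),
            lapack_make_complex_float( 0.0f, 4.0f )
        };
        float s[2];
        lapack_complex_float u[6], v[4];
        lapack_int numrank = 0;
        /* m=3, n=2, lda=3; jobu='S' gives a 3x2 U (ldu=3), jobv='A' a 2x2 V (ldv=2). */
        lapack_int info = LAPACKE_cgesvdq( LAPACK_COL_MAJOR, 'H', 'P', 'N', 'S', 'A',
                                           3, 2, a, 3, s, u, 3, v, 2, &numrank );
        if( info == 0 )
            printf( "numerical rank %d, largest singular value %f\n",
                    (int)numrank, s[0] );
        return (int)info;
    }
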
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. +***************************************************************************** +* Contents: Native middle-level C interface to LAPACK function cgesvdq +* Author: Intel Corporation +* Generated November 2015 +*****************************************************************************/ + +#include "lapacke_utils.h" + +lapack_int LAPACKE_cgesvdq_work( int matrix_layout, char joba, char jobp, + char jobr, char jobu, char jobv, + lapack_int m, lapack_int n, lapack_complex_float* a, + lapack_int lda, float* s, lapack_complex_float* u, lapack_int ldu, + lapack_complex_float* v, lapack_int ldv, lapack_int* numrank, + lapack_int* iwork, lapack_int liwork, + lapack_complex_float* cwork, lapack_int lcwork, + float* rwork, lapack_int lrwork ) +{ + lapack_int info = 0; + if( matrix_layout == LAPACK_COL_MAJOR ) { + /* Call LAPACK function and adjust info */ + LAPACK_cgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda, s, u, &ldu, v, &ldv, + numrank, iwork, &liwork, cwork, &lcwork, rwork, &lrwork, &info ); + if( info < 0 ) { + info = info - 1; + } + } else if( matrix_layout == LAPACK_ROW_MAJOR ) { + lapack_int nrows_u = ( LAPACKE_lsame( jobu, 'a' ) || + LAPACKE_lsame( jobu, 's' ) ) ? m : 1; + lapack_int ncols_u = LAPACKE_lsame( jobu, 'a' ) ? m : + (LAPACKE_lsame( jobu, 's' ) ? MIN(m,n) : 1); + lapack_int nrows_v = LAPACKE_lsame( jobv, 'a' ) ? n : + ( LAPACKE_lsame( jobv, 's' ) ? MIN(m,n) : 1); + lapack_int lda_t = MAX(1,m); + lapack_int ldu_t = MAX(1,nrows_u); + lapack_int ldv_t = MAX(1,nrows_v); + lapack_complex_float* a_t = NULL; + lapack_complex_float* u_t = NULL; + lapack_complex_float* v_t = NULL; + /* Check leading dimension(s) */ + if( lda < n ) { + info = -9; + LAPACKE_xerbla( "LAPACKE_cgesvdq_work", info ); + return info; + } + if( ldu < ncols_u ) { + info = -12; + LAPACKE_xerbla( "LAPACKE_cgesvdq_work", info ); + return info; + } + if( ldv < n ) { + info = -14; + LAPACKE_xerbla( "LAPACKE_cgesvdq_work", info ); + return info; + } + /* Query optimal working array(s) size if requested */ + if( lcwork == -1 ) { + LAPACK_cgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda_t, + s, u, &ldu_t, v, &ldv_t, numrank, iwork, &liwork, + cwork, &lcwork, rwork, &lrwork, &info ); + return (info < 0) ? 
(info - 1) : info; + } + /* Allocate memory for temporary array(s) */ + a_t = (lapack_complex_float*)LAPACKE_malloc( sizeof(lapack_complex_float) * lda_t * MAX(1,n) ); + if( a_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_0; + } + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + u_t = (lapack_complex_float*) + LAPACKE_malloc( sizeof(lapack_complex_float) * ldu_t * MAX(1,ncols_u) ); + if( u_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_1; + } + } + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + v_t = (lapack_complex_float*) + LAPACKE_malloc( sizeof(lapack_complex_float) * ldv_t * MAX(1,n) ); + if( v_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_2; + } + } + /* Transpose input matrices */ + LAPACKE_cge_trans( matrix_layout, m, n, a, lda, a_t, lda_t ); + /* Call LAPACK function on the transposed copies and adjust info */ + LAPACK_cgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a_t, &lda_t, + s, u_t, &ldu_t, v_t, &ldv_t, numrank, iwork, &liwork, + cwork, &lcwork, rwork, &lrwork, &info ); + if( info < 0 ) { + info = info - 1; + } + /* Transpose output matrices */ + LAPACKE_cge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda ); + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + LAPACKE_cge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t, + u, ldu ); + } + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + LAPACKE_cge_trans( LAPACK_COL_MAJOR, nrows_v, n, v_t, ldv_t, v, + ldv ); + } + /* Release memory and exit */ + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + LAPACKE_free( v_t ); + } +exit_level_2: + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + LAPACKE_free( u_t ); + } +exit_level_1: + LAPACKE_free( a_t ); +exit_level_0: + if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) { + LAPACKE_xerbla( "LAPACKE_cgesvdq_work", info ); + } + } else { + info = -1; + LAPACKE_xerbla( "LAPACKE_cgesvdq_work", info ); + } + return info; +} diff --git a/lapack-netlib/LAPACKE/src/lapacke_cggesx.c b/lapack-netlib/LAPACKE/src/lapacke_cggesx.c index fc939a314..9581691c6 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cggesx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cggesx.c @@ -91,7 +91,7 @@ lapack_int LAPACKE_cggesx( int matrix_layout, char jobvsl, char jobvsr, if( info != 0 ) { goto exit_level_2; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_chbevd.c b/lapack-netlib/LAPACKE/src/lapacke_chbevd.c index 024cf2585..b4af255a9 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_chbevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chbevd.c @@ -67,7 +67,7 @@ lapack_int LAPACKE_chbevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_chbevd_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_chbevd_2stage.c index 63f7d8ccb..e8e9a6830 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_chbevd_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chbevd_2stage.c @@ -67,7 +67,7 @@ lapack_int LAPACKE_chbevd_2stage( int matrix_layout, char jobz, char uplo, lapac if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; +
liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_chbgvd.c b/lapack-netlib/LAPACKE/src/lapacke_chbgvd.c index d44f6c622..5a7331d87 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_chbgvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chbgvd.c @@ -71,7 +71,7 @@ lapack_int LAPACKE_chbgvd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_cheev_work.c b/lapack-netlib/LAPACKE/src/lapacke_cheev_work.c index 40224607c..aa78e678e 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cheev_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cheev_work.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_cheev_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_cge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_che_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_cheev( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, rwork, &info ); @@ -78,7 +78,11 @@ lapack_int LAPACKE_cheev_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( LAPACKE_lsame( jobz, 'v' ) ) { + LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_che_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_cheevd.c b/lapack-netlib/LAPACKE/src/lapacke_cheevd.c index d0dea375b..75fa47915 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cheevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cheevd.c @@ -53,7 +53,7 @@ lapack_int LAPACKE_cheevd( int matrix_layout, char jobz, char uplo, lapack_int n #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_cge_nancheck( matrix_layout, n, n, a, lda ) ) { + if( LAPACKE_che_nancheck( matrix_layout, uplo, n, a, lda ) ) { return -5; } } @@ -65,7 +65,7 @@ lapack_int LAPACKE_cheevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_cheevd_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_cheevd_2stage.c index d87481abf..cb4d34a09 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cheevd_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cheevd_2stage.c @@ -53,7 +53,7 @@ lapack_int LAPACKE_cheevd_2stage( int matrix_layout, char jobz, char uplo, lapac #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_cge_nancheck( matrix_layout, n, n, a, lda ) ) { + if( LAPACKE_che_nancheck( matrix_layout, uplo, n, a, lda ) ) { return -5; } } @@ -65,7 +65,7 @@ lapack_int LAPACKE_cheevd_2stage( int matrix_layout, char jobz, char uplo, lapac if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for
work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_cheevd_2stage_work.c b/lapack-netlib/LAPACKE/src/lapacke_cheevd_2stage_work.c index cb51f9ee4..d26c84785 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cheevd_2stage_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cheevd_2stage_work.c @@ -71,7 +71,7 @@ lapack_int LAPACKE_cheevd_2stage_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_cge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_che_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_cheevd_2stage( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, rwork, &lrwork, iwork, &liwork, &info ); @@ -79,7 +79,11 @@ lapack_int LAPACKE_cheevd_2stage_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( LAPACKE_lsame( jobz, 'v' ) ) { + LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_che_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_cheevd_work.c b/lapack-netlib/LAPACKE/src/lapacke_cheevd_work.c index 81869c564..e8f212efb 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cheevd_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cheevd_work.c @@ -71,7 +71,7 @@ lapack_int LAPACKE_cheevd_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_cge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_che_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_cheevd( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, rwork, &lrwork, iwork, &liwork, &info ); @@ -79,7 +79,11 @@ lapack_int LAPACKE_cheevd_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( LAPACKE_lsame( jobz, 'v' ) ) { + LAPACKE_cge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_che_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_cheevr.c b/lapack-netlib/LAPACKE/src/lapacke_cheevr.c index 6fe261624..f277e7f70 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cheevr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cheevr.c @@ -83,7 +83,7 @@ lapack_int LAPACKE_cheevr( int matrix_layout, char jobz, char range, char uplo, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_cheevr_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_cheevr_2stage.c index 5b3f5c77a..a09eac1bd 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cheevr_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cheevr_2stage.c @@ -83,7 +83,7 @@ lapack_int LAPACKE_cheevr_2stage( int matrix_layout, char jobz, char range, char if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_chegst.c b/lapack-netlib/LAPACKE/src/lapacke_chegst.c index c628017c2..ff7dd3532 100644 ---
a/lapack-netlib/LAPACKE/src/lapacke_chegst.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chegst.c @@ -35,7 +35,7 @@ lapack_int LAPACKE_chegst( int matrix_layout, lapack_int itype, char uplo, lapack_int n, lapack_complex_float* a, - lapack_int lda, const lapack_complex_float* b, + lapack_int lda, lapack_complex_float* b, lapack_int ldb ) { if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_chegst_work.c b/lapack-netlib/LAPACKE/src/lapacke_chegst_work.c index 001863819..a29e01961 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_chegst_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chegst_work.c @@ -35,7 +35,7 @@ lapack_int LAPACKE_chegst_work( int matrix_layout, lapack_int itype, char uplo, lapack_int n, lapack_complex_float* a, - lapack_int lda, const lapack_complex_float* b, + lapack_int lda, lapack_complex_float* b, lapack_int ldb ) { lapack_int info = 0; diff --git a/lapack-netlib/LAPACKE/src/lapacke_chegvd.c b/lapack-netlib/LAPACKE/src/lapacke_chegvd.c index 2959cb0dc..98c901982 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_chegvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chegvd.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_chegvd( int matrix_layout, lapack_int itype, char jobz, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_chpevd.c b/lapack-netlib/LAPACKE/src/lapacke_chpevd.c index 47c7bbe23..fbdb73802 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_chpevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chpevd.c @@ -66,7 +66,7 @@ lapack_int LAPACKE_chpevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_chpgvd.c b/lapack-netlib/LAPACKE/src/lapacke_chpgvd.c index 568882ec9..587d1509a 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_chpgvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chpgvd.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_chpgvd( int matrix_layout, lapack_int itype, char jobz, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_clantr_work.c b/lapack-netlib/LAPACKE/src/lapacke_clantr_work.c index 7f74a9789..8c4c21935 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_clantr_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_clantr_work.c @@ -43,12 +43,10 @@ float LAPACKE_clantr_work( int matrix_layout, char norm, char uplo, if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ res = LAPACK_clantr( &norm, &uplo, &diag, &m, &n, a, &lda, work ); - if( info < 0 ) { - info = info - 1; - } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int lda_t = MAX(1,m); lapack_complex_float* a_t = NULL; + float* work_lapack = NULL; /* Check leading dimension(s) */ if( lda < n ) { info = -8; @@ -62,12 +60,23 @@ float LAPACKE_clantr_work( int matrix_layout, char norm, char uplo, info = LAPACK_TRANSPOSE_MEMORY_ERROR; goto exit_level_0; } + /* Allocate memory for work array(s) */ + if( LAPACKE_lsame( norm, 'i' ) ) { + work_lapack = (float*)LAPACKE_malloc( 
sizeof(float) * MAX(1,m) ); + if( work_lapack == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_1; + } + } /* Transpose input matrices */ LAPACKE_ctr_trans( matrix_layout, uplo, diag, MAX(m,n), a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - res = LAPACK_clantr( &norm, &uplo, &diag, &m, &n, a_t, &lda_t, work ); - info = 0; /* LAPACK call is ok! */ + res = LAPACK_clantr( &norm, &uplo, &diag, &m, &n, a_t, &lda_t, work_lapack ); /* Release memory and exit */ + if( work_lapack ) { + LAPACKE_free( work_lapack ); + } +exit_level_1: LAPACKE_free( a_t ); exit_level_0: if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_cstedc.c b/lapack-netlib/LAPACKE/src/lapacke_cstedc.c index 5be3cec70..3c0be27d5 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cstedc.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cstedc.c @@ -73,7 +73,7 @@ lapack_int LAPACKE_cstedc( int matrix_layout, char compz, lapack_int n, float* d if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_cstegr.c b/lapack-netlib/LAPACKE/src/lapacke_cstegr.c index 986702e62..86a0cd72d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cstegr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cstegr.c @@ -81,7 +81,7 @@ lapack_int LAPACKE_cstegr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_cstemr.c b/lapack-netlib/LAPACKE/src/lapacke_cstemr.c index 9b9b84e49..51e63c675 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cstemr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cstemr.c @@ -75,7 +75,7 @@ lapack_int LAPACKE_cstemr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_csytrs2.c b/lapack-netlib/LAPACKE/src/lapacke_csytrs2.c index f4a0a4334..44405c993 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_csytrs2.c +++ b/lapack-netlib/LAPACKE/src/lapacke_csytrs2.c @@ -34,7 +34,7 @@ #include "lapacke_utils.h" lapack_int LAPACKE_csytrs2( int matrix_layout, char uplo, lapack_int n, - lapack_int nrhs, const lapack_complex_float* a, + lapack_int nrhs, lapack_complex_float* a, lapack_int lda, const lapack_int* ipiv, lapack_complex_float* b, lapack_int ldb ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_csytrs2_work.c b/lapack-netlib/LAPACKE/src/lapacke_csytrs2_work.c index d914c1d69..8567a07d5 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_csytrs2_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_csytrs2_work.c @@ -34,7 +34,7 @@ #include "lapacke_utils.h" lapack_int LAPACKE_csytrs2_work( int matrix_layout, char uplo, lapack_int n, - lapack_int nrhs, const lapack_complex_float* a, + lapack_int nrhs, lapack_complex_float* a, lapack_int lda, const lapack_int* ipiv, lapack_complex_float* b, lapack_int ldb, lapack_complex_float* work ) diff --git a/lapack-netlib/LAPACKE/src/lapacke_ctgsen.c b/lapack-netlib/LAPACKE/src/lapacke_ctgsen.c index e2f38c87b..6bfcdc996 100644 --- 
a/lapack-netlib/LAPACKE/src/lapacke_ctgsen.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ctgsen.c @@ -84,7 +84,7 @@ lapack_int LAPACKE_ctgsen( int matrix_layout, lapack_int ijob, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = LAPACK_C2INT( work_query ); /* Allocate memory for work arrays */ if( ijob != 0 ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_ctprfb.c b/lapack-netlib/LAPACKE/src/lapacke_ctprfb.c index 9d2684e4c..fd49d6a7f 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ctprfb.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ctprfb.c @@ -41,7 +41,7 @@ lapack_int LAPACKE_ctprfb( int matrix_layout, char side, char trans, char direct lapack_complex_float* a, lapack_int lda, lapack_complex_float* b, lapack_int ldb ) { - lapack_int ncols_v, nrows_v; + lapack_int ncols_v, nrows_v, ncols_a, nrows_a; lapack_int info = 0; lapack_int ldwork; lapack_int work_size; @@ -52,20 +52,33 @@ lapack_int LAPACKE_ctprfb( int matrix_layout, char side, char trans, char direct } #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { - /* Optionally check input matrices for NaNs */ + /* Optionally check input matrices for NaNs + * V is m-by-k (left, columnwise) + * or n-by-k (right, columnwise) + * or k-by-m (left, rowwise) + * or k-by-n (right, rowwise) + * T is k-by-k + * A is k-by-n (left) + * or m-by-k (right) + * B is m-by-n + */ if( LAPACKE_lsame( storev, 'C' ) ) { ncols_v = k; nrows_v = LAPACKE_lsame( side, 'L' ) ? m : - ( LAPACKE_lsame( side, 'R' ) ? n : 0 ); + LAPACKE_lsame( side, 'R' ) ? n : 0; } else if( LAPACKE_lsame( storev, 'R' ) ) { ncols_v = LAPACKE_lsame( side, 'L' ) ? m : - ( LAPACKE_lsame( side, 'R' ) ? n : 0 ); + LAPACKE_lsame( side, 'R' ) ? n : 0; nrows_v = k; } else { ncols_v = 0; nrows_v = 0; } - if( LAPACKE_cge_nancheck( matrix_layout, k, m, a, lda ) ) { + nrows_a = LAPACKE_lsame( side, 'L' ) ? k : + LAPACKE_lsame( side, 'R' ) ? m : 0; + ncols_a = LAPACKE_lsame( side, 'L' ) ? n : + LAPACKE_lsame( side, 'R' ) ? 
k : 0; + if( LAPACKE_cge_nancheck( matrix_layout, ncols_a, nrows_a, a, lda ) ) { return -14; } if( LAPACKE_cge_nancheck( matrix_layout, m, n, b, ldb ) ) { @@ -80,13 +93,13 @@ lapack_int LAPACKE_ctprfb( int matrix_layout, char side, char trans, char direct } #endif if (side=='l' || side=='L') { - ldwork = k; - work_size = MAX(1,ldwork) * MAX(1,n); - } + ldwork = k; + work_size = MAX(1,ldwork) * MAX(1,n); + } else { - ldwork = m; - work_size = MAX(1,ldwork) * MAX(1,k); - } + ldwork = m; + work_size = MAX(1,ldwork) * MAX(1,k); + } /* Allocate memory for working array(s) */ work = (lapack_complex_float*) LAPACKE_malloc( sizeof(lapack_complex_float) * work_size ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_cunmhr.c b/lapack-netlib/LAPACKE/src/lapacke_cunmhr.c index 592c6de45..127dd8c57 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cunmhr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cunmhr.c @@ -58,7 +58,7 @@ lapack_int LAPACKE_cunmhr( int matrix_layout, char side, char trans, if( LAPACKE_cge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -11; } - if( LAPACKE_c_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_c_nancheck( r-1, tau, 1 ) ) { return -10; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_dgeesx.c b/lapack-netlib/LAPACKE/src/lapacke_dgeesx.c index 27647954b..193d65737 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dgeesx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dgeesx.c @@ -76,7 +76,7 @@ lapack_int LAPACKE_dgeesx( int matrix_layout, char jobvs, char sort, if( info != 0 ) { goto exit_level_1; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ if( LAPACKE_lsame( sense, 'b' ) || LAPACKE_lsame( sense, 'v' ) ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_dgejsv.c b/lapack-netlib/LAPACKE/src/lapacke_dgejsv.c index 444a07b35..d9709bf89 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dgejsv.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dgejsv.c @@ -74,7 +74,6 @@ lapack_int LAPACKE_dgejsv( int matrix_layout, char joba, char jobu, char jobv, lapack_int* iwork = NULL; double* work = NULL; lapack_int i; - lapack_int nu, nv; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_dgejsv", -1 ); return -1; @@ -82,8 +81,6 @@ lapack_int LAPACKE_dgejsv( int matrix_layout, char joba, char jobu, char jobv, #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - nu = LAPACKE_lsame( jobu, 'n' ) ? 1 : m; - nv = LAPACKE_lsame( jobv, 'n' ) ? 1 : n; if( LAPACKE_dge_nancheck( matrix_layout, m, n, a, lda ) ) { return -10; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_dgelsd.c b/lapack-netlib/LAPACKE/src/lapacke_dgelsd.c index 6750597bb..790119596 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dgelsd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dgelsd.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_dgelsd( int matrix_layout, lapack_int m, lapack_int n, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dgesvdq.c b/lapack-netlib/LAPACKE/src/lapacke_dgesvdq.c new file mode 100644 index 000000000..7bf831f8b --- /dev/null +++ b/lapack-netlib/LAPACKE/src/lapacke_dgesvdq.c @@ -0,0 +1,106 @@ +/***************************************************************************** + Copyright (c) 2014, Intel Corp. 
+ All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. +***************************************************************************** +* Contents: Native high-level C interface to LAPACK function dgesvdq +* Author: Intel Corporation +* Generated November 2018 +*****************************************************************************/ + +#include "lapacke_utils.h" + +lapack_int LAPACKE_dgesvdq( int matrix_layout, char joba, char jobp, + char jobr, char jobu, char jobv, + lapack_int m, lapack_int n, double* a, + lapack_int lda, double* s, double* u, lapack_int ldu, + double* v, lapack_int ldv, lapack_int* numrank) +{ + lapack_int info = 0; + lapack_int liwork = -1; + lapack_int* iwork = NULL; + lapack_int iwork_query; + lapack_int lwork = -1; + double* work = NULL; + double work_query; + lapack_int lrwork = -1; + double* rwork = NULL; + double rwork_query; + lapack_int i; + if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { + LAPACKE_xerbla( "LAPACKE_dgesvdq", -1 ); + return -1; + } +#ifndef LAPACK_DISABLE_NAN_CHECK + if( LAPACKE_get_nancheck() ) { + /* Optionally check input matrices for NaNs */ + if( LAPACKE_dge_nancheck( matrix_layout, m, n, a, lda ) ) { + return -6; + } + } +#endif + /* Query optimal working array(s) size */ + info = LAPACKE_dgesvdq_work( matrix_layout, joba, jobp, jobr, jobu, jobv, + m, n, a, lda, s, u, ldu, v, ldv, numrank, + &iwork_query, liwork, &work_query, lwork, + &rwork_query, lrwork ); + if( info != 0 ) { + goto exit_level_0; + } + liwork = iwork_query; + lwork = (lapack_int)work_query; + lrwork = (lapack_int)rwork_query; + /* Allocate memory for work arrays */ + iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); + if( iwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + work = (double*)LAPACKE_malloc( sizeof(double) * lwork ); + if( work == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + rwork = (double*)LAPACKE_malloc( sizeof(double) * lrwork ); + if( rwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + /* Call middle-level 
interface */ + info = LAPACKE_dgesvdq_work( matrix_layout, joba, jobp, jobr, jobu, jobv, + m, n, a, lda, s, u, ldu, v, ldv, numrank, + iwork, liwork, work, lwork, rwork, lrwork ); + + /* Release memory and exit */ + LAPACKE_free( iwork ); + LAPACKE_free( work ); + LAPACKE_free( rwork ); +exit_level_0: + if( info == LAPACK_WORK_MEMORY_ERROR ) { + LAPACKE_xerbla( "LAPACKE_dgesvdq", info ); + } + return info; +} diff --git a/lapack-netlib/LAPACKE/src/lapacke_dgesvdq_work.c b/lapack-netlib/LAPACKE/src/lapacke_dgesvdq_work.c new file mode 100644 index 000000000..0de92a254 --- /dev/null +++ b/lapack-netlib/LAPACKE/src/lapacke_dgesvdq_work.c @@ -0,0 +1,149 @@ +/***************************************************************************** + Copyright (c) 2014, Intel Corp. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. +***************************************************************************** +* Contents: Native middle-level C interface to LAPACK function dgesvdq +* Author: Intel Corporation +* Generated November 2015 +*****************************************************************************/ + +#include "lapacke_utils.h" + +lapack_int LAPACKE_dgesvdq_work( int matrix_layout, char joba, char jobp, + char jobr, char jobu, char jobv, + lapack_int m, lapack_int n, double* a, + lapack_int lda, double* s, double* u, lapack_int ldu, + double* v, lapack_int ldv, lapack_int* numrank, + lapack_int* iwork, lapack_int liwork, + double* work, lapack_int lwork, + double* rwork, lapack_int lrwork ) +{ + lapack_int info = 0; + if( matrix_layout == LAPACK_COL_MAJOR ) { + /* Call LAPACK function and adjust info */ + LAPACK_dgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda, s, u, &ldu, v, &ldv, + numrank, iwork, &liwork, work, &lwork, rwork, &lrwork, &info ); + if( info < 0 ) { + info = info - 1; + } + } else if( matrix_layout == LAPACK_ROW_MAJOR ) { + lapack_int nrows_u = ( LAPACKE_lsame( jobu, 'a' ) || + LAPACKE_lsame( jobu, 's' ) ) ? m : 1; + lapack_int ncols_u = LAPACKE_lsame( jobu, 'a' ) ? m : + (LAPACKE_lsame( jobu, 's' ) ? 
MIN(m,n) : 1); + lapack_int nrows_v = LAPACKE_lsame( jobv, 'a' ) ? n : + ( LAPACKE_lsame( jobv, 's' ) ? MIN(m,n) : 1); + lapack_int lda_t = MAX(1,m); + lapack_int ldu_t = MAX(1,nrows_u); + lapack_int ldv_t = MAX(1,nrows_v); + double* a_t = NULL; + double* u_t = NULL; + double* v_t = NULL; + /* Check leading dimension(s) */ + if( lda < n ) { + info = -9; + LAPACKE_xerbla( "LAPACKE_dgesvdq_work", info ); + return info; + } + if( ldu < ncols_u ) { + info = -12; + LAPACKE_xerbla( "LAPACKE_dgesvdq_work", info ); + return info; + } + if( ldv < n ) { + info = -14; + LAPACKE_xerbla( "LAPACKE_dgesvdq_work", info ); + return info; + } + /* Query optimal working array(s) size if requested */ + if( lwork == -1 ) { + LAPACK_dgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda_t, + s, u, &ldu_t, v, &ldv_t, numrank, iwork, &liwork, + work, &lwork, rwork, &lrwork, &info ); + return (info < 0) ? (info - 1) : info; + } + /* Allocate memory for temporary array(s) */ + a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX(1,n) ); + if( a_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_0; + } + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + u_t = (double*) + LAPACKE_malloc( sizeof(double) * ldu_t * MAX(1,ncols_u) ); + if( u_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_1; + } + } + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + v_t = (double*) + LAPACKE_malloc( sizeof(double) * ldv_t * MAX(1,n) ); + if( v_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_2; + } + } + /* Transpose input matrices */ + LAPACKE_dge_trans( matrix_layout, m, n, a, lda, a_t, lda_t ); + /* Call LAPACK function and adjust info */ + LAPACK_dgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda_t, + s, u, &ldu_t, v, &ldv_t, numrank, iwork, &liwork, + work, &lwork, rwork, &lrwork, &info ); + if( info < 0 ) { + info = info - 1; + } + /* Transpose output matrices */ + LAPACKE_dge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda ); + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + LAPACKE_dge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t, + u, ldu ); + } + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + LAPACKE_dge_trans( LAPACK_COL_MAJOR, nrows_v, n, v_t, ldv_t, v, + ldv ); + } + /* Release memory and exit */ + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + LAPACKE_free( v_t ); + } +exit_level_2: + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + LAPACKE_free( u_t ); + } +exit_level_1: + LAPACKE_free( a_t ); +exit_level_0: + if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) { + LAPACKE_xerbla( "LAPACKE_dgesvdq_work", info ); + } + } else { + info = -1; + LAPACKE_xerbla( "LAPACKE_dgesvdq_work", info ); + } + return info; +} diff --git a/lapack-netlib/LAPACKE/src/lapacke_dggesx.c b/lapack-netlib/LAPACKE/src/lapacke_dggesx.c index 36addda74..91eb7bf8c 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dggesx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dggesx.c @@ -82,7 +82,7 @@ lapack_int LAPACKE_dggesx( int matrix_layout, char jobvsl, char jobvsr, if( info != 0 ) { goto exit_level_1; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dlantr_work.c b/lapack-netlib/LAPACKE/src/lapacke_dlantr_work.c index 2d570ce42..5b2a6c535 100644 --- 
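/*
 * The row-major branch of LAPACKE_dgesvdq_work above follows the standard
 * LAPACKE shape: check leading dimensions, allocate column-major shadow
 * buffers, transpose in, call the Fortran routine, transpose out, and unwind
 * the allocations through labelled exit levels. A condensed sketch of that
 * control flow, restricted to jobu = jobv = 'N' so only A needs a shadow
 * buffer; note the assumption that the transposed buffer a_t, not a, is what
 * the Fortran call must see:
 */
#include "lapacke_utils.h"

static lapack_int rowmajor_dgesvdq_sketch( char joba, char jobp, char jobr,
                                           lapack_int m, lapack_int n,
                                           double* a, lapack_int lda,
                                           double* s, lapack_int* numrank,
                                           lapack_int* iwork, lapack_int liwork,
                                           double* work, lapack_int lwork,
                                           double* rwork, lapack_int lrwork )
{
    lapack_int info = 0;
    char jobu = 'N', jobv = 'N';
    lapack_int lda_t = MAX( 1, m );
    lapack_int ldu_t = 1, ldv_t = 1;
    double u_dummy, v_dummy;      /* never referenced when jobu = jobv = 'N' */
    double* a_t = (double*)LAPACKE_malloc( sizeof(double) * lda_t * MAX( 1, n ) );
    if( a_t == NULL ) {
        info = LAPACK_TRANSPOSE_MEMORY_ERROR;
        goto exit_level_0;
    }
    /* copy the row-major A into a column-major shadow buffer */
    LAPACKE_dge_trans( LAPACK_ROW_MAJOR, m, n, a, lda, a_t, lda_t );
    LAPACK_dgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a_t, &lda_t,
                    s, &u_dummy, &ldu_t, &v_dummy, &ldv_t, numrank,
                    iwork, &liwork, work, &lwork, rwork, &lrwork, &info );
    if( info < 0 ) {
        info = info - 1;      /* account for the extra matrix_layout argument */
    }
    /* copy the (overwritten) A back out in row-major order */
    LAPACKE_dge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda );
    LAPACKE_free( a_t );
exit_level_0:
    if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {
        LAPACKE_xerbla( "rowmajor_dgesvdq_sketch", info );
    }
    return info;
}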
a/lapack-netlib/LAPACKE/src/lapacke_dlantr_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dlantr_work.c @@ -42,12 +42,10 @@ double LAPACKE_dlantr_work( int matrix_layout, char norm, char uplo, if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ res = LAPACK_dlantr( &norm, &uplo, &diag, &m, &n, a, &lda, work ); - if( info < 0 ) { - info = info - 1; - } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int lda_t = MAX(1,m); double* a_t = NULL; + double* work_lapack = NULL; /* Check leading dimension(s) */ if( lda < n ) { info = -8; @@ -60,12 +58,23 @@ double LAPACKE_dlantr_work( int matrix_layout, char norm, char uplo, info = LAPACK_TRANSPOSE_MEMORY_ERROR; goto exit_level_0; } + /* Allocate memory for work array(s) */ + if( LAPACKE_lsame( norm, 'i' ) ) { + work_lapack = (double*)LAPACKE_malloc( sizeof(double) * MAX(1,m) ); + if( work_lapack == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_1; + } + } /* Transpose input matrices */ LAPACKE_dtr_trans( matrix_layout, uplo, diag, MAX(m,n), a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - res = LAPACK_dlantr( &norm, &uplo, &diag, &m, &n, a_t, &lda_t, work ); - info = 0; /* LAPACK call is ok! */ + res = LAPACK_dlantr( &norm, &uplo, &diag, &m, &n, a_t, &lda_t, work_lapack ); /* Release memory and exit */ + if( work_lapack ) { + LAPACKE_free( work_lapack ); + } +exit_level_1: LAPACKE_free( a_t ); exit_level_0: if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_dormhr.c b/lapack-netlib/LAPACKE/src/lapacke_dormhr.c index de4355a74..4b9526f14 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dormhr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dormhr.c @@ -57,7 +57,7 @@ lapack_int LAPACKE_dormhr( int matrix_layout, char side, char trans, if( LAPACKE_dge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -11; } - if( LAPACKE_d_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_d_nancheck( r-1, tau, 1 ) ) { return -10; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsbevd.c b/lapack-netlib/LAPACKE/src/lapacke_dsbevd.c index 4ecd1b522..3a9abbbe1 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsbevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsbevd.c @@ -62,7 +62,7 @@ lapack_int LAPACKE_dsbevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsbevd_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_dsbevd_2stage.c index b0ccc0b1e..4d42b6208 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsbevd_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsbevd_2stage.c @@ -62,7 +62,7 @@ lapack_int LAPACKE_dsbevd_2stage( int matrix_layout, char jobz, char uplo, lapac if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsbgvd.c b/lapack-netlib/LAPACKE/src/lapacke_dsbgvd.c index 36f912ee5..cab2a64bb 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsbgvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsbgvd.c @@ -67,7 +67,7 @@ lapack_int LAPACKE_dsbgvd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = 
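/*
 * The ?lantr_work hunks above fix the row-major path: LAPACK's ?lantr only
 * dereferences WORK for norm = 'I' (maximum absolute row sum), and then
 * requires at least m entries sized against the transposed, column-major
 * copy, so the wrapper now allocates that buffer itself instead of
 * forwarding the caller's pointer. From the caller's side nothing changes;
 * a tiny worked example (matrix values hypothetical):
 */
#include <lapacke.h>

/* Infinity norm of a 3x3 row-major upper-triangular matrix; the absolute
   row sums are 6, 9 and 6, so this returns 9. */
static double tri_inf_norm_example( void )
{
    double a[9] = { 1.0, 2.0, 3.0,
                    0.0, 4.0, 5.0,
                    0.0, 0.0, 6.0 };
    return LAPACKE_dlantr( LAPACK_ROW_MAJOR, 'I', 'U', 'N', 3, 3, a, 3 );
}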
(lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dspevd.c b/lapack-netlib/LAPACKE/src/lapacke_dspevd.c index 3b6b25d5e..c7d93b6b3 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dspevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dspevd.c @@ -61,7 +61,7 @@ lapack_int LAPACKE_dspevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dspgvd.c b/lapack-netlib/LAPACKE/src/lapacke_dspgvd.c index 8ca478ed1..b49ce95ec 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dspgvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dspgvd.c @@ -66,7 +66,7 @@ lapack_int LAPACKE_dspgvd( int matrix_layout, lapack_int itype, char jobz, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dstedc.c b/lapack-netlib/LAPACKE/src/lapacke_dstedc.c index 4f88a04c4..16e308450 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dstedc.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dstedc.c @@ -69,7 +69,7 @@ lapack_int LAPACKE_dstedc( int matrix_layout, char compz, lapack_int n, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dstegr.c b/lapack-netlib/LAPACKE/src/lapacke_dstegr.c index 9191f0a9f..7e4f9d694 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dstegr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dstegr.c @@ -81,7 +81,7 @@ lapack_int LAPACKE_dstegr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dstemr.c b/lapack-netlib/LAPACKE/src/lapacke_dstemr.c index 8dc2bd237..1a3b0ac7b 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dstemr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dstemr.c @@ -75,7 +75,7 @@ lapack_int LAPACKE_dstemr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dstevd.c b/lapack-netlib/LAPACKE/src/lapacke_dstevd.c index e824a164b..251a2ae2e 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dstevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dstevd.c @@ -64,7 +64,7 @@ lapack_int LAPACKE_dstevd( int matrix_layout, char jobz, lapack_int n, double* d if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * 
liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dstevr.c b/lapack-netlib/LAPACKE/src/lapacke_dstevr.c index fd53e0ac0..d49e0ff1c 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dstevr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dstevr.c @@ -81,7 +81,7 @@ lapack_int LAPACKE_dstevr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsyev_work.c b/lapack-netlib/LAPACKE/src/lapacke_dsyev_work.c index 9dc67f022..f696c608f 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsyev_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsyev_work.c @@ -65,14 +65,18 @@ lapack_int LAPACKE_dsyev_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_dge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_dsy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_dsyev( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } /* Transpose output matrices */ - LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_dsy_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsyevd.c b/lapack-netlib/LAPACKE/src/lapacke_dsyevd.c index 870148b31..d6772ea01 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsyevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsyevd.c @@ -50,7 +50,7 @@ lapack_int LAPACKE_dsyevd( int matrix_layout, char jobz, char uplo, lapack_int n #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_dge_nancheck( matrix_layout, n, n, a, lda ) ) { + if( LAPACKE_dsy_nancheck( matrix_layout, uplo, n, a, lda ) ) { return -5; } } @@ -61,7 +61,7 @@ lapack_int LAPACKE_dsyevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsyevd_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_dsyevd_2stage.c index a5507394c..e866451a5 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsyevd_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsyevd_2stage.c @@ -50,7 +50,7 @@ lapack_int LAPACKE_dsyevd_2stage( int matrix_layout, char jobz, char uplo, lapac #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_dge_nancheck( matrix_layout, n, n, a, lda ) ) { + if( LAPACKE_dsy_nancheck( matrix_layout, uplo, n, a, lda ) ) { return -5; } } @@ -61,7 +61,7 @@ lapack_int LAPACKE_dsyevd_2stage( int matrix_layout, char jobz, char uplo, lapac if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsyevd_2stage_work.c 
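/*
 * The ?syev/?syevd hunks above replace the full-matrix dge_trans with
 * dsy_trans: these drivers only reference the uplo triangle of A, and only
 * produce a full n-by-n matrix (the eigenvectors) when jobz = 'V'. With
 * jobz = 'N', the opposite triangle of a row-major A is now left untouched.
 * A toy illustration of that caller-visible effect (values hypothetical):
 */
#include <lapacke.h>
#include <stdio.h>

static void syevd_triangle_demo( void )
{
    /* the symmetric data lives in the upper triangle; the -7.0 below sits
       in the strict lower triangle, which uplo = 'U' never references */
    double a[4] = {  2.0, 1.0,
                    -7.0, 3.0 };
    double w[2];
    lapack_int info = LAPACKE_dsyevd( LAPACK_ROW_MAJOR, 'N', 'U', 2, a, 2, w );
    if( info == 0 ) {
        printf( "a[1][0] is still %g\n", a[2] );   /* prints -7 */
    }
}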
b/lapack-netlib/LAPACKE/src/lapacke_dsyevd_2stage_work.c index 1d06250d1..6f9c02f6a 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsyevd_2stage_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsyevd_2stage_work.c @@ -68,7 +68,7 @@ lapack_int LAPACKE_dsyevd_2stage_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_dge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_dsy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_dsyevd_2stage( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, iwork, &liwork, &info ); @@ -76,7 +76,11 @@ lapack_int LAPACKE_dsyevd_2stage_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_dsy_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsyevd_work.c b/lapack-netlib/LAPACKE/src/lapacke_dsyevd_work.c index 925912619..81ba2acb3 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsyevd_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsyevd_work.c @@ -68,7 +68,7 @@ lapack_int LAPACKE_dsyevd_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_dge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_dsy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_dsyevd( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, iwork, &liwork, &info ); @@ -76,7 +76,11 @@ lapack_int LAPACKE_dsyevd_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_dge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_dsy_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsyevr.c b/lapack-netlib/LAPACKE/src/lapacke_dsyevr.c index bae72f6c3..290ae0bd4 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsyevr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsyevr.c @@ -78,7 +78,7 @@ lapack_int LAPACKE_dsyevr( int matrix_layout, char jobz, char range, char uplo, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsyevr_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_dsyevr_2stage.c index dad20209e..7ee7dbc0b 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsyevr_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsyevr_2stage.c @@ -78,7 +78,7 @@ lapack_int LAPACKE_dsyevr_2stage( int matrix_layout, char jobz, char range, char if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsygvd.c b/lapack-netlib/LAPACKE/src/lapacke_dsygvd.c index 907ad50bd..51f333359 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsygvd.c +++ 
b/lapack-netlib/LAPACKE/src/lapacke_dsygvd.c @@ -66,7 +66,7 @@ lapack_int LAPACKE_dsygvd( int matrix_layout, lapack_int itype, char jobz, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsytrs2.c b/lapack-netlib/LAPACKE/src/lapacke_dsytrs2.c index 46c90190f..4d73ef3c1 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsytrs2.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsytrs2.c @@ -34,7 +34,7 @@ #include "lapacke_utils.h" lapack_int LAPACKE_dsytrs2( int matrix_layout, char uplo, lapack_int n, - lapack_int nrhs, const double* a, lapack_int lda, + lapack_int nrhs, double* a, lapack_int lda, const lapack_int* ipiv, double* b, lapack_int ldb ) { lapack_int info = 0; diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsytrs2_work.c b/lapack-netlib/LAPACKE/src/lapacke_dsytrs2_work.c index c937c39c5..caffa5b4b 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsytrs2_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsytrs2_work.c @@ -34,7 +34,7 @@ #include "lapacke_utils.h" lapack_int LAPACKE_dsytrs2_work( int matrix_layout, char uplo, lapack_int n, - lapack_int nrhs, const double* a, + lapack_int nrhs, double* a, lapack_int lda, const lapack_int* ipiv, double* b, lapack_int ldb, double* work ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_dtgsen.c b/lapack-netlib/LAPACKE/src/lapacke_dtgsen.c index 2cb7fce4b..baa63abe7 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dtgsen.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dtgsen.c @@ -81,7 +81,7 @@ lapack_int LAPACKE_dtgsen( int matrix_layout, lapack_int ijob, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ if( ijob != 0 ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_dtprfb.c b/lapack-netlib/LAPACKE/src/lapacke_dtprfb.c index 5191f79bb..11031b9bb 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dtprfb.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dtprfb.c @@ -39,7 +39,7 @@ lapack_int LAPACKE_dtprfb( int matrix_layout, char side, char trans, char direct lapack_int ldv, const double* t, lapack_int ldt, double* a, lapack_int lda, double* b, lapack_int ldb ) { - lapack_int ncols_v, nrows_v; + lapack_int ncols_v, nrows_v, ncols_a, nrows_a; lapack_int info = 0; lapack_int ldwork; lapack_int work_size; @@ -50,20 +50,33 @@ lapack_int LAPACKE_dtprfb( int matrix_layout, char side, char trans, char direct } #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { - /* Optionally check input matrices for NaNs */ + /* Optionally check input matrices for NaNs + * V is m-by-k (left, columnwise) + * or n-by-k (right, columnwise) + * or k-by-m (left, rowwise) + * or k-by-n (right, rowwise) + * T is k-by-k + * A is k-by-n (left) + * or m-by-k (right) + * B is m-by-n + */ if( LAPACKE_lsame( storev, 'C' ) ) { ncols_v = k; nrows_v = LAPACKE_lsame( side, 'L' ) ? m : - ( LAPACKE_lsame( side, 'R' ) ? n : 0 ); + LAPACKE_lsame( side, 'R' ) ? n : 0; } else if( LAPACKE_lsame( storev, 'R' ) ) { ncols_v = LAPACKE_lsame( side, 'L' ) ? m : - ( LAPACKE_lsame( side, 'R' ) ? n : 0 ); + LAPACKE_lsame( side, 'R' ) ? n : 0; nrows_v = k; } else { ncols_v = 0; nrows_v = 0; } - if( LAPACKE_dge_nancheck( matrix_layout, k, m, a, lda ) ) { + nrows_a = LAPACKE_lsame( side, 'L' ) ? k : + LAPACKE_lsame( side, 'R' ) ? 
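/*
 * The ?sytrs2 hunks above drop the const qualifier from A, which matches
 * the Fortran interface: ?sytrs2 converts the factored matrix in place
 * (presumably via ?syconv) and restores it before returning, so the buffer
 * must be writable even though its contents are logically preserved.
 * Typical use, as a hedged sketch (the factor-then-solve pairing is the
 * caller's choice, not part of this patch):
 */
#include <lapacke.h>

static lapack_int factor_then_solve( lapack_int n, lapack_int nrhs,
                                     double* a, lapack_int lda,
                                     double* b, lapack_int ldb,
                                     lapack_int* ipiv )
{
    /* factor A = U*D*U**T; a and ipiv then feed the solve step */
    lapack_int info = LAPACKE_dsytrf( LAPACK_COL_MAJOR, 'U', n, a, lda, ipiv );
    if( info == 0 ) {
        /* a is non-const here: it is modified and restored internally */
        info = LAPACKE_dsytrs2( LAPACK_COL_MAJOR, 'U', n, nrhs,
                                a, lda, ipiv, b, ldb );
    }
    return info;
}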
m : 0; + ncols_a = LAPACKE_lsame( side, 'L' ) ? n : + LAPACKE_lsame( side, 'R' ) ? k : 0; + if( LAPACKE_dge_nancheck( matrix_layout, ncols_a, nrows_a, a, lda ) ) { return -14; } if( LAPACKE_dge_nancheck( matrix_layout, m, n, b, ldb ) ) { @@ -78,16 +91,16 @@ lapack_int LAPACKE_dtprfb( int matrix_layout, char side, char trans, char direct } #endif if (side=='l' || side=='L') { - ldwork = k; - work_size = MAX(1,ldwork) * MAX(1,n); - } + ldwork = k; + work_size = MAX(1,ldwork) * MAX(1,n); + } else { - ldwork = m; - work_size = MAX(1,ldwork) * MAX(1,k); - } + ldwork = m; + work_size = MAX(1,ldwork) * MAX(1,k); + } /* Allocate memory for working array(s) */ work = (double*) - LAPACKE_malloc( sizeof(double) * work_size ); + LAPACKE_malloc( sizeof(double) * work_size ); if( work == NULL ) { info = LAPACK_WORK_MEMORY_ERROR; goto exit_level_0; diff --git a/lapack-netlib/LAPACKE/src/lapacke_dtrsen.c b/lapack-netlib/LAPACKE/src/lapacke_dtrsen.c index 521bc2701..67932fd98 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dtrsen.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dtrsen.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_dtrsen( int matrix_layout, char job, char compq, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ if( LAPACKE_lsame( job, 'b' ) || LAPACKE_lsame( job, 'v' ) ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_sgeesx.c b/lapack-netlib/LAPACKE/src/lapacke_sgeesx.c index 91cfc4fa5..0bc14b33e 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sgeesx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sgeesx.c @@ -76,7 +76,7 @@ lapack_int LAPACKE_sgeesx( int matrix_layout, char jobvs, char sort, if( info != 0 ) { goto exit_level_1; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ if( LAPACKE_lsame( sense, 'b' ) || LAPACKE_lsame( sense, 'v' ) ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_sgejsv.c b/lapack-netlib/LAPACKE/src/lapacke_sgejsv.c index aa0eeb746..0703e902f 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sgejsv.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sgejsv.c @@ -74,7 +74,6 @@ lapack_int LAPACKE_sgejsv( int matrix_layout, char joba, char jobu, char jobv, lapack_int* iwork = NULL; float* work = NULL; lapack_int i; - lapack_int nu, nv; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_sgejsv", -1 ); return -1; @@ -82,8 +81,6 @@ lapack_int LAPACKE_sgejsv( int matrix_layout, char joba, char jobu, char jobv, #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - nu = LAPACKE_lsame( jobu, 'n' ) ? 1 : m; - nv = LAPACKE_lsame( jobv, 'n' ) ? 
1 : n; if( LAPACKE_sge_nancheck( matrix_layout, m, n, a, lda ) ) { return -10; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_sgelsd.c b/lapack-netlib/LAPACKE/src/lapacke_sgelsd.c index fc42b1eec..9d00ded10 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sgelsd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sgelsd.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_sgelsd( int matrix_layout, lapack_int m, lapack_int n, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sgesvdq.c b/lapack-netlib/LAPACKE/src/lapacke_sgesvdq.c new file mode 100644 index 000000000..5ff543d10 --- /dev/null +++ b/lapack-netlib/LAPACKE/src/lapacke_sgesvdq.c @@ -0,0 +1,106 @@ +/***************************************************************************** + Copyright (c) 2014, Intel Corp. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. 
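/*
 * The ?gejsv hunks above can delete nu/nv because the NaN screen only
 * inspects inputs, and for gejsv the only input matrix is A; U and V are
 * outputs. The screening itself is the usual LAPACKE gate: compiled in
 * unless LAPACK_DISABLE_NAN_CHECK is defined, and toggled at run time. A
 * short sketch of the run-time switch (the driver and arguments here are
 * placeholders, not code from this patch):
 */
#include <lapacke.h>

static lapack_int nancheck_gate_sketch( lapack_int m, lapack_int n,
                                        float* a, lapack_int lda, float* s,
                                        float* superb )
{
    lapack_int info;
    float u, vt;   /* jobu = jobvt = 'N': U and VT are not referenced */
    LAPACKE_set_nancheck( 0 );   /* skip the input NaN scan for this call */
    info = LAPACKE_sgesvd( LAPACK_COL_MAJOR, 'N', 'N', m, n, a, lda,
                           s, &u, 1, &vt, 1, superb );
    LAPACKE_set_nancheck( 1 );   /* restore the default behaviour */
    return info;
}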
+***************************************************************************** +* Contents: Native high-level C interface to LAPACK function sgesvdq +* Author: Intel Corporation +* Generated November 2018 +*****************************************************************************/ + +#include "lapacke_utils.h" + +lapack_int LAPACKE_sgesvdq( int matrix_layout, char joba, char jobp, + char jobr, char jobu, char jobv, + lapack_int m, lapack_int n, float* a, + lapack_int lda, float* s, float* u, lapack_int ldu, + float* v, lapack_int ldv, lapack_int* numrank) +{ + lapack_int info = 0; + lapack_int liwork = -1; + lapack_int* iwork = NULL; + lapack_int iwork_query; + lapack_int lwork = -1; + float* work = NULL; + float work_query; + lapack_int lrwork = -1; + float* rwork = NULL; + float rwork_query; + lapack_int i; + if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { + LAPACKE_xerbla( "LAPACKE_sgesvdq", -1 ); + return -1; + } +#ifndef LAPACK_DISABLE_NAN_CHECK + if( LAPACKE_get_nancheck() ) { + /* Optionally check input matrices for NaNs */ + if( LAPACKE_sge_nancheck( matrix_layout, m, n, a, lda ) ) { + return -6; + } + } +#endif + /* Query optimal working array(s) size */ + info = LAPACKE_sgesvdq_work( matrix_layout, joba, jobp, jobr, jobu, jobv, + m, n, a, lda, s, u, ldu, v, ldv, numrank, + &iwork_query, liwork, &work_query, lwork, + &rwork_query, lrwork ); + if( info != 0 ) { + goto exit_level_0; + } + liwork = iwork_query; + lwork = (lapack_int)work_query; + lrwork = (lapack_int)rwork_query; + /* Allocate memory for work arrays */ + iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); + if( iwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + work = (float*)LAPACKE_malloc( sizeof(float) * lwork ); + if( work == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + rwork = (float*)LAPACKE_malloc( sizeof(float) * lrwork ); + if( rwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + /* Call middle-level interface */ + info = LAPACKE_sgesvdq_work( matrix_layout, joba, jobp, jobr, jobu, jobv, + m, n, a, lda, s, u, ldu, v, ldv, numrank, + iwork, liwork, work, lwork, rwork, lrwork ); + + /* Release memory and exit */ + LAPACKE_free( iwork ); + LAPACKE_free( work ); + LAPACKE_free( rwork ); +exit_level_0: + if( info == LAPACK_WORK_MEMORY_ERROR ) { + LAPACKE_xerbla( "LAPACKE_sgesvdq", info ); + } + return info; +} diff --git a/lapack-netlib/LAPACKE/src/lapacke_sgesvdq_work.c b/lapack-netlib/LAPACKE/src/lapacke_sgesvdq_work.c new file mode 100644 index 000000000..9eab982c2 --- /dev/null +++ b/lapack-netlib/LAPACKE/src/lapacke_sgesvdq_work.c @@ -0,0 +1,148 @@ +/***************************************************************************** + Copyright (c) 2014, Intel Corp. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
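/*
 * The new high-level LAPACKE_?gesvdq wrappers above own all of the
 * workspace bookkeeping (integer, real and, for the complex types, complex
 * work arrays), so a caller only supplies the matrices. A minimal use of
 * the double-precision wrapper for singular values only (the job flags and
 * sizes here are illustrative assumptions):
 */
#include <lapacke.h>

static lapack_int gesvdq_values_only( lapack_int m, lapack_int n,
                                      double* a, lapack_int lda, double* s )
{
    lapack_int numrank;
    double u, v;   /* jobu = jobv = 'N': U and V are never referenced */
    return LAPACKE_dgesvdq( LAPACK_COL_MAJOR, 'A', 'N', 'N', 'N', 'N',
                            m, n, a, lda, s, &u, 1, &v, 1, &numrank );
}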
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. +***************************************************************************** +* Contents: Native middle-level C interface to LAPACK function sgesvdq +* Author: Intel Corporation +* Generated November 2015 +*****************************************************************************/ + +#include "lapacke_utils.h" + +lapack_int LAPACKE_sgesvdq_work( int matrix_layout, char joba, char jobp, + char jobr, char jobu, char jobv, + lapack_int m, lapack_int n, float* a, + lapack_int lda, float* s, float* u, lapack_int ldu, + float* v, lapack_int ldv, lapack_int* numrank, + lapack_int* iwork, lapack_int liwork, + float* work, lapack_int lwork, + float* rwork, lapack_int lrwork ) +{ + lapack_int info = 0; + if( matrix_layout == LAPACK_COL_MAJOR ) { + /* Call LAPACK function and adjust info */ + LAPACK_sgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda, s, u, &ldu, v, &ldv, + numrank, iwork, &liwork, work, &lwork, rwork, &lrwork, &info ); + if( info < 0 ) { + info = info - 1; + } + } else if( matrix_layout == LAPACK_ROW_MAJOR ) { + lapack_int nrows_u = ( LAPACKE_lsame( jobu, 'a' ) || + LAPACKE_lsame( jobu, 's' ) ) ? m : 1; + lapack_int ncols_u = LAPACKE_lsame( jobu, 'a' ) ? m : + (LAPACKE_lsame( jobu, 's' ) ? MIN(m,n) : 1); + lapack_int nrows_v = LAPACKE_lsame( jobv, 'a' ) ? n : 1; + lapack_int lda_t = MAX(1,m); + lapack_int ldu_t = MAX(1,nrows_u); + lapack_int ldv_t = MAX(1,nrows_v); + float* a_t = NULL; + float* u_t = NULL; + float* v_t = NULL; + /* Check leading dimension(s) */ + if( lda < n ) { + info = -9; + LAPACKE_xerbla( "LAPACKE_sgesvdq_work", info ); + return info; + } + if( ldu < ncols_u ) { + info = -12; + LAPACKE_xerbla( "LAPACKE_sgesvdq_work", info ); + return info; + } + if( ldv < n ) { + info = -14; + LAPACKE_xerbla( "LAPACKE_sgesvdq_work", info ); + return info; + } + /* Query optimal working array(s) size if requested */ + if( lwork == -1 ) { + LAPACK_sgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda_t, + s, u, &ldu_t, v, &ldv_t, numrank, iwork, &liwork, + work, &lwork, rwork, &lrwork, &info ); + return (info < 0) ? 
(info - 1) : info; + } + /* Allocate memory for temporary array(s) */ + a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) ); + if( a_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_0; + } + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + u_t = (float*) + LAPACKE_malloc( sizeof(float) * ldu_t * MAX(1,ncols_u) ); + if( u_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_1; + } + } + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + v_t = (float*) + LAPACKE_malloc( sizeof(float) * ldv_t * MAX(1,n) ); + if( v_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_2; + } + } + /* Transpose input matrices */ + LAPACKE_sge_trans( matrix_layout, m, n, a, lda, a_t, lda_t ); + /* Call LAPACK function and adjust info */ + LAPACK_sgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda_t, + s, u, &ldu_t, v, &ldv_t, numrank, iwork, &liwork, + work, &lwork, rwork, &lrwork, &info ); + if( info < 0 ) { + info = info - 1; + } + /* Transpose output matrices */ + LAPACKE_sge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda ); + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + LAPACKE_sge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t, + u, ldu ); + } + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + LAPACKE_sge_trans( LAPACK_COL_MAJOR, nrows_v, n, v_t, ldv_t, v, + ldv ); + } + /* Release memory and exit */ + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + LAPACKE_free( v_t ); + } +exit_level_2: + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + LAPACKE_free( u_t ); + } +exit_level_1: + LAPACKE_free( a_t ); +exit_level_0: + if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) { + LAPACKE_xerbla( "LAPACKE_sgesvdq_work", info ); + } + } else { + info = -1; + LAPACKE_xerbla( "LAPACKE_sgesvdq_work", info ); + } + return info; +} diff --git a/lapack-netlib/LAPACKE/src/lapacke_sggesx.c b/lapack-netlib/LAPACKE/src/lapacke_sggesx.c index f0acb70a4..d552a2010 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sggesx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sggesx.c @@ -82,7 +82,7 @@ lapack_int LAPACKE_sggesx( int matrix_layout, char jobvsl, char jobvsr, if( info != 0 ) { goto exit_level_1; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_slantr_work.c b/lapack-netlib/LAPACKE/src/lapacke_slantr_work.c index e9f84b55c..e1d4c270d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_slantr_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_slantr_work.c @@ -42,12 +42,10 @@ float LAPACKE_slantr_work( int matrix_layout, char norm, char uplo, if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ res = LAPACK_slantr( &norm, &uplo, &diag, &m, &n, a, &lda, work ); - if( info < 0 ) { - info = info - 1; - } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int lda_t = MAX(1,m); float* a_t = NULL; + float* work_lapack = NULL; /* Check leading dimension(s) */ if( lda < n ) { info = -8; @@ -60,12 +58,23 @@ float LAPACKE_slantr_work( int matrix_layout, char norm, char uplo, info = LAPACK_TRANSPOSE_MEMORY_ERROR; goto exit_level_0; } + /* Allocate memory for work array(s) */ + if( LAPACKE_lsame( norm, 'i' ) ) { + work_lapack = (float*)LAPACKE_malloc( sizeof(float) * MAX(1,m) ); + if( work_lapack == NULL ) { + info = 
LAPACK_WORK_MEMORY_ERROR; + goto exit_level_1; + } + } /* Transpose input matrices */ LAPACKE_str_trans( matrix_layout, uplo, diag, MAX(m,n), a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - res = LAPACK_slantr( &norm, &uplo, &diag, &m, &n, a_t, &lda_t, work ); - info = 0; /* LAPACK call is ok! */ + res = LAPACK_slantr( &norm, &uplo, &diag, &m, &n, a_t, &lda_t, work_lapack ); /* Release memory and exit */ + if( work_lapack ) { + LAPACKE_free( work_lapack ); + } +exit_level_1: LAPACKE_free( a_t ); exit_level_0: if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_sormhr.c b/lapack-netlib/LAPACKE/src/lapacke_sormhr.c index a5cca2c45..fba215a19 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sormhr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sormhr.c @@ -57,7 +57,7 @@ lapack_int LAPACKE_sormhr( int matrix_layout, char side, char trans, if( LAPACKE_sge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -11; } - if( LAPACKE_s_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_s_nancheck( r-1, tau, 1 ) ) { return -10; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssbevd.c b/lapack-netlib/LAPACKE/src/lapacke_ssbevd.c index 3acdeb95d..b41e5b156 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssbevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssbevd.c @@ -62,7 +62,7 @@ lapack_int LAPACKE_ssbevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssbevd_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_ssbevd_2stage.c index 2eda9cde9..a76d92c71 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssbevd_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssbevd_2stage.c @@ -62,7 +62,7 @@ lapack_int LAPACKE_ssbevd_2stage( int matrix_layout, char jobz, char uplo, lapac if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssbgvd.c b/lapack-netlib/LAPACKE/src/lapacke_ssbgvd.c index a6c036846..b40ccb9e5 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssbgvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssbgvd.c @@ -67,7 +67,7 @@ lapack_int LAPACKE_ssbgvd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sspevd.c b/lapack-netlib/LAPACKE/src/lapacke_sspevd.c index bd06a8ba6..9b518751b 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sspevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sspevd.c @@ -61,7 +61,7 @@ lapack_int LAPACKE_sspevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sspgvd.c b/lapack-netlib/LAPACKE/src/lapacke_sspgvd.c index 749abb0b1..e80e24647 100644 --- 
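/*
 * The m-1 -> r-1 changes in the ?ormhr/?unmhr NaN checks above: tau holds
 * the scalar factors of r-1 elementary reflectors, where r is the order of
 * the Hessenberg matrix Q was built from, i.e. r = m for side = 'L' but
 * r = n for side = 'R'. Checking m-1 entries unconditionally could read
 * past the end of tau (m > n) or skip entries (m < n) whenever side = 'R'.
 * In helper form:
 */
#include "lapacke_utils.h"

/* number of tau entries that ?ormhr/?unmhr actually reference */
static lapack_int ormhr_tau_len( char side, lapack_int m, lapack_int n )
{
    lapack_int r = LAPACKE_lsame( side, 'l' ) ? m : n;
    return r - 1;
}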
a/lapack-netlib/LAPACKE/src/lapacke_sspgvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sspgvd.c @@ -66,7 +66,7 @@ lapack_int LAPACKE_sspgvd( int matrix_layout, lapack_int itype, char jobz, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sstedc.c b/lapack-netlib/LAPACKE/src/lapacke_sstedc.c index 157874668..f902e8c30 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sstedc.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sstedc.c @@ -69,7 +69,7 @@ lapack_int LAPACKE_sstedc( int matrix_layout, char compz, lapack_int n, float* d if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sstegr.c b/lapack-netlib/LAPACKE/src/lapacke_sstegr.c index c6a73b2b4..c02372ba2 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sstegr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sstegr.c @@ -81,7 +81,7 @@ lapack_int LAPACKE_sstegr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sstemr.c b/lapack-netlib/LAPACKE/src/lapacke_sstemr.c index 4229819ab..65dcc9170 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sstemr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sstemr.c @@ -74,7 +74,7 @@ lapack_int LAPACKE_sstemr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sstevd.c b/lapack-netlib/LAPACKE/src/lapacke_sstevd.c index 9f9e2e79e..c5db5d79d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sstevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sstevd.c @@ -64,7 +64,7 @@ lapack_int LAPACKE_sstevd( int matrix_layout, char jobz, lapack_int n, float* d, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sstevr.c b/lapack-netlib/LAPACKE/src/lapacke_sstevr.c index f45c49087..4043e3090 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sstevr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sstevr.c @@ -81,7 +81,7 @@ lapack_int LAPACKE_sstevr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssyev_work.c b/lapack-netlib/LAPACKE/src/lapacke_ssyev_work.c index fb8c8971b..abd62ddf3 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssyev_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssyev_work.c @@ -65,14 +65,18 @@ lapack_int LAPACKE_ssyev_work( int matrix_layout, 
char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_sge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_ssy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_ssyev( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } /* Transpose output matrices */ - LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_ssy_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssyevd.c b/lapack-netlib/LAPACKE/src/lapacke_ssyevd.c index 1995e7950..f5924bd94 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssyevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssyevd.c @@ -50,7 +50,7 @@ lapack_int LAPACKE_ssyevd( int matrix_layout, char jobz, char uplo, lapack_int n #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_sge_nancheck( matrix_layout, n, n, a, lda ) ) { + if( LAPACKE_ssy_nancheck( matrix_layout, uplo, n, a, lda ) ) { return -5; } } @@ -61,7 +61,7 @@ lapack_int LAPACKE_ssyevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssyevd_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_ssyevd_2stage.c index 6d6785acc..40ef1bcc2 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssyevd_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssyevd_2stage.c @@ -50,7 +50,7 @@ lapack_int LAPACKE_ssyevd_2stage( int matrix_layout, char jobz, char uplo, lapac #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_sge_nancheck( matrix_layout, n, n, a, lda ) ) { + if( LAPACKE_ssy_nancheck( matrix_layout, uplo, n, a, lda ) ) { return -5; } } @@ -61,7 +61,7 @@ lapack_int LAPACKE_ssyevd_2stage( int matrix_layout, char jobz, char uplo, lapac if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssyevd_2stage_work.c b/lapack-netlib/LAPACKE/src/lapacke_ssyevd_2stage_work.c index 5942a9abb..d9fe47599 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssyevd_2stage_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssyevd_2stage_work.c @@ -68,7 +68,7 @@ lapack_int LAPACKE_ssyevd_2stage_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_sge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_ssy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_ssyevd_2stage( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, iwork, &liwork, &info ); @@ -76,7 +76,11 @@ lapack_int LAPACKE_ssyevd_2stage_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, 
lda_t, a, lda ); + } else { + LAPACKE_ssy_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssyevd_work.c b/lapack-netlib/LAPACKE/src/lapacke_ssyevd_work.c index 7b2e19adc..bfbf49aee 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssyevd_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssyevd_work.c @@ -68,7 +68,7 @@ lapack_int LAPACKE_ssyevd_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_sge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_ssy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_ssyevd( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, iwork, &liwork, &info ); @@ -76,7 +76,11 @@ lapack_int LAPACKE_ssyevd_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_ssy_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssyevr.c b/lapack-netlib/LAPACKE/src/lapacke_ssyevr.c index d7e050143..3274f6bab 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssyevr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssyevr.c @@ -78,7 +78,7 @@ lapack_int LAPACKE_ssyevr( int matrix_layout, char jobz, char range, char uplo, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssyevr_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_ssyevr_2stage.c index cbc3014e9..8958be31d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssyevr_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssyevr_2stage.c @@ -78,7 +78,7 @@ lapack_int LAPACKE_ssyevr_2stage( int matrix_layout, char jobz, char range, char if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssygvd.c b/lapack-netlib/LAPACKE/src/lapacke_ssygvd.c index 2a1c62aef..5afe8d2de 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssygvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssygvd.c @@ -66,7 +66,7 @@ lapack_int LAPACKE_ssygvd( int matrix_layout, lapack_int itype, char jobz, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssytrs2.c b/lapack-netlib/LAPACKE/src/lapacke_ssytrs2.c index a95a71469..19f447cd8 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssytrs2.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssytrs2.c @@ -34,7 +34,7 @@ #include "lapacke_utils.h" lapack_int LAPACKE_ssytrs2( int matrix_layout, char uplo, lapack_int n, - lapack_int nrhs, const float* a, lapack_int lda, + lapack_int nrhs, float* a, lapack_int lda, const lapack_int* ipiv, float* b, lapack_int ldb ) { lapack_int info = 0; diff --git 
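/*
 * A small robustness note on the jobz == 'V' gates introduced above: the
 * rest of LAPACKE compares job characters case-insensitively through
 * LAPACKE_lsame, so a caller passing jobz = 'v' would take the triangular
 * copy-back branch even though a full eigenvector matrix was produced. A
 * case-insensitive variant of the same gate (a sketch against the ssyevd
 * work-buffer names used above):
 */
if( LAPACKE_lsame( jobz, 'v' ) ) {
    LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );
} else {
    LAPACKE_ssy_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda );
}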
a/lapack-netlib/LAPACKE/src/lapacke_ssytrs2_work.c b/lapack-netlib/LAPACKE/src/lapacke_ssytrs2_work.c index cf98f443d..7d348b382 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssytrs2_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssytrs2_work.c @@ -34,7 +34,7 @@ #include "lapacke_utils.h" lapack_int LAPACKE_ssytrs2_work( int matrix_layout, char uplo, lapack_int n, - lapack_int nrhs, const float* a, + lapack_int nrhs, float* a, lapack_int lda, const lapack_int* ipiv, float* b, lapack_int ldb, float* work ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_stgsen.c b/lapack-netlib/LAPACKE/src/lapacke_stgsen.c index 5464fd22b..d0250eb63 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_stgsen.c +++ b/lapack-netlib/LAPACKE/src/lapacke_stgsen.c @@ -81,7 +81,7 @@ lapack_int LAPACKE_stgsen( int matrix_layout, lapack_int ijob, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ if( ijob != 0 ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_stprfb.c b/lapack-netlib/LAPACKE/src/lapacke_stprfb.c index 846d4ccb3..2ea20f08d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_stprfb.c +++ b/lapack-netlib/LAPACKE/src/lapacke_stprfb.c @@ -39,7 +39,7 @@ lapack_int LAPACKE_stprfb( int matrix_layout, char side, char trans, char direct lapack_int ldv, const float* t, lapack_int ldt, float* a, lapack_int lda, float* b, lapack_int ldb) { - lapack_int ncols_v, nrows_v; + lapack_int ncols_v, nrows_v, ncols_a, nrows_a; lapack_int info = 0; lapack_int ldwork; lapack_int work_size; @@ -50,20 +50,33 @@ lapack_int LAPACKE_stprfb( int matrix_layout, char side, char trans, char direct } #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { - /* Optionally check input matrices for NaNs */ + /* Optionally check input matrices for NaNs + * V is m-by-k (left, columnwise) + * or n-by-k (right, columnwise) + * or k-by-m (left, rowwise) + * or k-by-n (right, rowwise) + * T is k-by-k + * A is k-by-n (left) + * or m-by-k (right) + * B is m-by-n + */ if( LAPACKE_lsame( storev, 'C' ) ) { ncols_v = k; nrows_v = LAPACKE_lsame( side, 'L' ) ? m : - ( LAPACKE_lsame( side, 'R' ) ? n : 0 ); + LAPACKE_lsame( side, 'R' ) ? n : 0; } else if( LAPACKE_lsame( storev, 'R' ) ) { ncols_v = LAPACKE_lsame( side, 'L' ) ? m : - ( LAPACKE_lsame( side, 'R' ) ? n : 0 ); + LAPACKE_lsame( side, 'R' ) ? n : 0; nrows_v = k; } else { ncols_v = 0; nrows_v = 0; } - if( LAPACKE_sge_nancheck( matrix_layout, k, m, a, lda ) ) { + nrows_a = LAPACKE_lsame( side, 'L' ) ? k : + LAPACKE_lsame( side, 'R' ) ? m : 0; + ncols_a = LAPACKE_lsame( side, 'L' ) ? n : + LAPACKE_lsame( side, 'R' ) ? 
k : 0; + if( LAPACKE_sge_nancheck( matrix_layout, ncols_a, nrows_a, a, lda ) ) { return -14; } if( LAPACKE_sge_nancheck( matrix_layout, m, n, b, ldb ) ) { @@ -78,14 +91,14 @@ lapack_int LAPACKE_stprfb( int matrix_layout, char side, char trans, char direct } #endif if (side=='l' || side=='L') { - ldwork = k; - work_size = MAX(1,ldwork) * MAX(1,n); - } + ldwork = k; + work_size = MAX(1,ldwork) * MAX(1,n); + } else { - ldwork = m; - work_size = MAX(1,ldwork) * MAX(1,k); - } - /* Allocate memory for working array(s) */ + ldwork = m; + work_size = MAX(1,ldwork) * MAX(1,k); + } + /* Allocate memory for working array(s) */ work = (float*) LAPACKE_malloc( sizeof(float) * work_size ); if( work == NULL ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_strsen.c b/lapack-netlib/LAPACKE/src/lapacke_strsen.c index efba91af8..0ec3ee907 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_strsen.c +++ b/lapack-netlib/LAPACKE/src/lapacke_strsen.c @@ -69,7 +69,7 @@ lapack_int LAPACKE_strsen( int matrix_layout, char job, char compq, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ if( LAPACKE_lsame( job, 'b' ) || LAPACKE_lsame( job, 'v' ) ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_zgejsv.c b/lapack-netlib/LAPACKE/src/lapacke_zgejsv.c index f3b5110a7..153efb371 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zgejsv.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zgejsv.c @@ -124,7 +124,6 @@ lapack_int LAPACKE_zgejsv( int matrix_layout, char joba, char jobu, char jobv, double* rwork = NULL; lapack_complex_double* cwork = NULL; lapack_int i; - lapack_int nu, nv; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_zgejsv", -1 ); return -1; @@ -132,8 +131,6 @@ lapack_int LAPACKE_zgejsv( int matrix_layout, char joba, char jobu, char jobv, #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - nu = LAPACKE_lsame( jobu, 'n' ) ? 1 : m; - nv = LAPACKE_lsame( jobv, 'n' ) ? 1 : n; if( LAPACKE_zge_nancheck( matrix_layout, m, n, a, lda ) ) { return -10; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_zgelsd.c b/lapack-netlib/LAPACKE/src/lapacke_zgelsd.c index 6d111c69f..eca145090 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zgelsd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zgelsd.c @@ -75,7 +75,7 @@ lapack_int LAPACKE_zgelsd( int matrix_layout, lapack_int m, lapack_int n, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zgesvdq.c b/lapack-netlib/LAPACKE/src/lapacke_zgesvdq.c new file mode 100644 index 000000000..4928b1bc0 --- /dev/null +++ b/lapack-netlib/LAPACKE/src/lapacke_zgesvdq.c @@ -0,0 +1,106 @@ +/***************************************************************************** + Copyright (c) 2014, Intel Corp. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
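/*
 * zgelsd and zgesvdq above show the complex flavour of the workspace query:
 * the optimal complex work size comes back in the real part of a
 * lapack_complex_double and is unpacked with LAPACK_Z2INT, while the integer
 * query needs no conversion and the real one only a plain cast. A small
 * helper form of that unpacking (a sketch; callers would feed it the query
 * variables filled in by a preceding lwork = -1 call):
 */
#include "lapacke_utils.h"

static lapack_int sizes_from_queries( lapack_int iwork_query,
                                      double rwork_query,
                                      lapack_complex_double cwork_query,
                                      lapack_int* liwork, lapack_int* lrwork )
{
    *liwork = iwork_query;              /* already a lapack_int        */
    *lrwork = (lapack_int)rwork_query;  /* reported through a double   */
    return LAPACK_Z2INT( cwork_query ); /* real part of a complex size */
}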
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. +***************************************************************************** +* Contents: Native high-level C interface to LAPACK function zgesvdq +* Author: Intel Corporation +* Generated November 2018 +*****************************************************************************/ + +#include "lapacke_utils.h" + +lapack_int LAPACKE_zgesvdq( int matrix_layout, char joba, char jobp, + char jobr, char jobu, char jobv, + lapack_int m, lapack_int n, lapack_complex_double* a, + lapack_int lda, double* s, lapack_complex_double* u, lapack_int ldu, + lapack_complex_double* v, lapack_int ldv, lapack_int* numrank) +{ + lapack_int info = 0; + lapack_int liwork = -1; + lapack_int* iwork = NULL; + lapack_int iwork_query; + lapack_int lcwork = -1; + lapack_complex_double* cwork = NULL; + lapack_complex_double cwork_query; + lapack_int lrwork = -1; + double* rwork = NULL; + double rwork_query; + lapack_int i; + if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { + LAPACKE_xerbla( "LAPACKE_zgesvdq", -1 ); + return -1; + } +#ifndef LAPACK_DISABLE_NAN_CHECK + if( LAPACKE_get_nancheck() ) { + /* Optionally check input matrices for NaNs */ + if( LAPACKE_zge_nancheck( matrix_layout, m, n, a, lda ) ) { + return -6; + } + } +#endif + /* Query optimal working array(s) size */ + info = LAPACKE_zgesvdq_work( matrix_layout, joba, jobp, jobr, jobu, jobv, + m, n, a, lda, s, u, ldu, v, ldv, numrank, + &iwork_query, liwork, &cwork_query, lcwork, + &rwork_query, lrwork ); + if( info != 0 ) { + goto exit_level_0; + } + liwork = iwork_query; + lcwork = LAPACK_Z2INT(cwork_query); + lrwork = (lapack_int)rwork_query; + /* Allocate memory for work arrays */ + iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); + if( iwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + cwork = (lapack_complex_double*)LAPACKE_malloc( sizeof(lapack_complex_double) * lcwork ); + if( cwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + rwork = (double*)LAPACKE_malloc( sizeof(double) * lrwork ); + if( rwork == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_0; + } + /* Call middle-level interface */ + info = LAPACKE_zgesvdq_work( matrix_layout, joba, jobp, jobr, jobu, jobv, + m, n, a, lda, s, u, ldu, v, ldv, numrank, + iwork, liwork, cwork, lcwork, rwork, lrwork ); + + /* 
Release memory and exit */ + LAPACKE_free( iwork ); + LAPACKE_free( cwork ); + LAPACKE_free( rwork ); +exit_level_0: + if( info == LAPACK_WORK_MEMORY_ERROR ) { + LAPACKE_xerbla( "LAPACKE_zgesvdq", info ); + } + return info; +} diff --git a/lapack-netlib/LAPACKE/src/lapacke_zgesvdq_work.c b/lapack-netlib/LAPACKE/src/lapacke_zgesvdq_work.c new file mode 100644 index 000000000..5824de4e0 --- /dev/null +++ b/lapack-netlib/LAPACKE/src/lapacke_zgesvdq_work.c @@ -0,0 +1,149 @@ +/***************************************************************************** + Copyright (c) 2014, Intel Corp. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. +***************************************************************************** +* Contents: Native middle-level C interface to LAPACK function zgesvdq +* Author: Intel Corporation +* Generated November 2015 +*****************************************************************************/ + +#include "lapacke_utils.h" + +lapack_int LAPACKE_zgesvdq_work( int matrix_layout, char joba, char jobp, + char jobr, char jobu, char jobv, + lapack_int m, lapack_int n, lapack_complex_double* a, + lapack_int lda, double* s, lapack_complex_double* u, lapack_int ldu, + lapack_complex_double* v, lapack_int ldv, lapack_int* numrank, + lapack_int* iwork, lapack_int liwork, + lapack_complex_double* cwork, lapack_int lcwork, + double* rwork, lapack_int lrwork ) +{ + lapack_int info = 0; + if( matrix_layout == LAPACK_COL_MAJOR ) { + /* Call LAPACK function and adjust info */ + LAPACK_zgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda, s, u, &ldu, v, &ldv, + numrank, iwork, &liwork, cwork, &lcwork, rwork, &lrwork, &info ); + if( info < 0 ) { + info = info - 1; + } + } else if( matrix_layout == LAPACK_ROW_MAJOR ) { + lapack_int nrows_u = ( LAPACKE_lsame( jobu, 'a' ) || + LAPACKE_lsame( jobu, 's' ) ) ? m : 1; + lapack_int ncols_u = LAPACKE_lsame( jobu, 'a' ) ? m : + (LAPACKE_lsame( jobu, 's' ) ? MIN(m,n) : 1); + lapack_int nrows_v = LAPACKE_lsame( jobv, 'a' ) ? n : + ( LAPACKE_lsame( jobv, 's' ) ? 
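The new high-level wrapper above follows the standard LAPACKE two-phase workspace query: call once with every length set to -1 so the kernel writes the optimal sizes into the query scalars, then allocate and call again. The recurring `liwork = (lapack_int)iwork_query` to `liwork = iwork_query` change elsewhere in this patch is the same idea: the integer query result is already a `lapack_int`, so the cast was redundant, whereas complex and real queries still need `LAPACK_Z2INT` or a plain cast. A minimal sketch of the idiom, with `kernel_work` as a hypothetical middle-level routine:

```c
#include <stdlib.h>

/* Hypothetical middle-level routine: when lwork == -1 it only writes the
 * optimal workspace size into work[0] and does no computation. */
extern int kernel_work(double *work, int lwork);

int query_and_solve(void)
{
    double query = 0.0;
    int info = kernel_work(&query, -1);            /* phase 1: size query */
    if (info != 0)
        return info;

    int lwork = (int)query;                        /* read the size back */
    double *work = malloc(sizeof(double) * (size_t)(lwork > 1 ? lwork : 1));
    if (work == NULL)
        return -1010;                              /* mimic LAPACK_WORK_MEMORY_ERROR */

    info = kernel_work(work, lwork);               /* phase 2: real run */
    free(work);
    return info;
}
```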
MIN(m,n) : 1); + lapack_int lda_t = MAX(1,m); + lapack_int ldu_t = MAX(1,nrows_u); + lapack_int ldv_t = MAX(1,nrows_v); + lapack_complex_double* a_t = NULL; + lapack_complex_double* u_t = NULL; + lapack_complex_double* v_t = NULL; + /* Check leading dimension(s) */ + if( lda < n ) { + info = -9; + LAPACKE_xerbla( "LAPACKE_zgesvdq_work", info ); + return info; + } + if( ldu < ncols_u ) { + info = -12; + LAPACKE_xerbla( "LAPACKE_zgesvdq_work", info ); + return info; + } + if( ldv < n ) { + info = -14; + LAPACKE_xerbla( "LAPACKE_zgesvdq_work", info ); + return info; + } + /* Query optimal working array(s) size if requested */ + if( lcwork == -1 ) { + LAPACK_zgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda_t, + s, u, &ldu_t, v, &ldv_t, numrank, iwork, &liwork, + cwork, &lcwork, rwork, &lrwork, &info ); + return (info < 0) ? (info - 1) : info; + } + /* Allocate memory for temporary array(s) */ + a_t = (lapack_complex_double*)LAPACKE_malloc( sizeof(lapack_complex_double) * lda_t * MAX(1,n) ); + if( a_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_0; + } + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + u_t = (lapack_complex_double*) + LAPACKE_malloc( sizeof(lapack_complex_double) * ldu_t * MAX(1,ncols_u) ); + if( u_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_1; + } + } + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + v_t = (lapack_complex_double*) + LAPACKE_malloc( sizeof(lapack_complex_double) * ldv_t * MAX(1,n) ); + if( v_t == NULL ) { + info = LAPACK_TRANSPOSE_MEMORY_ERROR; + goto exit_level_2; + } + } + /* Transpose input matrices */ + LAPACKE_zge_trans( matrix_layout, m, n, a, lda, a_t, lda_t ); + /* Call LAPACK function and adjust info */ + LAPACK_zgesvdq( &joba, &jobp, &jobr, &jobu, &jobv, &m, &n, a, &lda_t, + s, u, &ldu_t, v, &ldv_t, numrank, iwork, &liwork, + cwork, &lcwork, rwork, &lrwork, &info ); + if( info < 0 ) { + info = info - 1; + } + /* Transpose output matrices */ + LAPACKE_zge_trans( LAPACK_COL_MAJOR, m, n, a_t, lda_t, a, lda ); + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + LAPACKE_zge_trans( LAPACK_COL_MAJOR, nrows_u, ncols_u, u_t, ldu_t, + u, ldu ); + } + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + LAPACKE_zge_trans( LAPACK_COL_MAJOR, nrows_v, n, v_t, ldv_t, v, + ldv ); + } + /* Release memory and exit */ + if( LAPACKE_lsame( jobv, 'a' ) || LAPACKE_lsame( jobv, 's' ) ) { + LAPACKE_free( v_t ); + } +exit_level_2: + if( LAPACKE_lsame( jobu, 'a' ) || LAPACKE_lsame( jobu, 's' ) ) { + LAPACKE_free( u_t ); + } +exit_level_1: + LAPACKE_free( a_t ); +exit_level_0: + if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) { + LAPACKE_xerbla( "LAPACKE_zgesvdq_work", info ); + } + } else { + info = -1; + LAPACKE_xerbla( "LAPACKE_zgesvdq_work", info ); + } + return info; +} diff --git a/lapack-netlib/LAPACKE/src/lapacke_zggesx.c b/lapack-netlib/LAPACKE/src/lapacke_zggesx.c index 6b4d27045..53e086753 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zggesx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zggesx.c @@ -91,7 +91,7 @@ lapack_int LAPACKE_zggesx( int matrix_layout, char jobvsl, char jobvsr, if( info != 0 ) { goto exit_level_2; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhbevd.c b/lapack-netlib/LAPACKE/src/lapacke_zhbevd.c index 
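The row-major branch of the new `_work` wrapper above follows the usual LAPACKE pattern: validate the leading dimensions, copy into column-major scratch buffers, run the Fortran kernel, copy results back, and unwind the allocations through labelled exit levels so each early error frees exactly what was allocated before it. A minimal single-matrix sketch, where `kernel` and `trans_copy` are hypothetical stand-ins for the LAPACK call and `LAPACKE_zge_trans`:

```c
#include <stdlib.h>

extern void kernel(double *a_t, int lda_t, int *info);
extern void trans_copy(int rows, int cols, const double *src, int lds,
                       double *dst, int ldd);      /* transposing copy */

int row_major_wrapper(int m, int n, double *a, int lda)
{
    int info = 0;
    int lda_t = m > 1 ? m : 1;
    double *a_t = malloc(sizeof(double) * (size_t)lda_t
                                        * (size_t)(n > 1 ? n : 1));
    if (a_t == NULL) {
        info = -1020;               /* mimic LAPACK_TRANSPOSE_MEMORY_ERROR */
        goto exit_level_0;
    }
    trans_copy(m, n, a, lda, a_t, lda_t);   /* row-major in, column-major out */
    kernel(a_t, lda_t, &info);
    trans_copy(n, m, a_t, lda_t, a, lda);   /* results back to row-major */
    free(a_t);
exit_level_0:
    return info;
}
```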
95c6d3a54..ac9467496 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhbevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhbevd.c @@ -67,7 +67,7 @@ lapack_int LAPACKE_zhbevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhbevd_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_zhbevd_2stage.c index eca867b28..9b6005b2d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhbevd_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhbevd_2stage.c @@ -67,7 +67,7 @@ lapack_int LAPACKE_zhbevd_2stage( int matrix_layout, char jobz, char uplo, lapac if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhbgvd.c b/lapack-netlib/LAPACKE/src/lapacke_zhbgvd.c index 91bfc0a73..76c3bac3a 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhbgvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhbgvd.c @@ -71,7 +71,7 @@ lapack_int LAPACKE_zhbgvd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zheev_work.c b/lapack-netlib/LAPACKE/src/lapacke_zheev_work.c index 32b4a76f0..d4e93aed2 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zheev_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zheev_work.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_zheev_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_zge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_zhe_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_zheev( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, rwork, &info ); @@ -78,7 +78,11 @@ lapack_int LAPACKE_zheev_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_zhe_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_zheevd.c b/lapack-netlib/LAPACKE/src/lapacke_zheevd.c index 4b1afb95c..1305ebfb3 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zheevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zheevd.c @@ -53,7 +53,7 @@ lapack_int LAPACKE_zheevd( int matrix_layout, char jobz, char uplo, lapack_int n #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_zge_nancheck( matrix_layout, n, n, a, lda ) ) { + if( LAPACKE_zhe_nancheck( matrix_layout, uplo, n, a, lda ) ) { return -5; } } @@ -65,7 +65,7 @@ lapack_int LAPACKE_zheevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git 
a/lapack-netlib/LAPACKE/src/lapacke_zheevd_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_zheevd_2stage.c index 9016da54c..63f139435 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zheevd_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zheevd_2stage.c @@ -53,7 +53,7 @@ lapack_int LAPACKE_zheevd_2stage( int matrix_layout, char jobz, char uplo, lapac #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_zge_nancheck( matrix_layout, n, n, a, lda ) ) { + if( LAPACKE_zhe_nancheck( matrix_layout, uplo, n, a, lda ) ) { return -5; } } @@ -65,7 +65,7 @@ lapack_int LAPACKE_zheevd_2stage( int matrix_layout, char jobz, char uplo, lapac if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zheevd_2stage_work.c b/lapack-netlib/LAPACKE/src/lapacke_zheevd_2stage_work.c index d4b648ee1..fb33c3e2a 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zheevd_2stage_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zheevd_2stage_work.c @@ -71,7 +71,7 @@ lapack_int LAPACKE_zheevd_2stage_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_zge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_zhe_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_zheevd_2stage( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, rwork, &lrwork, iwork, &liwork, &info ); @@ -79,7 +79,11 @@ lapack_int LAPACKE_zheevd_2stage_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_zhe_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_zheevd_work.c b/lapack-netlib/LAPACKE/src/lapacke_zheevd_work.c index 9672e6a22..5af2a1269 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zheevd_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zheevd_work.c @@ -71,7 +71,7 @@ lapack_int LAPACKE_zheevd_work( int matrix_layout, char jobz, char uplo, goto exit_level_0; } /* Transpose input matrices */ - LAPACKE_zge_trans( matrix_layout, n, n, a, lda, a_t, lda_t ); + LAPACKE_zhe_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ LAPACK_zheevd( &jobz, &uplo, &n, a_t, &lda_t, w, work, &lwork, rwork, &lrwork, iwork, &liwork, &info ); @@ -79,7 +79,11 @@ lapack_int LAPACKE_zheevd_work( int matrix_layout, char jobz, char uplo, info = info - 1; } /* Transpose output matrices */ - LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + if ( jobz == 'V') { + LAPACKE_zge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda ); + } else { + LAPACKE_zhe_trans( LAPACK_COL_MAJOR, uplo, n, a_t, lda_t, a, lda ); + } /* Release memory and exit */ LAPACKE_free( a_t ); exit_level_0: diff --git a/lapack-netlib/LAPACKE/src/lapacke_zheevr.c b/lapack-netlib/LAPACKE/src/lapacke_zheevr.c index 52e7a5bee..0d26dc2f9 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zheevr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zheevr.c @@ -83,7 +83,7 @@ lapack_int LAPACKE_zheevr( int matrix_layout, char jobz, char range, char uplo, if( info != 0 ) { 
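The zheev/zheevd changes above encode a triangle-aware copy rule: on input only the `uplo` triangle of the Hermitian matrix is valid, so `LAPACKE_zhe_trans` suffices, and on output the full matrix is meaningful only when eigenvectors were computed (`jobz == 'V'`). A minimal sketch of the output side, with `he_trans` and `ge_trans` as hypothetical stand-ins for `LAPACKE_zhe_trans` and `LAPACKE_zge_trans`:

```c
#include <complex.h>

extern void he_trans(char uplo, int n, const double complex *src, int lds,
                     double complex *dst, int ldd);   /* one triangle only */
extern void ge_trans(int m, int n, const double complex *src, int lds,
                     double complex *dst, int ldd);   /* full matrix */

void copy_output_back(char jobz, char uplo, int n,
                      const double complex *a_t, int lda_t,
                      double complex *a, int lda)
{
    if (jobz == 'V')
        ge_trans(n, n, a_t, lda_t, a, lda);    /* A now holds eigenvectors */
    else
        he_trans(uplo, n, a_t, lda_t, a, lda); /* only one triangle is defined */
}
```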
goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zheevr_2stage.c b/lapack-netlib/LAPACKE/src/lapacke_zheevr_2stage.c index faf949aef..6fa69c44b 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zheevr_2stage.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zheevr_2stage.c @@ -83,7 +83,7 @@ lapack_int LAPACKE_zheevr_2stage( int matrix_layout, char jobz, char range, char if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhegst.c b/lapack-netlib/LAPACKE/src/lapacke_zhegst.c index aa2d84d84..8c4a5c374 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhegst.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhegst.c @@ -35,7 +35,7 @@ lapack_int LAPACKE_zhegst( int matrix_layout, lapack_int itype, char uplo, lapack_int n, lapack_complex_double* a, - lapack_int lda, const lapack_complex_double* b, + lapack_int lda, lapack_complex_double* b, lapack_int ldb ) { if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhegst_work.c b/lapack-netlib/LAPACKE/src/lapacke_zhegst_work.c index f77894204..62fce1f27 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhegst_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhegst_work.c @@ -35,7 +35,7 @@ lapack_int LAPACKE_zhegst_work( int matrix_layout, lapack_int itype, char uplo, lapack_int n, lapack_complex_double* a, - lapack_int lda, const lapack_complex_double* b, + lapack_int lda, lapack_complex_double* b, lapack_int ldb ) { lapack_int info = 0; diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhegvd.c b/lapack-netlib/LAPACKE/src/lapacke_zhegvd.c index 81c3d29b4..1242a0eda 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhegvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhegvd.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_zhegvd( int matrix_layout, lapack_int itype, char jobz, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhpevd.c b/lapack-netlib/LAPACKE/src/lapacke_zhpevd.c index 948bb9c10..a470ca3bb 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhpevd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhpevd.c @@ -66,7 +66,7 @@ lapack_int LAPACKE_zhpevd( int matrix_layout, char jobz, char uplo, lapack_int n if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhpgvd.c b/lapack-netlib/LAPACKE/src/lapacke_zhpgvd.c index be18d3313..91fa26443 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhpgvd.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhpgvd.c @@ -70,7 +70,7 @@ lapack_int LAPACKE_zhpgvd( int matrix_layout, lapack_int itype, char jobz, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zlantr_work.c b/lapack-netlib/LAPACKE/src/lapacke_zlantr_work.c 
index 0d8bcf550..e62f8a4e3 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zlantr_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zlantr_work.c @@ -43,12 +43,10 @@ double LAPACKE_zlantr_work( int matrix_layout, char norm, char uplo, if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ res = LAPACK_zlantr( &norm, &uplo, &diag, &m, &n, a, &lda, work ); - if( info < 0 ) { - info = info - 1; - } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int lda_t = MAX(1,m); lapack_complex_double* a_t = NULL; + double* work_lapack = NULL; /* Check leading dimension(s) */ if( lda < n ) { info = -8; @@ -62,12 +60,23 @@ double LAPACKE_zlantr_work( int matrix_layout, char norm, char uplo, info = LAPACK_TRANSPOSE_MEMORY_ERROR; goto exit_level_0; } + /* Allocate memory for work array(s) */ + if( LAPACKE_lsame( norm, 'i' ) ) { + work_lapack = (double*)LAPACKE_malloc( sizeof(double) * MAX(1,m) ); + if( work_lapack == NULL ) { + info = LAPACK_WORK_MEMORY_ERROR; + goto exit_level_1; + } + } /* Transpose input matrices */ LAPACKE_ztr_trans( matrix_layout, uplo, diag, MAX(m,n), a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - res = LAPACK_zlantr( &norm, &uplo, &diag, &m, &n, a_t, &lda_t, work ); - info = 0; /* LAPACK call is ok! */ + res = LAPACK_zlantr( &norm, &uplo, &diag, &m, &n, a_t, &lda_t, work_lapack ); /* Release memory and exit */ + if( work_lapack ) { + LAPACKE_free( work_lapack ); + } +exit_level_1: LAPACKE_free( a_t ); exit_level_0: if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_zstedc.c b/lapack-netlib/LAPACKE/src/lapacke_zstedc.c index 1bd7274c1..665c4414f 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zstedc.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zstedc.c @@ -74,7 +74,7 @@ lapack_int LAPACKE_zstedc( int matrix_layout, char compz, lapack_int n, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lrwork = (lapack_int)rwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zstegr.c b/lapack-netlib/LAPACKE/src/lapacke_zstegr.c index 2a65dcc4d..07b5ce81d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zstegr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zstegr.c @@ -82,7 +82,7 @@ lapack_int LAPACKE_zstegr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_zstemr.c b/lapack-netlib/LAPACKE/src/lapacke_zstemr.c index c1144488e..d1d1d5692 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zstemr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zstemr.c @@ -75,7 +75,7 @@ lapack_int LAPACKE_zstemr( int matrix_layout, char jobz, char range, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_zsytrs2.c b/lapack-netlib/LAPACKE/src/lapacke_zsytrs2.c index 3c85f9796..7442702aa 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zsytrs2.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zsytrs2.c @@ -34,7 +34,7 @@ #include "lapacke_utils.h" lapack_int LAPACKE_zsytrs2( int matrix_layout, char uplo, lapack_int n, - lapack_int 
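The zlantr fix above reflects the documented workspace rule for the LANTR family: the work array is referenced only for the infinity norm, where it needs at least m entries, so the row-major path now allocates its own max(1, m)-element buffer for the transposed call instead of reusing the caller's array, and frees it afterwards. A minimal sketch, with `alloc_lantr_work` as a hypothetical helper:

```c
#include <stdlib.h>

double *alloc_lantr_work(char norm, int m)
{
    if (norm == 'I' || norm == 'i') {
        size_t len = (size_t)(m > 1 ? m : 1);
        return malloc(sizeof(double) * len);  /* caller frees after the call */
    }
    return NULL;   /* 'M', '1'/'O', 'F'/'E': work is not referenced */
}
```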
nrhs, const lapack_complex_double* a, + lapack_int nrhs, lapack_complex_double* a, lapack_int lda, const lapack_int* ipiv, lapack_complex_double* b, lapack_int ldb ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_zsytrs2_work.c b/lapack-netlib/LAPACKE/src/lapacke_zsytrs2_work.c index cdc97fa02..ec05ce6d5 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zsytrs2_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zsytrs2_work.c @@ -35,7 +35,7 @@ lapack_int LAPACKE_zsytrs2_work( int matrix_layout, char uplo, lapack_int n, lapack_int nrhs, - const lapack_complex_double* a, lapack_int lda, + lapack_complex_double* a, lapack_int lda, const lapack_int* ipiv, lapack_complex_double* b, lapack_int ldb, lapack_complex_double* work ) diff --git a/lapack-netlib/LAPACKE/src/lapacke_ztgsen.c b/lapack-netlib/LAPACKE/src/lapacke_ztgsen.c index 60f48ba8f..f6f58becd 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ztgsen.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ztgsen.c @@ -84,7 +84,7 @@ lapack_int LAPACKE_ztgsen( int matrix_layout, lapack_int ijob, if( info != 0 ) { goto exit_level_0; } - liwork = (lapack_int)iwork_query; + liwork = iwork_query; lwork = LAPACK_Z2INT( work_query ); /* Allocate memory for work arrays */ if( ijob != 0 ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_ztprfb.c b/lapack-netlib/LAPACKE/src/lapacke_ztprfb.c index fce801762..7a791c0d4 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ztprfb.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ztprfb.c @@ -41,7 +41,7 @@ lapack_int LAPACKE_ztprfb( int matrix_layout, char side, char trans, char direct lapack_complex_double* a, lapack_int lda, lapack_complex_double* b, lapack_int ldb) { - lapack_int ncols_v, nrows_v; + lapack_int ncols_v, nrows_v, ncols_a, nrows_a; lapack_int info = 0; lapack_int ldwork; lapack_int work_size; @@ -52,20 +52,33 @@ lapack_int LAPACKE_ztprfb( int matrix_layout, char side, char trans, char direct } #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { - /* Optionally check input matrices for NaNs */ + /* Optionally check input matrices for NaNs + * V is m-by-k (left, columnwise) + * or n-by-k (right, columnwise) + * or k-by-m (left, rowwise) + * or k-by-n (right, rowwise) + * T is k-by-k + * A is k-by-n (left) + * or m-by-k (right) + * B is m-by-n + */ if( LAPACKE_lsame( storev, 'C' ) ) { ncols_v = k; nrows_v = LAPACKE_lsame( side, 'L' ) ? m : - ( LAPACKE_lsame( side, 'R' ) ? n : 0 ); + LAPACKE_lsame( side, 'R' ) ? n : 0; } else if( LAPACKE_lsame( storev, 'R' ) ) { ncols_v = LAPACKE_lsame( side, 'L' ) ? m : - ( LAPACKE_lsame( side, 'R' ) ? n : 0 ); + LAPACKE_lsame( side, 'R' ) ? n : 0; nrows_v = k; } else { ncols_v = 0; nrows_v = 0; } - if( LAPACKE_zge_nancheck( matrix_layout, k, m, a, lda ) ) { + nrows_a = LAPACKE_lsame( side, 'L' ) ? k : + LAPACKE_lsame( side, 'R' ) ? m : 0; + ncols_a = LAPACKE_lsame( side, 'L' ) ? n : + LAPACKE_lsame( side, 'R' ) ? 
k : 0; + if( LAPACKE_zge_nancheck( matrix_layout, ncols_a, nrows_a, a, lda ) ) { return -14; } if( LAPACKE_zge_nancheck( matrix_layout, m, n, b, ldb ) ) { @@ -80,17 +93,16 @@ lapack_int LAPACKE_ztprfb( int matrix_layout, char side, char trans, char direct } #endif if (side=='l' || side=='L') { - ldwork = k; - work_size = MAX(1,ldwork) * MAX(1,n); - } + ldwork = k; + work_size = MAX(1,ldwork) * MAX(1,n); + } else { - ldwork = m; - work_size = MAX(1,ldwork) * MAX(1,k); - } - + ldwork = m; + work_size = MAX(1,ldwork) * MAX(1,k); + } /* Allocate memory for working array(s) */ work = (lapack_complex_double*) - LAPACKE_malloc( sizeof(lapack_complex_double) * work_size ); + LAPACKE_malloc( sizeof(lapack_complex_double) * work_size ); if( work == NULL ) { info = LAPACK_WORK_MEMORY_ERROR; goto exit_level_0; diff --git a/lapack-netlib/LAPACKE/src/lapacke_zunmhr.c b/lapack-netlib/LAPACKE/src/lapacke_zunmhr.c index 357d71184..61ed6f6f2 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zunmhr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zunmhr.c @@ -58,7 +58,7 @@ lapack_int LAPACKE_zunmhr( int matrix_layout, char side, char trans, if( LAPACKE_zge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -11; } - if( LAPACKE_z_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_z_nancheck( r-1, tau, 1 ) ) { return -10; } } diff --git a/lapack-netlib/LAPACKE/utils/Makefile b/lapack-netlib/LAPACKE/utils/Makefile index 1f639c6ea..648a8c141 100644 --- a/lapack-netlib/LAPACKE/utils/Makefile +++ b/lapack-netlib/LAPACKE/utils/Makefile @@ -32,7 +32,12 @@ ############################################################################## # makefile for LAPACKE, used to build lapacke binary. # -include ../../make.inc +TOPSRCDIR = ../.. +include $(TOPSRCDIR)/make.inc + +.SUFFIXES: .c .o +.c.o: + $(CC) $(CFLAGS) -I../include -c -o $@ $< OBJ = lapacke_cgb_nancheck.o \ lapacke_cgb_trans.o \ @@ -183,15 +188,15 @@ OBJ = lapacke_cgb_nancheck.o \ lapacke_make_complex_float.o \ lapacke_make_complex_double.o +.PHONY: all all: lib +.PHONY: lib lib: $(OBJ) - $(ARCH) $(ARCHFLAGS) ../../$(LAPACKELIB) $^ - $(RANLIB) ../../$(LAPACKELIB) + $(AR) $(ARFLAGS) $(LAPACKELIB) $^ + $(RANLIB) $(LAPACKELIB) +.PHONY: clean cleanobj clean: cleanobj cleanobj: rm -f *.o - -.c.o: - $(CC) $(CFLAGS) -I../include -c -o $@ $< diff --git a/lapack-netlib/LAPACKE/utils/lapacke_chp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_chp_nancheck.c index 5e51e237c..0a7e6a2e2 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_chp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_chp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_cpf_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_cpf_nancheck.c index a1f14fd69..5e058418e 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_cpf_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_cpf_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo, transr or * matrix_layout. 
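The comment corrected across the utils files below states why these checks are so simple: an n-by-n triangular, symmetric, or Hermitian matrix in packed storage occupies n*(n+1)/2 consecutive entries, so a single 1-D scan suffices regardless of `uplo` or `matrix_layout`. A minimal real-valued sketch of that scan:

```c
#include <math.h>
#include <stddef.h>

int packed_has_nan(size_t n, const double *ap)
{
    size_t len = n * (n + 1) / 2;   /* contiguous packed length */
    for (size_t i = 0; i < len; i++)
        if (isnan(ap[i]))
            return 1;
    return 0;
}
```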
*/ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_cpp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_cpp_nancheck.c index fc00ce2df..23174d68b 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_cpp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_cpp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_csp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_csp_nancheck.c index 56d53c74b..d1a8aa290 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_csp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_csp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_ctp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_ctp_nancheck.c index 97d1ab083..35c48a409 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_ctp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_ctp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_dpf_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_dpf_nancheck.c index 69c4cfdb4..df95f1318 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_dpf_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_dpf_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo, transr or * matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_dpp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_dpp_nancheck.c index 214496710..0ba66f96c 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_dpp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_dpp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_dsp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_dsp_nancheck.c index 2eada7c99..69d24611c 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_dsp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_dsp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. 
*/ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_dtp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_dtp_nancheck.c index 29666e273..43f33bdd2 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_dtp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_dtp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_spf_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_spf_nancheck.c index 0e5b4659f..20666c4d6 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_spf_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_spf_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo, transr or * matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_spp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_spp_nancheck.c index eae73fa5c..c1098de70 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_spp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_spp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_ssp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_ssp_nancheck.c index 447724b01..35ffe6522 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_ssp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_ssp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_stp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_stp_nancheck.c index 2932d4040..4dfef0200 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_stp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_stp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_zhp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_zhp_nancheck.c index 694e1310e..bcf331fe1 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_zhp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_zhp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. 
*/ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_zpf_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_zpf_nancheck.c index a0682290b..c510b1d1a 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_zpf_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_zpf_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo, transr or * matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_zpp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_zpp_nancheck.c index 141a796aa..450878bcf 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_zpp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_zpp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_zsp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_zsp_nancheck.c index d1a88641c..2d7795166 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_zsp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_zsp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/LAPACKE/utils/lapacke_ztp_nancheck.c b/lapack-netlib/LAPACKE/utils/lapacke_ztp_nancheck.c index 8e1eec971..d3a06c381 100644 --- a/lapack-netlib/LAPACKE/utils/lapacke_ztp_nancheck.c +++ b/lapack-netlib/LAPACKE/utils/lapacke_ztp_nancheck.c @@ -33,7 +33,7 @@ #include "lapacke_utils.h" /* Check a matrix for NaN entries. - * Since matrix in packed format stored continiously it just required to + * Since matrix in packed format stored continuously it just required to * check 1d array for NaNs. It doesn't depend upon uplo or matrix_layout. */ diff --git a/lapack-netlib/Makefile b/lapack-netlib/Makefile index 1d7e82c34..d5e75b69e 100644 --- a/lapack-netlib/Makefile +++ b/lapack-netlib/Makefile @@ -4,89 +4,120 @@ # April 2012 # -include make.inc +TOPSRCDIR = . +include $(TOPSRCDIR)/make.inc +.PHONY: all all: lapack_install lib blas_testing lapack_testing +.PHONY: lib lib: lapacklib tmglib #lib: blaslib variants lapacklib tmglib +.PHONY: blaslib blaslib: $(MAKE) -C BLAS +.PHONY: cblaslib cblaslib: $(MAKE) -C CBLAS +.PHONY: lapacklib lapacklib: $(MAKE) -C SRC +.PHONY: lapackelib lapackelib: $(MAKE) -C LAPACKE +.PHONY: blaspplib +blaspplib: + @echo "Thank you for your interest in BLAS++, a newly developed C++ API for BLAS library" + @echo "The objective of BLAS++ is to provide a convenient, performance oriented API for development in the C++ language, that, for the most part, preserves established conventions, while, at the same time, takes advantages of modern C++ features, such as: namespaces, templates, exceptions, etc." + @echo "We are still working on integrating BLAS++ in our library. 
For the moment, you can download directly blas++ from https://bitbucket.org/icl/blaspp" + @echo "For support BLAS++ related question, please email: slate-user@icl.utk.edu" + +.PHONY: lapackpplib +lapackpplib: + @echo "Thank you for your interest in LAPACK++, a newly developed C++ API for LAPACK library" + @echo "The objective of LAPACK++ is to provide a convenient, performance oriented API for development in the C++ language, that, for the most part, preserves established conventions, while, at the same time, takes advantages of modern C++ features, such as: namespaces, templates, exceptions, etc." + @echo "We are still working on integrating LAPACK++ in our library. For the moment, you can download directly lapack++ from https://bitbucket.org/icl/lapackpp" + @echo "For support LAPACK++ related question, please email: slate-user@icl.utk.edu" + +.PHONY: tmglib tmglib: $(MAKE) -C TESTING/MATGEN +.PHONY: variants variants: $(MAKE) -C SRC/VARIANTS +.PHONY: lapack_install lapack_install: $(MAKE) -C INSTALL run +.PHONY: blas_testing blas_testing: blaslib $(MAKE) -C BLAS blas_testing +.PHONY: cblas_testing cblas_testing: cblaslib blaslib $(MAKE) -C CBLAS cblas_testing +.PHONY: lapack_testing lapack_testing: tmglib lapacklib blaslib $(MAKE) -C TESTING/LIN cleanexe $(MAKE) -C TESTING ./lapack_testing.py +.PHONY: variants_testing variants_testing: tmglib variants lapacklib blaslib $(MAKE) -C TESTING/LIN cleanexe - $(MAKE) -C TESTING/LIN VARLIB='SRC/VARIANTS/cholrl.a' + $(MAKE) -C TESTING/LIN VARLIB='../../SRC/VARIANTS/cholrl.a' $(MAKE) -C TESTING stest.out && mv TESTING/stest.out TESTING/stest_cholrl.out $(MAKE) -C TESTING dtest.out && mv TESTING/dtest.out TESTING/dtest_cholrl.out $(MAKE) -C TESTING ctest.out && mv TESTING/ctest.out TESTING/ctest_cholrl.out $(MAKE) -C TESTING ztest.out && mv TESTING/ztest.out TESTING/ztest_cholrl.out $(MAKE) -C TESTING/LIN cleanexe - $(MAKE) -C TESTING/LIN VARLIB='SRC/VARIANTS/choltop.a' + $(MAKE) -C TESTING/LIN VARLIB='../../SRC/VARIANTS/choltop.a' $(MAKE) -C TESTING stest.out && mv TESTING/stest.out TESTING/stest_choltop.out $(MAKE) -C TESTING dtest.out && mv TESTING/dtest.out TESTING/dtest_choltop.out $(MAKE) -C TESTING ctest.out && mv TESTING/ctest.out TESTING/ctest_choltop.out $(MAKE) -C TESTING ztest.out && mv TESTING/ztest.out TESTING/ztest_choltop.out $(MAKE) -C TESTING/LIN cleanexe - $(MAKE) -C TESTING/LIN VARLIB='SRC/VARIANTS/lucr.a' + $(MAKE) -C TESTING/LIN VARLIB='../../SRC/VARIANTS/lucr.a' $(MAKE) -C TESTING stest.out && mv TESTING/stest.out TESTING/stest_lucr.out $(MAKE) -C TESTING dtest.out && mv TESTING/dtest.out TESTING/dtest_lucr.out $(MAKE) -C TESTING ctest.out && mv TESTING/ctest.out TESTING/ctest_lucr.out $(MAKE) -C TESTING ztest.out && mv TESTING/ztest.out TESTING/ztest_lucr.out $(MAKE) -C TESTING/LIN cleanexe - $(MAKE) -C TESTING/LIN VARLIB='SRC/VARIANTS/lull.a' + $(MAKE) -C TESTING/LIN VARLIB='../../SRC/VARIANTS/lull.a' $(MAKE) -C TESTING stest.out && mv TESTING/stest.out TESTING/stest_lull.out $(MAKE) -C TESTING dtest.out && mv TESTING/dtest.out TESTING/dtest_lull.out $(MAKE) -C TESTING ctest.out && mv TESTING/ctest.out TESTING/ctest_lull.out $(MAKE) -C TESTING ztest.out && mv TESTING/ztest.out TESTING/ztest_lull.out $(MAKE) -C TESTING/LIN cleanexe - $(MAKE) -C TESTING/LIN VARLIB='SRC/VARIANTS/lurec.a' + $(MAKE) -C TESTING/LIN VARLIB='../../SRC/VARIANTS/lurec.a' $(MAKE) -C TESTING stest.out && mv TESTING/stest.out TESTING/stest_lurec.out $(MAKE) -C TESTING dtest.out && mv TESTING/dtest.out TESTING/dtest_lurec.out $(MAKE) -C TESTING ctest.out 
&& mv TESTING/ctest.out TESTING/ctest_lurec.out $(MAKE) -C TESTING ztest.out && mv TESTING/ztest.out TESTING/ztest_lurec.out $(MAKE) -C TESTING/LIN cleanexe - $(MAKE) -C TESTING/LIN VARLIB='SRC/VARIANTS/qrll.a' + $(MAKE) -C TESTING/LIN VARLIB='../../SRC/VARIANTS/qrll.a' $(MAKE) -C TESTING stest.out && mv TESTING/stest.out TESTING/stest_qrll.out $(MAKE) -C TESTING dtest.out && mv TESTING/dtest.out TESTING/dtest_qrll.out $(MAKE) -C TESTING ctest.out && mv TESTING/ctest.out TESTING/ctest_qrll.out $(MAKE) -C TESTING ztest.out && mv TESTING/ztest.out TESTING/ztest_qrll.out +.PHONY: cblas_example cblas_example: cblaslib blaslib $(MAKE) -C CBLAS cblas_example +.PHONY: lapacke_example lapacke_example: lapackelib lapacklib blaslib $(MAKE) -C LAPACKE lapacke_example +.PHONY: html html: @echo "LAPACK HTML PAGES GENERATION with Doxygen" doxygen DOCS/Doxyfile @@ -96,6 +127,7 @@ html: @echo "Online version available at http://www.netlib.org/lapack/explore-html/" @echo "==================" +.PHONY: man man: @echo "LAPACK MAN PAGES GENERATION with Doxygen" doxygen DOCS/Doxyfile_man @@ -105,6 +137,7 @@ man: @echo "Usage: man dgetrf.f" @echo "==================" +.PHONY: clean cleanobj cleanlib cleanexe cleantest clean: $(MAKE) -C INSTALL clean $(MAKE) -C BLAS clean @@ -146,4 +179,4 @@ cleantest: $(MAKE) -C INSTALL cleantest $(MAKE) -C BLAS cleantest $(MAKE) -C CBLAS cleantest - $(MAKE) -C TESTING cleantest + $(MAKE) -C TESTING cleantest \ No newline at end of file diff --git a/lapack-netlib/README.md b/lapack-netlib/README.md index e5ac2d9c8..f0aed6c18 100644 --- a/lapack-netlib/README.md +++ b/lapack-netlib/README.md @@ -3,6 +3,7 @@ [![Build Status](https://travis-ci.org/Reference-LAPACK/lapack.svg?branch=master)](https://travis-ci.org/Reference-LAPACK/lapack) [![Appveyor](https://ci.appveyor.com/api/projects/status/bh38iin398msrbtr?svg=true)](https://ci.appveyor.com/project/langou/lapack/) [![codecov](https://codecov.io/gh/Reference-LAPACK/lapack/branch/master/graph/badge.svg)](https://codecov.io/gh/Reference-LAPACK/lapack) +[![Packaging status](https://repology.org/badge/tiny-repos/lapack.svg)](https://repology.org/metapackage/lapack/versions) * VERSION 1.0 : February 29, 1992 @@ -29,6 +30,7 @@ * VERSION 3.7.0 : December 2016 * VERSION 3.7.1 : June 2017 * VERSION 3.8.0 : November 2017 +* VERSION 3.9.0 : November 2019 LAPACK is a library of Fortran subroutines for solving the most commonly occurring problems in numerical linear algebra. @@ -70,6 +72,14 @@ CBLAS, a C interface to the BLAS, and (5) LAPACKE, a C interface to LAPACK. - LAPACK includes also the CMake build. You will need to have CMake installed on your machine (CMake is available at http://www.cmake.org/). CMake will allow an easy installation on a Windows Machine. + An example CMake build is: + ```sh + mkdir build + cd build + cmake -DCMAKE_INSTALL_LIBDIR=$HOME/.local/lapack .. + cmake --build -j . --target install + ``` + That installs the LAPACK library under $HOME/.local/lapack/ - Specific information to run LAPACK under Windows is available at http://icl.cs.utk.edu/lapack-for-windows/lapack/. @@ -99,7 +109,7 @@ You can also contact directly the LAPACK team at lapack@icl.utk.edu. ## Testing LAPACK includes a thorough test suite. We recommend that, after compilation, -you run the test suite. +you run the test suite. For complete information on the LAPACK Testing please consult LAPACK Working Note 41 "Installation Guide for LAPACK". @@ -115,4 +125,3 @@ LAPACK now includes the LAPACKE package. 
LAPACKE is a Standard C language API for LAPACK This was born from a collaboration of the LAPACK and INTEL Math Kernel Library teams. See: http://www.netlib.org/lapack/#_standard_c_language_apis_for_lapack. - diff --git a/lapack-netlib/SRC/CMakeLists.txt b/lapack-netlib/SRC/CMakeLists.txt index 944401beb..f19bdd302 100644 --- a/lapack-netlib/SRC/CMakeLists.txt +++ b/lapack-netlib/SRC/CMakeLists.txt @@ -106,7 +106,7 @@ set(SLASRC slatbs.f slatdf.f slatps.f slatrd.f slatrs.f slatrz.f slauu2.f slauum.f sopgtr.f sopmtr.f sorg2l.f sorg2r.f sorgbr.f sorghr.f sorgl2.f sorglq.f sorgql.f sorgqr.f sorgr2.f - sorgrq.f sorgtr.f sorm2l.f sorm2r.f sorm22.f + sorgrq.f sorgtr.f sorgtsqr.f sorm2l.f sorm2r.f sorm22.f sormbr.f sormhr.f sorml2.f sormlq.f sormql.f sormqr.f sormr2.f sormr3.f sormrq.f sormrz.f sormtr.f spbcon.f spbequ.f spbrfs.f spbstf.f spbsv.f spbsvx.f @@ -148,9 +148,11 @@ set(SLASRC sgetsls.f sgeqr.f slatsqr.f slamtsqr.f sgemqr.f sgelq.f slaswlq.f slamswlq.f sgemlq.f stplqt.f stplqt2.f stpmlqt.f + sorhr_col.f slaorhr_col_getrfnp.f slaorhr_col_getrfnp2.f ssytrd_2stage.f ssytrd_sy2sb.f ssytrd_sb2st.F ssb2st_kernels.f ssyevd_2stage.f ssyev_2stage.f ssyevx_2stage.f ssyevr_2stage.f - ssbev_2stage.f ssbevx_2stage.f ssbevd_2stage.f ssygv_2stage.f) + ssbev_2stage.f ssbevx_2stage.f ssbevd_2stage.f ssygv_2stage.f + sgesvdq.f scombssq.f) set(DSLASRC spotrs.f sgetrs.f spotrf.f sgetrf.f) @@ -233,7 +235,7 @@ set(CLASRC ctptrs.f ctrcon.f ctrevc.f ctrevc3.f ctrexc.f ctrrfs.f ctrsen.f ctrsna.f ctrsyl.f ctrti2.f ctrtri.f ctrtrs.f ctzrzf.f cung2l.f cung2r.f cungbr.f cunghr.f cungl2.f cunglq.f cungql.f cungqr.f cungr2.f - cungrq.f cungtr.f cunm2l.f cunm2r.f cunmbr.f cunmhr.f cunml2.f cunm22.f + cungrq.f cungtr.f cungtsqr.f cunm2l.f cunm2r.f cunmbr.f cunmhr.f cunml2.f cunm22.f cunmlq.f cunmql.f cunmqr.f cunmr2.f cunmr3.f cunmrq.f cunmrz.f cunmtr.f cupgtr.f cupmtr.f icmax1.f scsum1.f cstemr.f chfrk.f ctfttp.f clanhf.f cpftrf.f cpftri.f cpftrs.f ctfsm.f ctftri.f @@ -247,9 +249,11 @@ set(CLASRC cgetsls.f cgeqr.f clatsqr.f clamtsqr.f cgemqr.f cgelq.f claswlq.f clamswlq.f cgemlq.f ctplqt.f ctplqt2.f ctpmlqt.f + cunhr_col.f claunhr_col_getrfnp.f claunhr_col_getrfnp2.f chetrd_2stage.f chetrd_he2hb.f chetrd_hb2st.F chb2st_kernels.f cheevd_2stage.f cheev_2stage.f cheevx_2stage.f cheevr_2stage.f - chbev_2stage.f chbevx_2stage.f chbevd_2stage.f chegv_2stage.f) + chbev_2stage.f chbevx_2stage.f chbevd_2stage.f chegv_2stage.f + cgesvdq.f) set(CXLASRC cgesvxx.f cgerfsx.f cla_gerfsx_extended.f cla_geamv.f cla_gercond_c.f cla_gercond_x.f cla_gerpvgrw.f @@ -295,7 +299,7 @@ set(DLASRC dlatbs.f dlatdf.f dlatps.f dlatrd.f dlatrs.f dlatrz.f dlauu2.f dlauum.f dopgtr.f dopmtr.f dorg2l.f dorg2r.f dorgbr.f dorghr.f dorgl2.f dorglq.f dorgql.f dorgqr.f dorgr2.f - dorgrq.f dorgtr.f dorm2l.f dorm2r.f dorm22.f + dorgrq.f dorgtr.f dorgtsqr.f dorm2l.f dorm2r.f dorm22.f dormbr.f dormhr.f dorml2.f dormlq.f dormql.f dormqr.f dormr2.f dormr3.f dormrq.f dormrz.f dormtr.f dpbcon.f dpbequ.f dpbrfs.f dpbstf.f dpbsv.f dpbsvx.f @@ -339,9 +343,11 @@ set(DLASRC dgetsls.f dgeqr.f dlatsqr.f dlamtsqr.f dgemqr.f dgelq.f dlaswlq.f dlamswlq.f dgemlq.f dtplqt.f dtplqt2.f dtpmlqt.f + dorhr_col.f dlaorhr_col_getrfnp.f dlaorhr_col_getrfnp2.f dsytrd_2stage.f dsytrd_sy2sb.f dsytrd_sb2st.F dsb2st_kernels.f dsyevd_2stage.f dsyev_2stage.f dsyevx_2stage.f dsyevr_2stage.f - dsbev_2stage.f dsbevx_2stage.f dsbevd_2stage.f dsygv_2stage.f) + dsbev_2stage.f dsbevx_2stage.f dsbevd_2stage.f dsygv_2stage.f + dgesvdq.f dcombssq.f) set(DXLASRC dgesvxx.f dgerfsx.f 
dla_gerfsx_extended.f dla_geamv.f dla_gercond.f dla_gerpvgrw.f dsysvxx.f dsyrfsx.f @@ -424,7 +430,7 @@ set(ZLASRC ztptrs.f ztrcon.f ztrevc.f ztrevc3.f ztrexc.f ztrrfs.f ztrsen.f ztrsna.f ztrsyl.f ztrti2.f ztrtri.f ztrtrs.f ztzrzf.f zung2l.f zung2r.f zungbr.f zunghr.f zungl2.f zunglq.f zungql.f zungqr.f zungr2.f - zungrq.f zungtr.f zunm2l.f zunm2r.f zunmbr.f zunmhr.f zunml2.f zunm22.f + zungrq.f zungtr.f zungtsqr.f zunm2l.f zunm2r.f zunmbr.f zunmhr.f zunml2.f zunm22.f zunmlq.f zunmql.f zunmqr.f zunmr2.f zunmr3.f zunmrq.f zunmrz.f zunmtr.f zupgtr.f zupmtr.f izmax1.f dzsum1.f zstemr.f @@ -440,9 +446,11 @@ set(ZLASRC zgelqt.f zgelqt3.f zgemlqt.f zgetsls.f zgeqr.f zlatsqr.f zlamtsqr.f zgemqr.f zgelq.f zlaswlq.f zlamswlq.f zgemlq.f + zunhr_col.f zlaunhr_col_getrfnp.f zlaunhr_col_getrfnp2.f zhetrd_2stage.f zhetrd_he2hb.f zhetrd_hb2st.F zhb2st_kernels.f zheevd_2stage.f zheev_2stage.f zheevx_2stage.f zheevr_2stage.f - zhbev_2stage.f zhbevx_2stage.f zhbevd_2stage.f zhegv_2stage.f) + zhbev_2stage.f zhbevx_2stage.f zhbevd_2stage.f zhegv_2stage.f + zgesvdq.f) set(ZXLASRC zgesvxx.f zgerfsx.f zla_gerfsx_extended.f zla_geamv.f zla_gercond_c.f zla_gercond_x.f zla_gerpvgrw.f zsysvxx.f zsyrfsx.f @@ -504,7 +512,7 @@ if(USE_XBLAS) endif() target_link_libraries(lapack PRIVATE ${BLAS_LIBRARIES}) -if (${CMAKE_BUILD_TYPE_UPPER} STREQUAL "COVERAGE") +if(_is_coverage_build) target_link_libraries(lapack PRIVATE gcov) add_coverage(lapack) endif() diff --git a/lapack-netlib/SRC/Makefile b/lapack-netlib/SRC/Makefile index 87a8f51e4..83baac875 100644 --- a/lapack-netlib/SRC/Makefile +++ b/lapack-netlib/SRC/Makefile @@ -1,5 +1,3 @@ -include ../make.inc - ####################################################################### # This is the makefile to create a library for LAPACK. # The files are organized as follows: @@ -44,7 +42,7 @@ include ../make.inc # and is created at the next higher directory level. # # To remove the object files after the library is created, enter -# make clean +# make cleanobj # On some systems, you can force the source files to be recompiled by # entering (for example) # make single FRC=FRC @@ -56,12 +54,21 @@ include ../make.inc # ####################################################################### +TOPSRCDIR = .. 
+include $(TOPSRCDIR)/make.inc + +.SUFFIXES: .F .o +.F.o: + $(FC) $(FFLAGS) -c -o $@ $< + ALLAUX_O = ilaenv.o ilaenv2stage.o ieeeck.o lsamen.o xerbla.o xerbla_array.o \ iparmq.o iparam2stage.o \ ilaprec.o ilatrans.o ilauplo.o iladiag.o chla_transtype.o \ ../INSTALL/ilaver.o ../INSTALL/lsame.o ../INSTALL/slamch.o +ifneq "$(or $(BUILD_SINGLE),$(BUILD_COMPLEX))" "" SCLAUX = \ + sbdsvdx.o sstevx.o sstein.o \ sbdsdc.o \ sbdsqr.o sdisna.o slabad.o slacpy.o sladiv.o slae2.o slaebz.o \ slaed0.o slaed1.o slaed2.o slaed3.o slaed4.o slaed5.o slaed6.o \ @@ -76,10 +83,14 @@ SCLAUX = \ slaset.o slasq1.o slasq2.o slasq3.o slasq4.o slasq5.o slasq6.o \ slasr.o slasrt.o slassq.o slasv2.o spttrf.o sstebz.o sstedc.o \ ssteqr.o ssterf.o slaisnan.o sisnan.o \ - slartgp.o slartgs.o \ + slartgp.o slartgs.o scombssq.o \ ../INSTALL/second_$(TIMER).o +endif +ifneq "$(or $(BUILD_DOUBLE),$(BUILD_COMPLEX16))" "" DZLAUX = \ + dcombssq.o \ + dbdsvdx.o dstevx.o dstein.o \ dbdsdc.o \ dbdsqr.o ddisna.o dlabad.o dlacpy.o dladiv.o dlae2.o dlaebz.o \ dlaed0.o dlaed1.o dlaed2.o dlaed3.o dlaed4.o dlaed5.o dlaed6.o \ @@ -96,9 +107,12 @@ DZLAUX = \ dsteqr.o dsterf.o dlaisnan.o disnan.o \ dlartgp.o dlartgs.o \ ../INSTALL/dlamch.o ../INSTALL/dsecnd_$(TIMER).o +endif +#ifeq ($(BUILD_SINGLE),1) +ifdef BUILD_SINGLE SLASRC_O = \ - sbdsvdx.o spotrf2.o sgetrf2.o \ + spotrf2.o sgetrf2.o \ sgbbrd.o sgbcon.o sgbequ.o sgbrfs.o sgbsv.o \ sgbsvx.o sgbtf2.o sgbtrf.o sgbtrs.o sgebak.o sgebal.o sgebd2.o \ sgebrd.o sgecon.o sgeequ.o sgees.o sgeesx.o sgeev.o sgeevx.o \ @@ -128,7 +142,7 @@ SLASRC_O = \ slatbs.o slatdf.o slatps.o slatrd.o slatrs.o slatrz.o \ slauu2.o slauum.o sopgtr.o sopmtr.o sorg2l.o sorg2r.o \ sorgbr.o sorghr.o sorgl2.o sorglq.o sorgql.o sorgqr.o sorgr2.o \ - sorgrq.o sorgtr.o sorm2l.o sorm2r.o sorm22.o \ + sorgrq.o sorgtr.o sorgtsqr.o sorm2l.o sorm2r.o sorm22.o \ sormbr.o sormhr.o sorml2.o sormlq.o sormql.o sormqr.o sormr2.o \ sormr3.o sormrq.o sormrz.o sormtr.o spbcon.o spbequ.o spbrfs.o \ spbstf.o spbsv.o spbsvx.o \ @@ -140,8 +154,7 @@ SLASRC_O = \ ssbev.o ssbevd.o ssbevx.o ssbgst.o ssbgv.o ssbgvd.o ssbgvx.o \ ssbtrd.o sspcon.o sspev.o sspevd.o sspevx.o sspgst.o \ sspgv.o sspgvd.o sspgvx.o ssprfs.o sspsv.o sspsvx.o ssptrd.o \ - ssptrf.o ssptri.o ssptrs.o sstegr.o sstein.o sstev.o sstevd.o sstevr.o \ - sstevx.o \ + ssptrf.o ssptri.o ssptrs.o sstegr.o sstev.o sstevd.o sstevr.o \ ssycon.o ssyev.o ssyevd.o ssyevr.o ssyevx.o ssygs2.o \ ssygst.o ssygv.o ssygvd.o ssygvx.o ssyrfs.o ssysv.o ssysvx.o \ ssytd2.o ssytf2.o ssytrd.o ssytrf.o ssytri.o ssytri2.o ssytri2x.o \ @@ -171,11 +184,17 @@ SLASRC_O = \ sgetsls.o sgeqr.o slatsqr.o slamtsqr.o sgemqr.o \ sgelq.o slaswlq.o slamswlq.o sgemlq.o \ stplqt.o stplqt2.o stpmlqt.o \ + sorhr_col.o slaorhr_col_getrfnp.o slaorhr_col_getrfnp2.o \ ssytrd_2stage.o ssytrd_sy2sb.o ssytrd_sb2st.o ssb2st_kernels.o \ ssyevd_2stage.o ssyev_2stage.o ssyevx_2stage.o ssyevr_2stage.o \ - ssbev_2stage.o ssbevx_2stage.o ssbevd_2stage.o ssygv_2stage.o + ssbev_2stage.o ssbevx_2stage.o ssbevd_2stage.o ssygv_2stage.o \ + sgesvdq.o + +endif +ifneq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" DSLASRC_O = spotrs.o sgetrs.o spotrf.o sgetrf.o +endif ifdef USEXBLAS SXLASRC = sgesvxx.o sgerfsx.o sla_gerfsx_extended.o sla_geamv.o \ @@ -187,6 +206,7 @@ SXLASRC = sgesvxx.o sgerfsx.o sla_gerfsx_extended.o sla_geamv.o \ slascl2.o sla_wwaddw.o endif +ifeq ($(BUILD_COMPLEX),1) CLASRC_O = \ cpotrf2.o cgetrf2.o \ cbdsqr.o cgbbrd.o cgbcon.o cgbequ.o cgbrfs.o cgbsv.o cgbsvx.o \ @@ -258,7 +278,7 @@ CLASRC_O = \ ctptrs.o ctrcon.o ctrevc.o 
ctrevc3.o ctrexc.o ctrrfs.o ctrsen.o ctrsna.o \ ctrsyl.o ctrti2.o ctrtri.o ctrtrs.o ctzrzf.o cung2l.o cung2r.o \ cungbr.o cunghr.o cungl2.o cunglq.o cungql.o cungqr.o cungr2.o \ - cungrq.o cungtr.o cunm2l.o cunm2r.o cunmbr.o cunmhr.o cunml2.o cunm22.o \ + cungrq.o cungtr.o cungtsqr.o cunm2l.o cunm2r.o cunmbr.o cunmhr.o cunml2.o cunm22.o \ cunmlq.o cunmql.o cunmqr.o cunmr2.o cunmr3.o cunmrq.o cunmrz.o \ cunmtr.o cupgtr.o cupmtr.o icmax1.o scsum1.o cstemr.o \ chfrk.o ctfttp.o clanhf.o cpftrf.o cpftri.o cpftrs.o ctfsm.o ctftri.o \ @@ -272,9 +292,12 @@ CLASRC_O = \ cgetsls.o cgeqr.o clatsqr.o clamtsqr.o cgemqr.o \ cgelq.o claswlq.o clamswlq.o cgemlq.o \ ctplqt.o ctplqt2.o ctpmlqt.o \ + cunhr_col.o claunhr_col_getrfnp.o claunhr_col_getrfnp2.o \ chetrd_2stage.o chetrd_he2hb.o chetrd_hb2st.o chb2st_kernels.o \ cheevd_2stage.o cheev_2stage.o cheevx_2stage.o cheevr_2stage.o \ - chbev_2stage.o chbevx_2stage.o chbevd_2stage.o chegv_2stage.o + chbev_2stage.o chbevx_2stage.o chbevd_2stage.o chegv_2stage.o \ + cgesvdq.o +endif ifdef USEXBLAS CXLASRC = cgesvxx.o cgerfsx.o cla_gerfsx_extended.o cla_geamv.o \ @@ -290,11 +313,13 @@ CXLASRC = cgesvxx.o cgerfsx.o cla_gerfsx_extended.o cla_geamv.o \ cla_lin_berr.o clarscl2.o clascl2.o cla_wwaddw.o endif -ZCLASRC_O = cpotrs.o cgetrs.o cpotrf.o cgetrf.o +ifneq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" +ZCLASRC_O = cpotrs.o cgetrs.o cpotrf.o cgetrf.o clag2z.o +endif +ifeq ($(BUILD_DOUBLE),1) DLASRC_O = \ dpotrf2.o dgetrf2.o \ - dbdsvdx.o \ dgbbrd.o dgbcon.o dgbequ.o dgbrfs.o dgbsv.o \ dgbsvx.o dgbtf2.o dgbtrf.o dgbtrs.o dgebak.o dgebal.o dgebd2.o \ dgebrd.o dgecon.o dgeequ.o dgees.o dgeesx.o dgeev.o dgeevx.o \ @@ -324,7 +349,7 @@ DLASRC_O = \ dlatbs.o dlatdf.o dlatps.o dlatrd.o dlatrs.o dlatrz.o dlauu2.o \ dlauum.o dopgtr.o dopmtr.o dorg2l.o dorg2r.o \ dorgbr.o dorghr.o dorgl2.o dorglq.o dorgql.o dorgqr.o dorgr2.o \ - dorgrq.o dorgtr.o dorm2l.o dorm2r.o dorm22.o \ + dorgrq.o dorgtr.o dorgtsqr.o dorm2l.o dorm2r.o dorm22.o \ dormbr.o dormhr.o dorml2.o dormlq.o dormql.o dormqr.o dormr2.o \ dormr3.o dormrq.o dormrz.o dormtr.o dpbcon.o dpbequ.o dpbrfs.o \ dpbstf.o dpbsv.o dpbsvx.o \ @@ -336,8 +361,7 @@ DLASRC_O = \ dsbev.o dsbevd.o dsbevx.o dsbgst.o dsbgv.o dsbgvd.o dsbgvx.o \ dsbtrd.o dspcon.o dspev.o dspevd.o dspevx.o dspgst.o \ dspgv.o dspgvd.o dspgvx.o dsprfs.o dspsv.o dspsvx.o dsptrd.o \ - dsptrf.o dsptri.o dsptrs.o dstegr.o dstein.o dstev.o dstevd.o dstevr.o \ - dstevx.o \ + dsptrf.o dsptri.o dsptrs.o dstegr.o dstev.o dstevd.o dstevr.o \ dsycon.o dsyev.o dsyevd.o dsyevr.o \ dsyevx.o dsygs2.o dsygst.o dsygv.o dsygvd.o dsygvx.o dsyrfs.o \ dsysv.o dsysvx.o \ @@ -368,9 +392,12 @@ DLASRC_O = \ dgetsls.o dgeqr.o dlatsqr.o dlamtsqr.o dgemqr.o \ dgelq.o dlaswlq.o dlamswlq.o dgemlq.o \ dtplqt.o dtplqt2.o dtpmlqt.o \ + dorhr_col.o dlaorhr_col_getrfnp.o dlaorhr_col_getrfnp2.o \ dsytrd_2stage.o dsytrd_sy2sb.o dsytrd_sb2st.o dsb2st_kernels.o \ dsyevd_2stage.o dsyev_2stage.o dsyevx_2stage.o dsyevr_2stage.o \ - dsbev_2stage.o dsbevx_2stage.o dsbevd_2stage.o dsygv_2stage.o + dsbev_2stage.o dsbevx_2stage.o dsbevd_2stage.o dsygv_2stage.o \ + dgesvdq.o +endif ifdef USEXBLAS DXLASRC = dgesvxx.o dgerfsx.o dla_gerfsx_extended.o dla_geamv.o \ @@ -382,6 +409,7 @@ DXLASRC = dgesvxx.o dgerfsx.o dla_gerfsx_extended.o dla_geamv.o \ dlascl2.o dla_wwaddw.o endif +ifeq ($(BUILD_COMPLEX16),1) ZLASRC_O = \ zpotrf2.o zgetrf2.o \ zbdsqr.o zgbbrd.o zgbcon.o zgbequ.o zgbrfs.o zgbsv.o zgbsvx.o \ @@ -456,11 +484,11 @@ ZLASRC_O = \ ztptrs.o ztrcon.o ztrevc.o ztrevc3.o ztrexc.o ztrrfs.o ztrsen.o 
ztrsna.o \ ztrsyl.o ztrti2.o ztrtri.o ztrtrs.o ztzrzf.o zung2l.o \ zung2r.o zungbr.o zunghr.o zungl2.o zunglq.o zungql.o zungqr.o zungr2.o \ - zungrq.o zungtr.o zunm2l.o zunm2r.o zunmbr.o zunmhr.o zunml2.o zunm22.o \ + zungrq.o zungtr.o zungtsqr.o zunm2l.o zunm2r.o zunmbr.o zunmhr.o zunml2.o zunm22.o \ zunmlq.o zunmql.o zunmqr.o zunmr2.o zunmr3.o zunmrq.o zunmrz.o \ zunmtr.o zupgtr.o \ zupmtr.o izmax1.o dzsum1.o zstemr.o \ - zcgesv.o zcposv.o zlag2c.o clag2z.o zlat2c.o \ + zcgesv.o zcposv.o zlag2c.o zlat2c.o \ zhfrk.o ztfttp.o zlanhf.o zpftrf.o zpftri.o zpftrs.o ztfsm.o ztftri.o \ ztfttr.o ztpttf.o ztpttr.o ztrttf.o ztrttp.o \ zgeequb.o zgbequb.o zsyequb.o zpoequb.o zheequb.o \ @@ -472,9 +500,12 @@ ZLASRC_O = \ zgelqt.o zgelqt3.o zgemlqt.o \ zgetsls.o zgeqr.o zlatsqr.o zlamtsqr.o zgemqr.o \ zgelq.o zlaswlq.o zlamswlq.o zgemlq.o \ + zunhr_col.o zlaunhr_col_getrfnp.o zlaunhr_col_getrfnp2.o \ zhetrd_2stage.o zhetrd_he2hb.o zhetrd_hb2st.o zhb2st_kernels.o \ zheevd_2stage.o zheev_2stage.o zheevx_2stage.o zheevr_2stage.o \ - zhbev_2stage.o zhbevx_2stage.o zhbevd_2stage.o zhegv_2stage.o + zhbev_2stage.o zhbevx_2stage.o zhbevd_2stage.o zhegv_2stage.o \ + zgesvdq.o +endif ifdef USEXBLAS ZXLASRC = zgesvxx.o zgerfsx.o zla_gerfsx_extended.o zla_geamv.o \ @@ -488,18 +519,30 @@ ZXLASRC = zgesvxx.o zgerfsx.o zla_gerfsx_extended.o zla_geamv.o \ zla_lin_berr.o zlarscl2.o zlascl2.o zla_wwaddw.o endif -DEPRECSRC = DEPRECATED/cgegs.o DEPRECATED/cgegv.o DEPRECATED/cgelsx.o \ +ifeq ($(BUILD_COMPLEX),1) +CDEPRECSRC = DEPRECATED/cgegs.o DEPRECATED/cgegv.o DEPRECATED/cgelsx.o \ DEPRECATED/cgeqpf.o DEPRECATED/cggsvd.o DEPRECATED/cggsvp.o \ - DEPRECATED/clahrd.o DEPRECATED/clatzm.o DEPRECATED/ctzrqf.o \ + DEPRECATED/clahrd.o DEPRECATED/clatzm.o DEPRECATED/ctzrqf.o +endif + +ifeq ($(BUILD_DOUBLE),1) +DDEPRECSRC = \ DEPRECATED/dgegs.o DEPRECATED/dgegv.o DEPRECATED/dgelsx.o \ DEPRECATED/dgeqpf.o DEPRECATED/dggsvd.o DEPRECATED/dggsvp.o \ - DEPRECATED/dlahrd.o DEPRECATED/dlatzm.o DEPRECATED/dtzrqf.o \ + DEPRECATED/dlahrd.o DEPRECATED/dlatzm.o DEPRECATED/dtzrqf.o +endif +ifeq ($(BUILD_SINGLE),1) +SDEPRECSRC = \ DEPRECATED/sgegs.o DEPRECATED/sgegv.o DEPRECATED/sgelsx.o \ DEPRECATED/sgeqpf.o DEPRECATED/sggsvd.o DEPRECATED/sggsvp.o \ - DEPRECATED/slahrd.o DEPRECATED/slatzm.o DEPRECATED/stzrqf.o \ + DEPRECATED/slahrd.o DEPRECATED/slatzm.o DEPRECATED/stzrqf.o +endif +ifeq ($(BUILD_COMPLEX16),1) +ZDEPRECSRC = \ DEPRECATED/zgegs.o DEPRECATED/zgegv.o DEPRECATED/zgelsx.o \ DEPRECATED/zgeqpf.o DEPRECATED/zggsvd.o DEPRECATED/zggsvp.o \ DEPRECATED/zlahrd.o DEPRECATED/zlatzm.o DEPRECATED/ztzrqf.o +endif # filter out optimized codes from OpenBLAS ALL_AUX_OBJS = xerbla.o ../INSTALL/lsame.o @@ -507,22 +550,22 @@ ALL_AUX_OBJS = xerbla.o ../INSTALL/lsame.o SLAPACKOBJS = \ sgetrf.o sgetrs.o spotrf.o sgetf2.o \ spotf2.o slaswp.o sgesv.o slauu2.o \ - slauum.o strti2.o strtri.o + slauum.o strti2.o strtri.o strtrs.o DLAPACKOBJS = \ dgetrf.o dgetrs.o dpotrf.o dgetf2.o \ dpotf2.o dlaswp.o dgesv.o dlauu2.o \ - dlauum.o dtrti2.o dtrtri.o + dlauum.o dtrti2.o dtrtri.o dtrtrs.o CLAPACKOBJS = \ cgetrf.o cgetrs.o cpotrf.o cgetf2.o \ cpotf2.o claswp.o cgesv.o clauu2.o \ - clauum.o ctrti2.o ctrtri.o + clauum.o ctrti2.o ctrtri.o ctrtrs.o ZLAPACKOBJS = \ zgetrf.o zgetrs.o zpotrf.o zgetf2.o \ zpotf2.o zlaswp.o zgesv.o zlauu2.o \ - zlauum.o ztrti2.o ztrtri.o + zlauum.o ztrti2.o ztrtri.o ztrtrs.o ALLAUX = $(filter-out $(ALL_AUX_OBJS),$(ALLAUX_O)) @@ -547,36 +590,32 @@ ALLXOBJ = $(SXLASRC) $(DXLASRC) $(CXLASRC) $(ZXLASRC) endif ifdef BUILD_DEPRECATED 
-DEPRECATED = $(DEPRECSRC) +DEPRECATED = $(SDEPRECSRC) $(DDEPRECSRC) $(CDEPRECSRC) $(ZDEPRECSRC) endif -all: ../$(LAPACKLIB) - -.PHONY: ../$(LAPACKLIB) +.PHONY: all +all: $(LAPACKLIB) -../$(LAPACKLIB): $(ALLOBJ) $(ALLXOBJ) $(DEPRECATED) - $(ARCH) $(ARCHFLAGS) $@ $(ALLOBJ) $(ALLXOBJ) $(DEPRECATED) +$(LAPACKLIB): $(ALLOBJ) $(ALLXOBJ) $(DEPRECATED) + $(AR) $(ARFLAGS) $@ $^ $(RANLIB) $@ +.PHONY: single complex double complex16 single: $(SLASRC) $(DSLASRC) $(SXLASRC) $(SCLAUX) $(ALLAUX) - $(ARCH) $(ARCHFLAGS) ../$(LAPACKLIB) $(SLASRC) $(DSLASRC) \ - $(SXLASRC) $(SCLAUX) $(ALLAUX) - $(RANLIB) ../$(LAPACKLIB) + $(AR) $(ARFLAGS) $(LAPACKLIB) $^ + $(RANLIB) $(LAPACKLIB) complex: $(CLASRC) $(ZCLASRC) $(CXLASRC) $(SCLAUX) $(ALLAUX) - $(ARCH) $(ARCHFLAGS) ../$(LAPACKLIB) $(CLASRC) $(ZCLASRC) \ - $(CXLASRC) $(SCLAUX) $(ALLAUX) - $(RANLIB) ../$(LAPACKLIB) + $(AR) $(ARFLAGS) $(LAPACKLIB) $^ + $(RANLIB) $(LAPACKLIB) double: $(DLASRC) $(DSLASRC) $(DXLASRC) $(DZLAUX) $(ALLAUX) - $(ARCH) $(ARCHFLAGS) ../$(LAPACKLIB) $(DLASRC) $(DSLASRC) \ - $(DXLASRC) $(DZLAUX) $(ALLAUX) - $(RANLIB) ../$(LAPACKLIB) + $(AR) $(ARFLAGS) $(LAPACKLIB) $^ + $(RANLIB) $(LAPACKLIB) complex16: $(ZLASRC) $(ZCLASRC) $(ZXLASRC) $(DZLAUX) $(ALLAUX) - $(ARCH) $(ARCHFLAGS) ../$(LAPACKLIB) $(ZLASRC) $(ZCLASRC) \ - $(ZXLASRC) $(DZLAUX) $(ALLAUX) - $(RANLIB) ../$(LAPACKLIB) + $(AR) $(ARFLAGS) $(LAPACKLIB) $^ + $(RANLIB) $(LAPACKLIB) $(ALLAUX): $(FRC) $(SCLAUX): $(FRC) @@ -597,18 +636,16 @@ endif FRC: @FRC=$(FRC) -clean: +.PHONY: clean cleanobj cleanlib +clean: cleanobj cleanlib +cleanobj: rm -f *.o DEPRECATED/*.o - -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< - -.F.o: - $(FORTRAN) $(OPTS) -c $< -o $@ - -slaruv.o: slaruv.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< -dlaruv.o: dlaruv.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< -sla_wwaddw.o: sla_wwaddw.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< -dla_wwaddw.o: dla_wwaddw.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< -cla_wwaddw.o: cla_wwaddw.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< -zla_wwaddw.o: zla_wwaddw.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< +cleanlib: + rm -f $(LAPACKLIB) + +slaruv.o: slaruv.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< +dlaruv.o: dlaruv.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< +sla_wwaddw.o: sla_wwaddw.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< +dla_wwaddw.o: dla_wwaddw.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< +cla_wwaddw.o: cla_wwaddw.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< +zla_wwaddw.o: zla_wwaddw.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< diff --git a/lapack-netlib/SRC/VARIANTS/Makefile b/lapack-netlib/SRC/VARIANTS/Makefile index 9f1410755..25d8ee175 100644 --- a/lapack-netlib/SRC/VARIANTS/Makefile +++ b/lapack-netlib/SRC/VARIANTS/Makefile @@ -1,5 +1,3 @@ -include ../../make.inc - ####################################################################### # This is the makefile to create a the variants libraries for LAPACK. # The files are organized as follows: @@ -17,6 +15,9 @@ include ../../make.inc # 1065-1081. http://dx.doi.org/10.1137/S0895479896297744 ####################################################################### +TOPSRCDIR = ../.. 
+include $(TOPSRCDIR)/make.inc
+
CHOLRL = cholesky/RL/cpotrf.o cholesky/RL/dpotrf.o cholesky/RL/spotrf.o cholesky/RL/zpotrf.o
CHOLTOP = cholesky/TOP/cpotrf.o cholesky/TOP/dpotrf.o cholesky/TOP/spotrf.o cholesky/TOP/zpotrf.o
@@ -30,37 +31,36 @@ LUREC = lu/REC/cgetrf.o lu/REC/dgetrf.o lu/REC/sgetrf.o lu/REC/zgetrf.o
QRLL = qr/LL/cgeqrf.o qr/LL/dgeqrf.o qr/LL/sgeqrf.o qr/LL/zgeqrf.o qr/LL/sceil.o
+.PHONY: all
all: cholrl.a choltop.a lucr.a lull.a lurec.a qrll.a
cholrl.a: $(CHOLRL)
- $(ARCH) $(ARCHFLAGS) $@ $^
+ $(AR) $(ARFLAGS) $@ $^
$(RANLIB) $@
choltop.a: $(CHOLTOP)
- $(ARCH) $(ARCHFLAGS) $@ $^
+ $(AR) $(ARFLAGS) $@ $^
$(RANLIB) $@
lucr.a: $(LUCR)
- $(ARCH) $(ARCHFLAGS) $@ $^
+ $(AR) $(ARFLAGS) $@ $^
$(RANLIB) $@
lull.a: $(LULL)
- $(ARCH) $(ARCHFLAGS) $@ $^
+ $(AR) $(ARFLAGS) $@ $^
$(RANLIB) $@
lurec.a: $(LUREC)
- $(ARCH) $(ARCHFLAGS) $@ $^
+ $(AR) $(ARFLAGS) $@ $^
$(RANLIB) $@
qrll.a: $(QRLL)
- $(ARCH) $(ARCHFLAGS) $@ $^
+ $(AR) $(ARFLAGS) $@ $^
$(RANLIB) $@
+.PHONY: clean cleanobj cleanlib
clean: cleanobj cleanlib
cleanobj:
rm -f $(CHOLRL) $(CHOLTOP) $(LUCR) $(LULL) $(LUREC) $(QRLL)
cleanlib:
rm -f *.a
-
-.f.o:
- $(FORTRAN) $(OPTS) -c -o $@ $<
diff --git a/lapack-netlib/SRC/VARIANTS/README b/lapack-netlib/SRC/VARIANTS/README
index 4d301cc6e..ef7626deb 100644
--- a/lapack-netlib/SRC/VARIANTS/README
+++ b/lapack-netlib/SRC/VARIANTS/README
@@ -34,7 +34,7 @@ References:For a more detailed description please refer to
=========
These variants are compiled by default in the build process but they are not tested by default.
-The build process creates one new library per variants in the four arithmetics (single real/double real/single complex/double complex).
+The build process creates one new library per variant in the four arithmetics (single real/double real/single complex/double complex).
The libraries are in the SRC/VARIANTS directory.
Corresponding libraries created in SRC/VARIANTS:
@@ -64,16 +64,16 @@ You should then see the following files in the TESTING directory:
========================
= LINKING YOUR PROGRAM =
========================
-You just need to add the variants methods library in your linking sequence before your lapack libary.
+You just need to add the variants methods library in your linking sequence before your lapack library.
Here is a quick example for LU
Default using LU Right Looking version:
- $(FORTRAN) -c myprog.f
- $(FORTRAN) -o myexe myprog.o $(LAPACKLIB) $(BLASLIB)
+ $(FC) $(FFLAGS) -c myprog.f
+ $(FC) $(FFLAGS) $(LDFLAGS) -o myexe myprog.o $(LAPACKLIB) $(BLASLIB)
Using LU Left Looking version:
- $(FORTRAN) -c myprog.f
- $(FORTRAN) -o myexe myprog.o $(PATH TO LAPACK/SRC/VARIANTS)/lull.a $(LAPACKLIB) $(BLASLIB)
+ $(FC) $(FFLAGS) -c myprog.f
+ $(FC) $(FFLAGS) $(LDFLAGS) -o myexe myprog.o $(PATH TO LAPACK/SRC/VARIANTS)/lull.a $(LAPACKLIB) $(BLASLIB)
===========
= SUPPORT =
diff --git a/lapack-netlib/SRC/cgbrfsx.f b/lapack-netlib/SRC/cgbrfsx.f
index 041b6a1b6..c23608afb 100644
--- a/lapack-netlib/SRC/cgbrfsx.f
+++ b/lapack-netlib/SRC/cgbrfsx.f
@@ -75,7 +75,7 @@
*> Specifies the form of the system of equations:
*> = 'N': A * X = B (No transpose)
*> = 'T': A**T * X = B (Transpose)
-*> = 'C': A**H * X = B (Conjugate transpose = Transpose)
+*> = 'C': A**H * X = B (Conjugate transpose)
*> \endverbatim
*>
*> \param[in] EQUED
@@ -308,7 +308,7 @@
*> information as described below. There currently are up to three
*> pieces of information returned for each right-hand side. If
*> componentwise accuracy is not requested (PARAMS(3) = 0.0), then
-*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT.
3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -344,14 +344,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -359,9 +359,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/cgbsvxx.f b/lapack-netlib/SRC/cgbsvxx.f index 2e113f99c..9f2bbbc1c 100644 --- a/lapack-netlib/SRC/cgbsvxx.f +++ b/lapack-netlib/SRC/cgbsvxx.f @@ -431,7 +431,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -467,14 +467,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -482,9 +482,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. 
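The NPARAMS/PARAMS convention documented in the cgbrfsx.f and cgbsvxx.f hunks above is shared by all of the extended-refinement *RFSX/*SVXX drivers. Below is a minimal Fortran sketch of a caller filling PARAMS, assuming only what the header text states (entries below 0.0 are replaced by the routine's defaults, and PARAMS(3) = 0.0 skips componentwise bounds and hence ERR_BNDS_COMP); the program is illustrative and not part of this patch:

      PROGRAM PDEMO
*     Illustrative sketch, not part of this patch: the PARAMS
*     conventions of the *RFSX/*SVXX drivers documented above.
      INTEGER NPARAMS
      REAL PARAMS( 3 )
      NPARAMS = 3
*     PARAMS(1): 1.0 requests iterative refinement (the default)
      PARAMS( 1 ) = 1.0E+0
*     PARAMS(2): negative, so the driver fills in its own default
      PARAMS( 2 ) = -1.0E+0
*     PARAMS(3): 0.0 declines componentwise error bounds, so the
*     driver will not access ERR_BNDS_COMP
      PARAMS( 3 ) = 0.0E+0
      WRITE( *, * ) 'PARAMS =', PARAMS
      END

Passing NPARAMS <= 0 instead leaves PARAMS unreferenced and selects the documented defaults throughout.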
diff --git a/lapack-netlib/SRC/cgebak.f b/lapack-netlib/SRC/cgebak.f index 63c73bfa7..9b6402622 100644 --- a/lapack-netlib/SRC/cgebak.f +++ b/lapack-netlib/SRC/cgebak.f @@ -48,10 +48,10 @@ *> \verbatim *> JOB is CHARACTER*1 *> Specifies the type of backward transformation required: -*> = 'N', do nothing, return immediately; -*> = 'P', do backward transformation for permutation only; -*> = 'S', do backward transformation for scaling only; -*> = 'B', do backward transformations for both permutation and +*> = 'N': do nothing, return immediately; +*> = 'P': do backward transformation for permutation only; +*> = 'S': do backward transformation for scaling only; +*> = 'B': do backward transformations for both permutation and *> scaling. *> JOB must be the same as the argument JOB supplied to CGEBAL. *> \endverbatim diff --git a/lapack-netlib/SRC/cgeev.f b/lapack-netlib/SRC/cgeev.f index bdd75e4f1..f07d9b755 100644 --- a/lapack-netlib/SRC/cgeev.f +++ b/lapack-netlib/SRC/cgeev.f @@ -157,7 +157,7 @@ *> < 0: if INFO = -i, the i-th argument had an illegal value. *> > 0: if INFO = i, the QR algorithm failed to compute all the *> eigenvalues, and no eigenvectors have been computed; -*> elements and i+1:N of W contain eigenvalues which have +*> elements i+1:N of W contain eigenvalues which have *> converged. *> \endverbatim * diff --git a/lapack-netlib/SRC/cgejsv.f b/lapack-netlib/SRC/cgejsv.f index a7b1c451c..350da4c40 100644 --- a/lapack-netlib/SRC/cgejsv.f +++ b/lapack-netlib/SRC/cgejsv.f @@ -80,13 +80,13 @@ *> desirable, then this option is advisable. The input matrix A *> is preprocessed with QR factorization with FULL (row and *> column) pivoting. -*> = 'G' Computation as with 'F' with an additional estimate of the +*> = 'G': Computation as with 'F' with an additional estimate of the *> condition number of B, where A=B*D. If A has heavily weighted *> rows, then using this condition number gives too pessimistic *> error bound. *> = 'A': Small singular values are not well determined by the data *> and are considered as noisy; the matrix is treated as -*> numerically rank defficient. The error in the computed +*> numerically rank deficient. The error in the computed *> singular values is bounded by f(m,n)*epsilon*||A||. *> The computed SVD A = U * S * V^* restores A up to *> f(m,n)*epsilon*||A||. @@ -117,7 +117,7 @@ *> = 'V': N columns of V are returned in the array V; Jacobi rotations *> are not explicitly accumulated. *> = 'J': N columns of V are returned in the array V, but they are -*> computed as the product of Jacobi rotations, if JOBT .EQ. 'N'. +*> computed as the product of Jacobi rotations, if JOBT = 'N'. *> = 'W': V may be used as workspace of length N*N. See the description *> of V. *> = 'N': V is not computed. @@ -131,7 +131,7 @@ *> specified range. If A .NE. 0 is scaled so that the largest singular *> value of c*A is around SQRT(BIG), BIG=SLAMCH('O'), then JOBR issues *> the licence to kill columns of A whose norm in c*A is less than -*> SQRT(SFMIN) (for JOBR.EQ.'R'), or less than SMALL=SFMIN/EPSLN, +*> SQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, *> where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). *> = 'N': Do not kill small columns of c*A. This option assumes that *> BLAS and QR factorizations and triangular solvers are @@ -229,7 +229,7 @@ *> If JOBU = 'F', then U contains on exit the M-by-M matrix of *> the left singular vectors, including an ONB *> of the orthogonal complement of the Range(A). -*> If JOBU = 'W' .AND. (JOBV.EQ.'V' .AND. JOBT.EQ.'T' .AND. 
M.EQ.N), +*> If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), *> then U is used as workspace if the procedure *> replaces A with A^*. In that case, [V] is computed *> in U as left singular vectors of A^* and then @@ -251,7 +251,7 @@ *> V is COMPLEX array, dimension ( LDV, N ) *> If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of *> the right singular vectors; -*> If JOBV = 'W', AND (JOBU.EQ.'U' AND JOBT.EQ.'T' AND M.EQ.N), +*> If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), *> then V is used as workspace if the pprocedure *> replaces A with A^*. In that case, [U] is computed *> in V as right singular vectors of A^* and then @@ -282,7 +282,7 @@ *> Length of CWORK to confirm proper allocation of workspace. *> LWORK depends on the job: *> -*> 1. If only SIGMA is needed ( JOBU.EQ.'N', JOBV.EQ.'N' ) and +*> 1. If only SIGMA is needed ( JOBU = 'N', JOBV = 'N' ) and *> 1.1 .. no scaled condition estimate required (JOBA.NE.'E'.AND.JOBA.NE.'G'): *> LWORK >= 2*N+1. This is the minimal requirement. *> ->> For optimal performance (blocked code) the optimal value @@ -298,9 +298,9 @@ *> In general, the optimal length LWORK is computed as *> LWORK >= max(N+LWORK(CGEQP3),N+LWORK(CGEQRF), LWORK(CGESVJ), *> N*N+LWORK(CPOCON)). -*> 2. If SIGMA and the right singular vectors are needed (JOBV.EQ.'V'), -*> (JOBU.EQ.'N') -*> 2.1 .. no scaled condition estimate requested (JOBE.EQ.'N'): +*> 2. If SIGMA and the right singular vectors are needed (JOBV = 'V'), +*> (JOBU = 'N') +*> 2.1 .. no scaled condition estimate requested (JOBE = 'N'): *> -> the minimal requirement is LWORK >= 3*N. *> -> For optimal performance, *> LWORK >= max(N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, @@ -318,10 +318,10 @@ *> LWORK >= max(N+LWORK(CGEQP3), LWORK(CPOCON), N+LWORK(CGESVJ), *> N+LWORK(CGELQF), 2*N+LWORK(CGEQRF), N+LWORK(CUNMLQ)). *> 3. If SIGMA and the left singular vectors are needed -*> 3.1 .. no scaled condition estimate requested (JOBE.EQ.'N'): +*> 3.1 .. no scaled condition estimate requested (JOBE = 'N'): *> -> the minimal requirement is LWORK >= 3*N. *> -> For optimal performance: -*> if JOBU.EQ.'U' :: LWORK >= max(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, +*> if JOBU = 'U' :: LWORK >= max(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, *> where NB is the optimal block size for CGEQP3, CGEQRF, CUNMQR. *> In general, the optimal length LWORK is computed as *> LWORK >= max(N+LWORK(CGEQP3), 2*N+LWORK(CGEQRF), N+LWORK(CUNMQR)). @@ -329,16 +329,16 @@ *> required (JOBA='E', or 'G'). *> -> the minimal requirement is LWORK >= 3*N. *> -> For optimal performance: -*> if JOBU.EQ.'U' :: LWORK >= max(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, +*> if JOBU = 'U' :: LWORK >= max(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, *> where NB is the optimal block size for CGEQP3, CGEQRF, CUNMQR. *> In general, the optimal length LWORK is computed as *> LWORK >= max(N+LWORK(CGEQP3),N+LWORK(CPOCON), *> 2*N+LWORK(CGEQRF), N+LWORK(CUNMQR)). *> -*> 4. If the full SVD is needed: (JOBU.EQ.'U' or JOBU.EQ.'F') and -*> 4.1. if JOBV.EQ.'V' +*> 4. If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and +*> 4.1. if JOBV = 'V' *> the minimal requirement is LWORK >= 5*N+2*N*N. -*> 4.2. if JOBV.EQ.'J' the minimal requirement is +*> 4.2. if JOBV = 'J' the minimal requirement is *> LWORK >= 4*N+N*N. *> In both cases, the allocated CWORK can accommodate blocked runs *> of CGEQP3, CGEQRF, CGELQF, CUNMQR, CUNMLQ. @@ -357,7 +357,7 @@ *> of A. (See the description of SVA().) *> RWORK(2) = See the description of RWORK(1). 
*> RWORK(3) = SCONDA is an estimate for the condition number of -*> column equilibrated A. (If JOBA .EQ. 'E' or 'G') +*> column equilibrated A. (If JOBA = 'E' or 'G') *> SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1). *> It is computed using SPOCON. It holds *> N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA @@ -376,7 +376,7 @@ *> triangular factor in the first QR factorization. *> RWORK(5) = an estimate of the scaled condition number of the *> triangular factor in the second QR factorization. -*> The following two parameters are computed if JOBT .EQ. 'T'. +*> The following two parameters are computed if JOBT = 'T'. *> They are provided for a developer/implementer who is familiar *> with the details of the method. *> RWORK(6) = the entropy of A^* * A :: this is the Shannon entropy @@ -457,23 +457,23 @@ *> of JOBA and JOBR. *> IWORK(2) = the number of the computed nonzero singular values *> IWORK(3) = if nonzero, a warning message: -*> If IWORK(3).EQ.1 then some of the column norms of A +*> If IWORK(3) = 1 then some of the column norms of A *> were denormalized floats. The requested high accuracy *> is not warranted by the data. -*> IWORK(4) = 1 or -1. If IWORK(4) .EQ. 1, then the procedure used A^* to +*> IWORK(4) = 1 or -1. If IWORK(4) = 1, then the procedure used A^* to *> do the job as specified by the JOB parameters. -*> If the call to CGEJSV is a workspace query (indicated by LWORK .EQ. -1 and -*> LRWORK .EQ. -1), then on exit IWORK(1) contains the required length of +*> If the call to CGEJSV is a workspace query (indicated by LWORK = -1 and +*> LRWORK = -1), then on exit IWORK(1) contains the required length of *> IWORK for the job parameters used in the call. *> \endverbatim *> *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> < 0 : if INFO = -i, then the i-th argument had an illegal value. -*> = 0 : successful exit; -*> > 0 : CGEJSV did not converge in the maximal allowed number -*> of sweeps. The computed values may be inaccurate. +*> < 0: if INFO = -i, then the i-th argument had an illegal value. +*> = 0: successful exit; +*> > 0: CGEJSV did not converge in the maximal allowed number +*> of sweeps. The computed values may be inaccurate. *> \endverbatim * * Authors: @@ -1336,7 +1336,7 @@ IF ( L2ABER ) THEN * Standard absolute error bound suffices. All sigma_i with * sigma_i < N*EPSLN*||A|| are flushed to zero. This is an -* agressive enforcement of lower numerical rank by introducing a +* aggressive enforcement of lower numerical rank by introducing a * backward error of the order of N*EPSLN*||A||. TEMP1 = SQRT(REAL(N))*EPSLN DO 3001 p = 2, N @@ -1348,9 +1348,9 @@ 3001 CONTINUE 3002 CONTINUE ELSE IF ( L2RANK ) THEN -* .. similarly as above, only slightly more gentle (less agressive). +* .. similarly as above, only slightly more gentle (less aggressive). * Sudden drop on the diagonal of R1 is used as the criterion for -* close-to-rank-defficient. +* close-to-rank-deficient. TEMP1 = SQRT(SFMIN) DO 3401 p = 2, N IF ( ( ABS(A(p,p)) .LT. (EPSLN*ABS(A(p-1,p-1))) ) .OR. @@ -1718,7 +1718,7 @@ CALL CPOCON('L',NR,CWORK(2*N+1),NR,ONE,TEMP1, $ CWORK(2*N+NR*NR+1),RWORK,IERR) CONDR1 = ONE / SQRT(TEMP1) -* .. here need a second oppinion on the condition number +* .. here need a second opinion on the condition number * .. then assume worst case scenario * R1 is OK for inverse <=> CONDR1 .LT. REAL(N) * more conservative <=> CONDR1 .LT. SQRT(REAL(N)) @@ -1763,7 +1763,7 @@ ELSE * * .. 
ill-conditioned case: second QRF with pivoting
-* Note that windowed pivoting would be equaly good
+* Note that windowed pivoting would be equally good
* numerically, and more run-time efficient. So, in
* an optimal implementation, the next call to CGEQP3
* should be replaced with eg. CALL CGEQPX (ACM TOMS #782)
@@ -1821,7 +1821,7 @@
*
IF ( CONDR2 .GE. COND_OK ) THEN
* .. save the Householder vectors used for Q3
-* (this overwrittes the copy of R2, as it will not be
+* (this overwrites the copy of R2, as it will not be
* needed in this branch, but it does not overwritte the
* Huseholder vectors of Q2.).
CALL CLACPY( 'U', NR, NR, V, LDV, CWORK(2*N+1), N )
@@ -2077,7 +2077,7 @@
*
* This branch deploys a preconditioned Jacobi SVD with explicitly
* accumulated rotations. It is included as optional, mainly for
-* experimental purposes. It does perfom well, and can also be used.
+* experimental purposes. It does perform well, and can also be used.
* In this implementation, this branch will be automatically activated
* if the condition number sigma_max(A) / sigma_min(A) is predicted
* to be greater than the overflow threshold. This is because the
diff --git a/lapack-netlib/SRC/cgelq.f b/lapack-netlib/SRC/cgelq.f
index 909162ebc..f0ff3a20d 100644
--- a/lapack-netlib/SRC/cgelq.f
+++ b/lapack-netlib/SRC/cgelq.f
@@ -1,3 +1,4 @@
+*> \brief \b CGELQ
*
* Definition:
* ===========
@@ -17,7 +18,17 @@
* =============
*>
*> \verbatim
-*> CGELQ computes a LQ factorization of an M-by-N matrix A.
+*>
+*> CGELQ computes an LQ factorization of a complex M-by-N matrix A:
+*>
+*> A = ( L 0 ) * Q
+*>
+*> where:
+*>
+*> Q is an N-by-N unitary matrix;
+*> L is a lower-triangular M-by-M matrix;
+*> 0 is an M-by-(N-M) zero matrix, if M < N.
+*>
*> \endverbatim
*
* Arguments:
@@ -138,7 +149,7 @@
*> \verbatim
*>
*> These details are particular for this LAPACK implementation. Users should not
-*> take them for granted. These details may change in the future, and are unlikely not
+*> take them for granted. These details may change in the future, and are not likely
*> true for another LAPACK implementation. These details are relevant if one wants
*> to try to understand the code. They are not part of the interface.
*>
@@ -159,10 +170,10 @@
SUBROUTINE CGELQ( M, N, A, LDA, T, TSIZE, WORK, LWORK,
$ INFO )
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. --
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, M, N, TSIZE, LWORK
@@ -176,7 +187,7 @@
* ..
* .. Local Scalars ..
LOGICAL LQUERY, LMINWS, MINT, MINW
- INTEGER MB, NB, MINTSZ, NBLCKS
+ INTEGER MB, NB, MINTSZ, NBLCKS, LWMIN, LWOPT, LWREQ
* ..
* .. External Functions ..
LOGICAL LSAME
@@ -232,20 +243,32 @@
*
* Determine if the workspace size satisfies minimal size
*
+ IF( ( N.LE.M ) .OR. ( NB.LE.M ) .OR. ( NB.GE.N ) ) THEN
+ LWMIN = MAX( 1, N )
+ LWOPT = MAX( 1, MB*N )
+ ELSE
+ LWMIN = MAX( 1, M )
+ LWOPT = MAX( 1, MB*M )
+ END IF
LMINWS = .FALSE.
- IF( ( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) .OR. LWORK.LT.MB*M )
- $ .AND. ( LWORK.GE.M ) .AND. ( TSIZE.GE.MINTSZ )
+ IF( ( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) .OR. LWORK.LT.LWOPT )
+ $ .AND. ( LWORK.GE.LWMIN ) .AND. ( TSIZE.GE.MINTSZ )
$ .AND. ( .NOT.LQUERY ) ) THEN
IF( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) ) THEN
LMINWS = .TRUE.
MB = 1
NB = N
END IF
- IF( LWORK.LT.MB*M ) THEN
+ IF( LWORK.LT.LWOPT ) THEN
LMINWS = .TRUE.
MB = 1
END IF
END IF
+ IF( ( N.LE.M ) .OR. ( NB.LE.M ) .OR. ( NB.GE.N ) ) THEN
+ LWREQ = MAX( 1, MB*N )
+ ELSE
+ LWREQ = MAX( 1, MB*M )
+ END IF
*
IF( M.LT.0 ) THEN
INFO = -1
@@ -256,7 +279,7 @@
ELSE IF( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 )
$ .AND. ( .NOT.LQUERY ) .AND. ( .NOT.LMINWS ) ) THEN
INFO = -6
- ELSE IF( ( LWORK.LT.MAX( 1, M*MB ) ) .AND .( .NOT.LQUERY )
+ ELSE IF( ( LWORK.LT.LWREQ ) .AND .( .NOT.LQUERY )
$ .AND. ( .NOT.LMINWS ) ) THEN
INFO = -8
END IF
@@ -270,9 +293,9 @@
T( 2 ) = MB
T( 3 ) = NB
IF( MINW ) THEN
- WORK( 1 ) = MAX( 1, N )
+ WORK( 1 ) = LWMIN
ELSE
- WORK( 1 ) = MAX( 1, MB*M )
+ WORK( 1 ) = LWREQ
END IF
END IF
IF( INFO.NE.0 ) THEN
@@ -297,7 +320,7 @@
$ LWORK, INFO )
END IF
*
- WORK( 1 ) = MAX( 1, MB*M )
+ WORK( 1 ) = LWREQ
*
RETURN
*
diff --git a/lapack-netlib/SRC/cgelq2.f b/lapack-netlib/SRC/cgelq2.f
index 9742d359b..3fab2c396 100644
--- a/lapack-netlib/SRC/cgelq2.f
+++ b/lapack-netlib/SRC/cgelq2.f
@@ -33,8 +33,16 @@
*>
*> \verbatim
*>
-*> CGELQ2 computes an LQ factorization of a complex m by n matrix A:
-*> A = L * Q.
+*> CGELQ2 computes an LQ factorization of a complex m-by-n matrix A:
+*>
+*> A = ( L 0 ) * Q
+*>
+*> where:
+*>
+*> Q is an n-by-n unitary matrix;
+*> L is a lower-triangular m-by-m matrix;
+*> 0 is an m-by-(n-m) zero matrix, if m < n.
+*>
*> \endverbatim
*
* Arguments:
@@ -96,7 +104,7 @@
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
-*> \date December 2016
+*> \date November 2019
*
*> \ingroup complexGEcomputational
*
@@ -121,10 +129,10 @@
* =====================================================================
SUBROUTINE CGELQ2( M, N, A, LDA, TAU, WORK, INFO )
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, M, N
diff --git a/lapack-netlib/SRC/cgelqf.f b/lapack-netlib/SRC/cgelqf.f
index 216630e88..030ac0b4d 100644
--- a/lapack-netlib/SRC/cgelqf.f
+++ b/lapack-netlib/SRC/cgelqf.f
@@ -34,7 +34,15 @@
*> \verbatim
*>
*> CGELQF computes an LQ factorization of a complex M-by-N matrix A:
-*> A = L * Q.
+*>
+*> A = ( L 0 ) * Q
+*>
+*> where:
+*>
+*> Q is an N-by-N unitary matrix;
+*> L is a lower-triangular M-by-M matrix;
+*> 0 is an M-by-(N-M) zero matrix, if M < N.
+*>
*> \endverbatim
*
* Arguments:
@@ -110,7 +118,7 @@
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
-*> \date December 2016
+*> \date November 2019
*
*> \ingroup complexGEcomputational
*
@@ -135,10 +143,10 @@
* =====================================================================
SUBROUTINE CGELQF( M, N, A, LDA, TAU, WORK, LWORK, INFO )
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, LWORK, M, N
diff --git a/lapack-netlib/SRC/cgelqt.f b/lapack-netlib/SRC/cgelqt.f
index e151f10fe..f40db0b02 100644
--- a/lapack-netlib/SRC/cgelqt.f
+++ b/lapack-netlib/SRC/cgelqt.f
@@ -1,3 +1,4 @@
+*> \brief \b CGELQT
*
* Definition:
* ===========
diff --git a/lapack-netlib/SRC/cgelqt3.f b/lapack-netlib/SRC/cgelqt3.f
index f64379722..80a9a9fc7 100644
--- a/lapack-netlib/SRC/cgelqt3.f
+++ b/lapack-netlib/SRC/cgelqt3.f
@@ -1,3 +1,5 @@
+*> \brief \b CGELQT3
+*
* Definition:
* ===========
*
diff --git a/lapack-netlib/SRC/cgemlq.f b/lapack-netlib/SRC/cgemlq.f
index 2f44e7cfb..4e374077e 100644
--- a/lapack-netlib/SRC/cgemlq.f
+++ b/lapack-netlib/SRC/cgemlq.f
@@ -1,3 +1,4 @@
+*> \brief \b CGEMLQ
*
* Definition:
* ===========
@@ -143,7 +144,7 @@
*> \verbatim
*>
*> These details are particular for this LAPACK implementation. Users should not
-*> take them for granted. These details may change in the future, and are unlikely not
+*> take them for granted. These details may change in the future, and are not likely
*> true for another LAPACK implementation. These details are relevant if one wants
*> to try to understand the code. They are not part of the interface.
*>
diff --git a/lapack-netlib/SRC/cgemlqt.f b/lapack-netlib/SRC/cgemlqt.f
index e35e421b1..66b186bff 100644
--- a/lapack-netlib/SRC/cgemlqt.f
+++ b/lapack-netlib/SRC/cgemlqt.f
@@ -1,3 +1,5 @@
+*> \brief \b CGEMLQT
+*
* Definition:
* ===========
*
diff --git a/lapack-netlib/SRC/cgemqr.f b/lapack-netlib/SRC/cgemqr.f
index a43d7be5b..54ab7aa74 100644
--- a/lapack-netlib/SRC/cgemqr.f
+++ b/lapack-netlib/SRC/cgemqr.f
@@ -1,3 +1,4 @@
+*> \brief \b CGEMQR
*
* Definition:
* ===========
@@ -144,7 +145,7 @@
*> \verbatim
*>
*> These details are particular for this LAPACK implementation. Users should not
-*> take them for granted. These details may change in the future, and are unlikely not
+*> take them for granted. These details may change in the future, and are not likely
*> true for another LAPACK implementation. These details are relevant if one wants
*> to try to understand the code. They are not part of the interface.
*>
diff --git a/lapack-netlib/SRC/cgeqr.f b/lapack-netlib/SRC/cgeqr.f
index a00ef45c0..e0aea88b1 100644
--- a/lapack-netlib/SRC/cgeqr.f
+++ b/lapack-netlib/SRC/cgeqr.f
@@ -1,3 +1,4 @@
+*> \brief \b CGEQR
*
* Definition:
* ===========
@@ -17,7 +18,18 @@
* =============
*>
*> \verbatim
-*> CGEQR computes a QR factorization of an M-by-N matrix A.
+*>
+*> CGEQR computes a QR factorization of a complex M-by-N matrix A:
+*>
+*> A = Q * ( R ),
+*> ( 0 )
+*>
+*> where:
+*>
+*> Q is an M-by-M unitary matrix;
+*> R is an upper-triangular N-by-N matrix;
+*> 0 is an (M-N)-by-N zero matrix, if M > N.
+*>
*> \endverbatim
*
* Arguments:
@@ -138,7 +150,7 @@
*> \verbatim
*>
*> These details are particular for this LAPACK implementation. Users should not
-*> take them for granted. These details may change in the future, and are unlikely not
+*> take them for granted. These details may change in the future, and are not likely
*> true for another LAPACK implementation. These details are relevant if one wants
*> to try to understand the code. They are not part of the interface.
*>
@@ -160,10 +172,10 @@
SUBROUTINE CGEQR( M, N, A, LDA, T, TSIZE, WORK, LWORK,
$ INFO )
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. --
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, M, N, TSIZE, LWORK
diff --git a/lapack-netlib/SRC/cgeqr2.f b/lapack-netlib/SRC/cgeqr2.f
index 1b2030b47..8cb2fa119 100644
--- a/lapack-netlib/SRC/cgeqr2.f
+++ b/lapack-netlib/SRC/cgeqr2.f
@@ -33,8 +33,17 @@
*>
*> \verbatim
*>
-*> CGEQR2 computes a QR factorization of a complex m by n matrix A:
-*> A = Q * R.
+*> CGEQR2 computes a QR factorization of a complex m-by-n matrix A:
+*>
+*> A = Q * ( R ),
+*> ( 0 )
+*>
+*> where:
+*>
+*> Q is an m-by-m unitary matrix;
+*> R is an upper-triangular n-by-n matrix;
+*> 0 is an (m-n)-by-n zero matrix, if m > n.
+*>
*> \endverbatim
*
* Arguments:
@@ -96,7 +105,7 @@
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
-*> \date December 2016
+*> \date November 2019
*
*> \ingroup complexGEcomputational
*
@@ -121,10 +130,10 @@
* =====================================================================
SUBROUTINE CGEQR2( M, N, A, LDA, TAU, WORK, INFO )
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, M, N
diff --git a/lapack-netlib/SRC/cgeqr2p.f b/lapack-netlib/SRC/cgeqr2p.f
index 3c64255d9..1e7b980df 100644
--- a/lapack-netlib/SRC/cgeqr2p.f
+++ b/lapack-netlib/SRC/cgeqr2p.f
@@ -33,8 +33,18 @@
*>
*> \verbatim
*>
-*> CGEQR2P computes a QR factorization of a complex m by n matrix A:
-*> A = Q * R. The diagonal entries of R are real and nonnegative.
+*> CGEQR2P computes a QR factorization of a complex m-by-n matrix A:
+*>
+*> A = Q * ( R ),
+*> ( 0 )
+*>
+*> where:
+*>
+*> Q is an m-by-m unitary matrix;
+*> R is an upper-triangular n-by-n matrix with nonnegative diagonal
+*> entries;
+*> 0 is an (m-n)-by-n zero matrix, if m > n.
+*>
*> \endverbatim
*
* Arguments:
@@ -97,7 +107,7 @@
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
-*> \date December 2016
+*> \date November 2019
*
*> \ingroup complexGEcomputational
*
@@ -124,10 +134,10 @@
* =====================================================================
SUBROUTINE CGEQR2P( M, N, A, LDA, TAU, WORK, INFO )
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, M, N
diff --git a/lapack-netlib/SRC/cgeqrf.f b/lapack-netlib/SRC/cgeqrf.f
index 833384707..ff0c53f2f 100644
--- a/lapack-netlib/SRC/cgeqrf.f
+++ b/lapack-netlib/SRC/cgeqrf.f
@@ -34,7 +34,16 @@
*> \verbatim
*>
*> CGEQRF computes a QR factorization of a complex M-by-N matrix A:
-*> A = Q * R.
+*>
+*> A = Q * ( R ),
+*> ( 0 )
+*>
+*> where:
+*>
+*> Q is an M-by-M unitary matrix;
+*> R is an upper-triangular N-by-N matrix;
+*> 0 is an (M-N)-by-N zero matrix, if M > N.
+*>
*> \endverbatim
*
* Arguments:
@@ -111,7 +120,7 @@
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
-*> \date December 2016
+*> \date November 2019
*
*> \ingroup complexGEcomputational
*
@@ -136,10 +145,10 @@
* =====================================================================
SUBROUTINE CGEQRF( M, N, A, LDA, TAU, WORK, LWORK, INFO )
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, LWORK, M, N
diff --git a/lapack-netlib/SRC/cgeqrfp.f b/lapack-netlib/SRC/cgeqrfp.f
index a56508b4e..9c29ac90b 100644
--- a/lapack-netlib/SRC/cgeqrfp.f
+++ b/lapack-netlib/SRC/cgeqrfp.f
@@ -33,8 +33,18 @@
*>
*> \verbatim
*>
-*> CGEQRFP computes a QR factorization of a complex M-by-N matrix A:
-*> A = Q * R. The diagonal entries of R are real and nonnegative.
+*> CGEQRFP computes a QR factorization of a complex M-by-N matrix A:
+*>
+*> A = Q * ( R ),
+*> ( 0 )
+*>
+*> where:
+*>
+*> Q is an M-by-M unitary matrix;
+*> R is an upper-triangular N-by-N matrix with nonnegative diagonal
+*> entries;
+*> 0 is an (M-N)-by-N zero matrix, if M > N.
+*>
*> \endverbatim
*
* Arguments:
@@ -112,7 +122,7 @@
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
-*> \date December 2016
+*> \date November 2019
*
*> \ingroup complexGEcomputational
*
@@ -139,10 +149,10 @@
* =====================================================================
SUBROUTINE CGEQRFP( M, N, A, LDA, TAU, WORK, LWORK, INFO )
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, LWORK, M, N
diff --git a/lapack-netlib/SRC/cgerfsx.f b/lapack-netlib/SRC/cgerfsx.f
index 7b72f9c9a..a6e24ae4f 100644
--- a/lapack-netlib/SRC/cgerfsx.f
+++ b/lapack-netlib/SRC/cgerfsx.f
@@ -74,7 +74,7 @@
*> Specifies the form of the system of equations:
*> = 'N': A * X = B (No transpose)
*> = 'T': A**T * X = B (Transpose)
-*> = 'C': A**H * X = B (Conjugate transpose = Transpose)
+*> = 'C': A**H * X = B (Conjugate transpose)
*> \endverbatim
*>
*> \param[in] EQUED
@@ -283,7 +283,7 @@
*> information as described below. There currently are up to three
*> pieces of information returned for each right-hand side. If
*> componentwise accuracy is not requested (PARAMS(3) = 0.0), then
-*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most
+*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most
*> the first (:,N_ERR_BNDS) entries are returned.
*>
*> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith
@@ -319,14 +319,14 @@
*> \param[in] NPARAMS
*> \verbatim
*> NPARAMS is INTEGER
-*> Specifies the number of parameters set in PARAMS. If .LE. 0, the
+*> Specifies the number of parameters set in PARAMS. If <= 0, the
*> PARAMS array is never referenced and default values are used.
*> \endverbatim
*>
*> \param[in,out] PARAMS
*> \verbatim
*> PARAMS is REAL array, dimension NPARAMS
-*> Specifies algorithm parameters. If an entry is .LT. 0.0, then
+*> Specifies algorithm parameters. If an entry is < 0.0, then
*> that entry will be filled with default value used for that
*> parameter. Only positions up to NPARAMS are accessed; defaults
*> are used for higher-numbered parameters.
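The rewritten cgeqrf.f and cgeqrfp.f headers above spell out the block form A = Q * ( R ; 0 ). For readers connecting the pieces, here is a minimal sketch of the classic use of that factorization, a least-squares solve via CGEQRF, CUNMQR and CTRTRS (the last of which this patch adds to the exported CLAPACKOBJS); the data and the fixed LWORK are illustrative, not part of this patch:

      PROGRAM QRLS
*     Illustrative sketch, not part of this patch: solve the
*     overdetermined system min || b - A*x ||_2 using the QR
*     factorization A = Q * ( R ; 0 ) documented above.
      INTEGER M, N, LWORK
      PARAMETER ( M = 4, N = 2, LWORK = 64 )
      COMPLEX A( M, N ), B( M ), TAU( N ), WORK( LWORK )
      INTEGER INFO
      EXTERNAL CGEQRF, CUNMQR, CTRTRS
*     column 1 of A is all ones, column 2 is 1..4 (a small real-valued
*     example stored as COMPLEX); LWORK = 64 is a lazy upper bound for
*     this tiny case, a production caller would do an LWORK = -1 query
      DATA A / (1.0E0,0.0E0), (1.0E0,0.0E0), (1.0E0,0.0E0),
     $         (1.0E0,0.0E0), (1.0E0,0.0E0), (2.0E0,0.0E0),
     $         (3.0E0,0.0E0), (4.0E0,0.0E0) /
      DATA B / (6.0E0,0.0E0), (5.0E0,0.0E0), (7.0E0,0.0E0),
     $         (10.0E0,0.0E0) /
*     A = Q * ( R ; 0 ), Householder vectors returned in A and TAU
      CALL CGEQRF( M, N, A, M, TAU, WORK, LWORK, INFO )
*     b := Q**H * b
      CALL CUNMQR( 'L', 'C', M, 1, N, A, M, TAU, B, M, WORK, LWORK,
     $             INFO )
*     solve R * x = (Q**H * b)(1:N); x overwrites B(1:N)
      CALL CTRTRS( 'Upper', 'No transpose', 'Non-unit', N, 1, A, M,
     $             B, M, INFO )
      WRITE( *, * ) 'x =', B( 1 ), B( 2 )
      END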
@@ -334,9 +334,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/cgesc2.f b/lapack-netlib/SRC/cgesc2.f index c0b91107e..6f45a09a6 100644 --- a/lapack-netlib/SRC/cgesc2.f +++ b/lapack-netlib/SRC/cgesc2.f @@ -91,7 +91,7 @@ *> \verbatim *> SCALE is REAL *> On exit, SCALE contains the scale factor. SCALE is chosen -*> 0 <= SCALE <= 1 to prevent owerflow in the solution. +*> 0 <= SCALE <= 1 to prevent overflow in the solution. *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/cgesvdq.f b/lapack-netlib/SRC/cgesvdq.f new file mode 100644 index 000000000..77c883dde --- /dev/null +++ b/lapack-netlib/SRC/cgesvdq.f @@ -0,0 +1,1391 @@ +*> \brief CGESVDQ computes the singular value decomposition (SVD) with a QR-Preconditioned QR SVD Method for GE matrices +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download CGESVDQ + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE CGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, +* S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, +* CWORK, LCWORK, RWORK, LRWORK, INFO ) +* +* .. Scalar Arguments .. +* IMPLICIT NONE +* CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV +* INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LCWORK, LRWORK, +* INFO +* .. +* .. Array Arguments .. +* COMPLEX A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( * ) +* REAL S( * ), RWORK( * ) +* INTEGER IWORK( * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> CGESVDQ computes the singular value decomposition (SVD) of a complex +*> M-by-N matrix A, where M >= N. The SVD of A is written as +*> [++] [xx] [x0] [xx] +*> A = U * SIGMA * V^*, [++] = [xx] * [ox] * [xx] +*> [++] [xx] +*> where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal +*> matrix, and V is an N-by-N unitary matrix. The diagonal elements +*> of SIGMA are the singular values of A. The columns of U and V are the +*> left and the right singular vectors of A, respectively. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] JOBA +*> \verbatim +*> JOBA is CHARACTER*1 +*> Specifies the level of accuracy in the computed SVD +*> = 'A' The requested accuracy corresponds to having the backward +*> error bounded by || delta A ||_F <= f(m,n) * EPS * || A ||_F, +*> where EPS = SLAMCH('Epsilon'). This authorises CGESVDQ to +*> truncate the computed triangular factor in a rank revealing +*> QR factorization whenever the truncated part is below the +*> threshold of the order of EPS * ||A||_F. This is aggressive +*> truncation level. +*> = 'M' Similarly as with 'A', but the truncation is more gentle: it +*> is allowed only when there is a drop on the diagonal of the +*> triangular factor in the QR factorization. This is medium +*> truncation level. +*> = 'H' High accuracy requested. No numerical rank determination based +*> on the rank revealing QR factorization is attempted. 
+*> = 'E' Same as 'H', and in addition the condition number of column +*> scaled A is estimated and returned in RWORK(1). +*> N^(-1/4)*RWORK(1) <= ||pinv(A_scaled)||_2 <= N^(1/4)*RWORK(1) +*> \endverbatim +*> +*> \param[in] JOBP +*> \verbatim +*> JOBP is CHARACTER*1 +*> = 'P' The rows of A are ordered in decreasing order with respect to +*> ||A(i,:)||_\infty. This enhances numerical accuracy at the cost +*> of extra data movement. Recommended for numerical robustness. +*> = 'N' No row pivoting. +*> \endverbatim +*> +*> \param[in] JOBR +*> \verbatim +*> JOBR is CHARACTER*1 +*> = 'T' After the initial pivoted QR factorization, CGESVD is applied to +*> the adjoint R**H of the computed triangular factor R. This involves +*> some extra data movement (matrix transpositions). Useful for +*> experiments, research and development. +*> = 'N' The triangular factor R is given as input to CGESVD. This may be +*> preferred as it involves less data movement. +*> \endverbatim +*> +*> \param[in] JOBU +*> \verbatim +*> JOBU is CHARACTER*1 +*> = 'A' All M left singular vectors are computed and returned in the +*> matrix U. See the description of U. +*> = 'S' or 'U' N = min(M,N) left singular vectors are computed and returned +*> in the matrix U. See the description of U. +*> = 'R' Numerical rank NUMRANK is determined and only NUMRANK left singular +*> vectors are computed and returned in the matrix U. +*> = 'F' The N left singular vectors are returned in factored form as the +*> product of the Q factor from the initial QR factorization and the +*> N left singular vectors of (R**H , 0)**H. If row pivoting is used, +*> then the necessary information on the row pivoting is stored in +*> IWORK(N+1:N+M-1). +*> = 'N' The left singular vectors are not computed. +*> \endverbatim +*> +*> \param[in] JOBV +*> \verbatim +*> JOBV is CHARACTER*1 +*> = 'A', 'V' All N right singular vectors are computed and returned in +*> the matrix V. +*> = 'R' Numerical rank NUMRANK is determined and only NUMRANK right singular +*> vectors are computed and returned in the matrix V. This option is +*> allowed only if JOBU = 'R' or JOBU = 'N'; otherwise it is illegal. +*> = 'N' The right singular vectors are not computed. +*> \endverbatim +*> +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the input matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the input matrix A. M >= N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is COMPLEX array of dimensions LDA x N +*> On entry, the input matrix A. +*> On exit, if JOBU .NE. 'N' or JOBV .NE. 'N', the lower triangle of A contains +*> the Householder vectors as stored by CGEQP3. If JOBU = 'F', these Householder +*> vectors together with CWORK(1:N) can be used to restore the Q factors from +*> the initial pivoted QR factorization of A. See the description of U. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER. +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] S +*> \verbatim +*> S is REAL array of dimension N. +*> The singular values of A, ordered so that S(i) >= S(i+1). +*> \endverbatim +*> +*> \param[out] U +*> \verbatim +*> U is COMPLEX array, dimension +*> LDU x M if JOBU = 'A'; see the description of LDU. In this case, +*> on exit, U contains the M left singular vectors. +*> LDU x N if JOBU = 'S', 'U', 'R' ; see the description of LDU. 
In this +*> case, U contains the leading N or the leading NUMRANK left singular vectors. +*> LDU x N if JOBU = 'F' ; see the description of LDU. In this case U +*> contains N x N unitary matrix that can be used to form the left +*> singular vectors. +*> If JOBU = 'N', U is not referenced. +*> \endverbatim +*> +*> \param[in] LDU +*> \verbatim +*> LDU is INTEGER. +*> The leading dimension of the array U. +*> If JOBU = 'A', 'S', 'U', 'R', LDU >= max(1,M). +*> If JOBU = 'F', LDU >= max(1,N). +*> Otherwise, LDU >= 1. +*> \endverbatim +*> +*> \param[out] V +*> \verbatim +*> V is COMPLEX array, dimension +*> LDV x N if JOBV = 'A', 'V', 'R' or if JOBA = 'E' . +*> If JOBV = 'A', or 'V', V contains the N-by-N unitary matrix V**H; +*> If JOBV = 'R', V contains the first NUMRANK rows of V**H (the right +*> singular vectors, stored rowwise, of the NUMRANK largest singular values). +*> If JOBV = 'N' and JOBA = 'E', V is used as a workspace. +*> If JOBV = 'N', and JOBA.NE.'E', V is not referenced. +*> \endverbatim +*> +*> \param[in] LDV +*> \verbatim +*> LDV is INTEGER +*> The leading dimension of the array V. +*> If JOBV = 'A', 'V', 'R', or JOBA = 'E', LDV >= max(1,N). +*> Otherwise, LDV >= 1. +*> \endverbatim +*> +*> \param[out] NUMRANK +*> \verbatim +*> NUMRANK is INTEGER +*> NUMRANK is the numerical rank first determined after the rank +*> revealing QR factorization, following the strategy specified by the +*> value of JOBA. If JOBV = 'R' and JOBU = 'R', only NUMRANK +*> leading singular values and vectors are then requested in the call +*> of CGESVD. The final value of NUMRANK might be further reduced if +*> some singular values are computed as zeros. +*> \endverbatim +*> +*> \param[out] IWORK +*> \verbatim +*> IWORK is INTEGER array, dimension (max(1, LIWORK)). +*> On exit, IWORK(1:N) contains column pivoting permutation of the +*> rank revealing QR factorization. +*> If JOBP = 'P', IWORK(N+1:N+M-1) contains the indices of the sequence +*> of row swaps used in row pivoting. These can be used to restore the +*> left singular vectors in the case JOBU = 'F'. +*> +*> If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> LIWORK(1) returns the minimal LIWORK. +*> \endverbatim +*> +*> \param[in] LIWORK +*> \verbatim +*> LIWORK is INTEGER +*> The dimension of the array IWORK. +*> LIWORK >= N + M - 1, if JOBP = 'P'; +*> LIWORK >= N if JOBP = 'N'. +*> +*> If LIWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the CWORK, IWORK, and RWORK arrays, and no error +*> message related to LCWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] CWORK +*> \verbatim +*> CWORK is COMPLEX array, dimension (max(2, LCWORK)), used as a workspace. +*> On exit, if, on entry, LCWORK.NE.-1, CWORK(1:N) contains parameters +*> needed to recover the Q factor from the QR factorization computed by +*> CGEQP3. +*> +*> If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> CWORK(1) returns the optimal LCWORK, and +*> CWORK(2) returns the minimal LCWORK. +*> \endverbatim +*> +*> \param[in,out] LCWORK +*> \verbatim +*> LCWORK is INTEGER +*> The dimension of the array CWORK. 
It is determined as follows: +*> Let LWQP3 = N+1, LWCON = 2*N, and let +*> LWUNQ = { MAX( N, 1 ), if JOBU = 'R', 'S', or 'U' +*> { MAX( M, 1 ), if JOBU = 'A' +*> LWSVD = MAX( 3*N, 1 ) +*> LWLQF = MAX( N/2, 1 ), LWSVD2 = MAX( 3*(N/2), 1 ), LWUNLQ = MAX( N, 1 ), +*> LWQRF = MAX( N/2, 1 ), LWUNQ2 = MAX( N, 1 ) +*> Then the minimal value of LCWORK is: +*> = MAX( N + LWQP3, LWSVD ) if only the singular values are needed; +*> = MAX( N + LWQP3, LWCON, LWSVD ) if only the singular values are needed, +*> and a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD, LWUNQ ) if the singular values and the left +*> singular vectors are requested; +*> = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) if the singular values and the left +*> singular vectors are requested, and also +*> a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD ) if the singular values and the right +*> singular vectors are requested; +*> = N + MAX( LWQP3, LWCON, LWSVD ) if the singular values and the right +*> singular vectors are requested, and also +*> a scaled condition etimate requested; +*> +*> = N + MAX( LWQP3, LWSVD, LWUNQ ) if the full SVD is requested with JOBV = 'R'; +*> independent of JOBR; +*> = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) if the full SVD is requested, +*> JOBV = 'R' and, also a scaled condition +*> estimate requested; independent of JOBR; +*> = MAX( N + MAX( LWQP3, LWSVD, LWUNQ ), +*> N + MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, N/2+LWUNLQ, LWUNQ) ) if the +*> full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='N' +*> = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ), +*> N + MAX( LWQP3, LWCON, N/2+LWLQF, N/2+LWSVD2, N/2+LWUNLQ, LWUNQ ) ) +*> if the full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='N', and also a scaled condition number estimate +*> requested. +*> = MAX( N + MAX( LWQP3, LWSVD, LWUNQ ), +*> N + MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, N/2+LWUNQ2, LWUNQ ) ) if the +*> full SVD is requested with JOBV = 'A', 'V', and JOBR ='T' +*> = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ), +*> N + MAX( LWQP3, LWCON, N/2+LWQRF, N/2+LWSVD2, N/2+LWUNQ2, LWUNQ ) ) +*> if the full SVD is requested with JOBV = 'A', 'V' and +*> JOBR ='T', and also a scaled condition number estimate +*> requested. +*> Finally, LCWORK must be at least two: LCWORK = MAX( 2, LCWORK ). +*> +*> If LCWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the CWORK, IWORK, and RWORK arrays, and no error +*> message related to LCWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] RWORK +*> \verbatim +*> RWORK is REAL array, dimension (max(1, LRWORK)). +*> On exit, +*> 1. If JOBA = 'E', RWORK(1) contains an estimate of the condition +*> number of column scaled A. If A = C * D where D is diagonal and C +*> has unit columns in the Euclidean norm, then, assuming full column rank, +*> N^(-1/4) * RWORK(1) <= ||pinv(C)||_2 <= N^(1/4) * RWORK(1). +*> Otherwise, RWORK(1) = -1. +*> 2. RWORK(2) contains the number of singular values computed as +*> exact zeros in CGESVD applied to the upper triangular or trapeziodal +*> R (from the initial QR factorization). In case of early exit (no call to +*> CGESVD, such as in the case of zero matrix) RWORK(2) = -1. +*> +*> If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> RWORK(1) returns the minimal LRWORK. +*> \endverbatim +*> +*> \param[in] LRWORK +*> \verbatim +*> LRWORK is INTEGER. +*> The dimension of the array RWORK. 
+*> If JOBP = 'P', then LRWORK >= MAX(2, M, 5*N); +*> Otherwise, LRWORK >= MAX(2, 5*N). +*> +*> If LRWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the CWORK, IWORK, and RWORK arrays, and no error +*> message related to LRWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit. +*> < 0: if INFO = -i, the i-th argument had an illegal value. +*> > 0: if CBDSQR did not converge, INFO specifies how many superdiagonals +*> of an intermediate bidiagonal form B (computed in CGESVD) did not +*> converge to zero. +*> \endverbatim +* +*> \par Further Details: +* ======================== +*> +*> \verbatim +*> +*> 1. The data movement (matrix transpose) is coded using simple nested +*> DO-loops because BLAS and LAPACK do not provide corresponding subroutines. +*> Those DO-loops are easily identified in this source code by the CONTINUE +*> statements labeled with 11**. In an optimized version of this code, the +*> nested DO loops should be replaced with calls to an optimized subroutine. +*> 2. This code scales A by 1/SQRT(M) if the largest ABS(A(i,j)) could cause +*> column norm overflow. This is the minimal precaution and it is left to the +*> SVD routine (CGESVD) to do its own preemptive scaling if potential over- +*> or underflows are detected. To avoid repeated scanning of the array A, +*> an optimal implementation would do all necessary scaling before calling +*> CGESVD and the scaling in CGESVD can be switched off. +*> 3. Other comments related to code optimization are given in comments in the +*> code, enclosed in [[double brackets]]. +*> \endverbatim +* +*> \par Bugs, examples and comments +* =========================== +* +*> \verbatim +*> Please report all bugs and send interesting examples and/or comments to +*> drmac@math.hr. Thank you. +*> \endverbatim +* +*> \par References +* =============== +* +*> \verbatim +*> [1] Zlatko Drmac, Algorithm 977: A QR-Preconditioned QR SVD Method for +*> Computing the SVD with High Accuracy. ACM Trans. Math. Softw. +*> 44(1): 11:1-11:30 (2017) +*> +*> SIGMA library, xGESVDQ section updated February 2016. +*> Developed and coded by Zlatko Drmac, Department of Mathematics +*> University of Zagreb, Croatia, drmac@math.hr +*> \endverbatim +* +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> Developed and coded by Zlatko Drmac, Department of Mathematics +*> University of Zagreb, Croatia, drmac@math.hr +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2018 +* +*> \ingroup complexGEsing +* +* ===================================================================== + SUBROUTINE CGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, + $ S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, + $ CWORK, LCWORK, RWORK, LRWORK, INFO ) +* .. Scalar Arguments .. + IMPLICIT NONE + CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV + INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LCWORK, LRWORK, + $ INFO +* .. +* .. Array Arguments .. + COMPLEX A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( * ) + REAL S( * ), RWORK( * ) + INTEGER IWORK( * ) +* +* ===================================================================== +* +* .. Parameters .. + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + COMPLEX CZERO, CONE + PARAMETER ( CZERO = ( 0.0E0, 0.0E0 ), CONE = ( 1.0E0, 0.0E0 ) ) +* ..
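+*
+*     Outline of the steps implemented below: (1) test the input
+*     arguments; (2) compute the minimal and optimal workspace sizes,
+*     and return them if a workspace query was requested; (3) optional
+*     row sorting/swapping (JOBP = 'P') and scaling by 1/SQRT(M) to
+*     guard the QR factorization against overflow; (4) column-pivoted
+*     QR factorization (CGEQP3); (5) numerical rank decision on the
+*     triangular factor R according to JOBA; (6) SVD of R or R**H via
+*     CGESVD, as selected by JOBU/JOBV/JOBR; (7) assembly of U and/or
+*     V and restoration of the row/column permutations.
+*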
+* .. Local Scalars .. + INTEGER IERR, NR, N1, OPTRATIO, p, q + INTEGER LWCON, LWQP3, LWRK_CGELQF, LWRK_CGESVD, LWRK_CGESVD2, + $ LWRK_CGEQP3, LWRK_CGEQRF, LWRK_CUNMLQ, LWRK_CUNMQR, + $ LWRK_CUNMQR2, LWLQF, LWQRF, LWSVD, LWSVD2, LWUNQ, + $ LWUNQ2, LWUNLQ, MINWRK, MINWRK2, OPTWRK, OPTWRK2, + $ IMINWRK, RMINWRK + LOGICAL ACCLA, ACCLM, ACCLH, ASCALED, CONDA, DNTWU, DNTWV, + $ LQUERY, LSVC0, LSVEC, ROWPRM, RSVEC, RTRANS, WNTUA, + $ WNTUF, WNTUR, WNTUS, WNTVA, WNTVR + REAL BIG, EPSLN, RTMP, SCONDA, SFMIN + COMPLEX CTMP +* .. +* .. Local Arrays + COMPLEX CDUMMY(1) + REAL RDUMMY(1) +* .. +* .. External Subroutines (BLAS, LAPACK) + EXTERNAL CGELQF, CGEQP3, CGEQRF, CGESVD, CLACPY, CLAPMT, + $ CLASCL, CLASET, CLASWP, CSSCAL, SLASET, SLASCL, + $ CPOCON, CUNMLQ, CUNMQR, XERBLA +* .. +* .. External Functions (BLAS, LAPACK) + LOGICAL LSAME + INTEGER ISAMAX + REAL CLANGE, SCNRM2, SLAMCH + EXTERNAL CLANGE, LSAME, ISAMAX, SCNRM2, SLAMCH +* .. +* .. Intrinsic Functions .. + INTRINSIC ABS, CONJG, MAX, MIN, REAL, SQRT +* .. +* .. Executable Statements .. +* +* Test the input arguments +* + WNTUS = LSAME( JOBU, 'S' ) .OR. LSAME( JOBU, 'U' ) + WNTUR = LSAME( JOBU, 'R' ) + WNTUA = LSAME( JOBU, 'A' ) + WNTUF = LSAME( JOBU, 'F' ) + LSVC0 = WNTUS .OR. WNTUR .OR. WNTUA + LSVEC = LSVC0 .OR. WNTUF + DNTWU = LSAME( JOBU, 'N' ) +* + WNTVR = LSAME( JOBV, 'R' ) + WNTVA = LSAME( JOBV, 'A' ) .OR. LSAME( JOBV, 'V' ) + RSVEC = WNTVR .OR. WNTVA + DNTWV = LSAME( JOBV, 'N' ) +* + ACCLA = LSAME( JOBA, 'A' ) + ACCLM = LSAME( JOBA, 'M' ) + CONDA = LSAME( JOBA, 'E' ) + ACCLH = LSAME( JOBA, 'H' ) .OR. CONDA +* + ROWPRM = LSAME( JOBP, 'P' ) + RTRANS = LSAME( JOBR, 'T' ) +* + IF ( ROWPRM ) THEN + IMINWRK = MAX( 1, N + M - 1 ) + RMINWRK = MAX( 2, M, 5*N ) + ELSE + IMINWRK = MAX( 1, N ) + RMINWRK = MAX( 2, 5*N ) + END IF + LQUERY = (LIWORK .EQ. -1 .OR. LCWORK .EQ. -1 .OR. LRWORK .EQ. -1) + INFO = 0 + IF ( .NOT. ( ACCLA .OR. ACCLM .OR. ACCLH ) ) THEN + INFO = -1 + ELSE IF ( .NOT.( ROWPRM .OR. LSAME( JOBP, 'N' ) ) ) THEN + INFO = -2 + ELSE IF ( .NOT.( RTRANS .OR. LSAME( JOBR, 'N' ) ) ) THEN + INFO = -3 + ELSE IF ( .NOT.( LSVEC .OR. DNTWU ) ) THEN + INFO = -4 + ELSE IF ( WNTUR .AND. WNTVA ) THEN + INFO = -5 + ELSE IF ( .NOT.( RSVEC .OR. DNTWV )) THEN + INFO = -5 + ELSE IF ( M.LT.0 ) THEN + INFO = -6 + ELSE IF ( ( N.LT.0 ) .OR. ( N.GT.M ) ) THEN + INFO = -7 + ELSE IF ( LDA.LT.MAX( 1, M ) ) THEN + INFO = -9 + ELSE IF ( LDU.LT.1 .OR. ( LSVC0 .AND. LDU.LT.M ) .OR. + $ ( WNTUF .AND. LDU.LT.N ) ) THEN + INFO = -12 + ELSE IF ( LDV.LT.1 .OR. ( RSVEC .AND. LDV.LT.N ) .OR. + $ ( CONDA .AND. LDV.LT.N ) ) THEN + INFO = -14 + ELSE IF ( LIWORK .LT. IMINWRK .AND. .NOT. LQUERY ) THEN + INFO = -17 + END IF +* +* + IF ( INFO .EQ. 0 ) THEN +* +* Compute workspace +* .. compute the minimal and the optimal workspace lengths +* [[The expressions for computing the minimal and the optimal +* values of LCWORK are written with a lot of redundancy and +* can be simplified. However, this detailed form is easier for +* maintenance and modifications of the code.]] +* +* .. minimal workspace length for CGEQP3 of an M x N matrix + LWQP3 = N+1 +* .. minimal workspace length for CUNMQR to build left singular vectors + IF ( WNTUS .OR. WNTUR ) THEN + LWUNQ = MAX( N , 1 ) + ELSE IF ( WNTUA ) THEN + LWUNQ = MAX( M , 1 ) + END IF +* .. minimal workspace length for CPOCON of an N x N matrix + LWCON = 2 * N +* .. 
CGESVD of an N x N matrix + LWSVD = MAX( 3 * N, 1 ) + IF ( LQUERY ) THEN + CALL CGEQP3( M, N, A, LDA, IWORK, CDUMMY, CDUMMY, -1, + $ RDUMMY, IERR ) + LWRK_CGEQP3 = INT( CDUMMY(1) ) + IF ( WNTUS .OR. WNTUR ) THEN + CALL CUNMQR( 'L', 'N', M, N, N, A, LDA, CDUMMY, U, + $ LDU, CDUMMY, -1, IERR ) + LWRK_CUNMQR = INT( CDUMMY(1) ) + ELSE IF ( WNTUA ) THEN + CALL CUNMQR( 'L', 'N', M, M, N, A, LDA, CDUMMY, U, + $ LDU, CDUMMY, -1, IERR ) + LWRK_CUNMQR = INT( CDUMMY(1) ) + ELSE + LWRK_CUNMQR = 0 + END IF + END IF + MINWRK = 2 + OPTWRK = 2 + IF ( .NOT. (LSVEC .OR. RSVEC )) THEN +* .. minimal and optimal sizes of the complex workspace if +* only the singular values are requested + IF ( CONDA ) THEN + MINWRK = MAX( N+LWQP3, LWCON, LWSVD ) + ELSE + MINWRK = MAX( N+LWQP3, LWSVD ) + END IF + IF ( LQUERY ) THEN + CALL CGESVD( 'N', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_CGESVD = INT( CDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = MAX( N+LWRK_CGEQP3, N+LWCON, LWRK_CGESVD ) + ELSE + OPTWRK = MAX( N+LWRK_CGEQP3, LWRK_CGESVD ) + END IF + END IF + ELSE IF ( LSVEC .AND. (.NOT.RSVEC) ) THEN +* .. minimal and optimal sizes of the complex workspace if the +* singular values and the left singular vectors are requested + IF ( CONDA ) THEN + MINWRK = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) + ELSE + MINWRK = N + MAX( LWQP3, LWSVD, LWUNQ ) + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL CGESVD( 'N', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + ELSE + CALL CGESVD( 'O', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + END IF + LWRK_CGESVD = INT( CDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = N + MAX( LWRK_CGEQP3, LWCON, LWRK_CGESVD, + $ LWRK_CUNMQR ) + ELSE + OPTWRK = N + MAX( LWRK_CGEQP3, LWRK_CGESVD, + $ LWRK_CUNMQR ) + END IF + END IF + ELSE IF ( RSVEC .AND. (.NOT.LSVEC) ) THEN +* .. minimal and optimal sizes of the complex workspace if the +* singular values and the right singular vectors are requested + IF ( CONDA ) THEN + MINWRK = N + MAX( LWQP3, LWCON, LWSVD ) + ELSE + MINWRK = N + MAX( LWQP3, LWSVD ) + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL CGESVD( 'O', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + ELSE + CALL CGESVD( 'N', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + END IF + LWRK_CGESVD = INT( CDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = N + MAX( LWRK_CGEQP3, LWCON, LWRK_CGESVD ) + ELSE + OPTWRK = N + MAX( LWRK_CGEQP3, LWRK_CGESVD ) + END IF + END IF + ELSE +* .. minimal and optimal sizes of the complex workspace if the +* full SVD is requested + IF ( RTRANS ) THEN + MINWRK = MAX( LWQP3, LWSVD, LWUNQ ) + IF ( CONDA ) MINWRK = MAX( MINWRK, LWCON ) + MINWRK = MINWRK + N + IF ( WNTVA ) THEN +* .. minimal workspace length for N x N/2 CGEQRF + LWQRF = MAX( N/2, 1 ) +* .. minimal workspace length for N/2 x N/2 CGESVD + LWSVD2 = MAX( 3 * (N/2), 1 ) + LWUNQ2 = MAX( N, 1 ) + MINWRK2 = MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, + $ N/2+LWUNQ2, LWUNQ ) + IF ( CONDA ) MINWRK2 = MAX( MINWRK2, LWCON ) + MINWRK2 = N + MINWRK2 + MINWRK = MAX( MINWRK, MINWRK2 ) + END IF + ELSE + MINWRK = MAX( LWQP3, LWSVD, LWUNQ ) + IF ( CONDA ) MINWRK = MAX( MINWRK, LWCON ) + MINWRK = MINWRK + N + IF ( WNTVA ) THEN +* ..
minimal workspace length for N/2 x N CGELQF + LWLQF = MAX( N/2, 1 ) + LWSVD2 = MAX( 3 * (N/2), 1 ) + LWUNLQ = MAX( N , 1 ) + MINWRK2 = MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, + $ N/2+LWUNLQ, LWUNQ ) + IF ( CONDA ) MINWRK2 = MAX( MINWRK2, LWCON ) + MINWRK2 = N + MINWRK2 + MINWRK = MAX( MINWRK, MINWRK2 ) + END IF + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL CGESVD( 'O', 'A', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_CGESVD = INT( CDUMMY(1) ) + OPTWRK = MAX(LWRK_CGEQP3,LWRK_CGESVD,LWRK_CUNMQR) + IF ( CONDA ) OPTWRK = MAX( OPTWRK, LWCON ) + OPTWRK = N + OPTWRK + IF ( WNTVA ) THEN + CALL CGEQRF(N,N/2,U,LDU,CDUMMY,CDUMMY,-1,IERR) + LWRK_CGEQRF = INT( CDUMMY(1) ) + CALL CGESVD( 'S', 'O', N/2,N/2, V,LDV, S, U,LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_CGESVD2 = INT( CDUMMY(1) ) + CALL CUNMQR( 'R', 'C', N, N, N/2, U, LDU, CDUMMY, + $ V, LDV, CDUMMY, -1, IERR ) + LWRK_CUNMQR2 = INT( CDUMMY(1) ) + OPTWRK2 = MAX( LWRK_CGEQP3, N/2+LWRK_CGEQRF, + $ N/2+LWRK_CGESVD2, N/2+LWRK_CUNMQR2 ) + IF ( CONDA ) OPTWRK2 = MAX( OPTWRK2, LWCON ) + OPTWRK2 = N + OPTWRK2 + OPTWRK = MAX( OPTWRK, OPTWRK2 ) + END IF + ELSE + CALL CGESVD( 'S', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_CGESVD = INT( CDUMMY(1) ) + OPTWRK = MAX(LWRK_CGEQP3,LWRK_CGESVD,LWRK_CUNMQR) + IF ( CONDA ) OPTWRK = MAX( OPTWRK, LWCON ) + OPTWRK = N + OPTWRK + IF ( WNTVA ) THEN + CALL CGELQF(N/2,N,U,LDU,CDUMMY,CDUMMY,-1,IERR) + LWRK_CGELQF = INT( CDUMMY(1) ) + CALL CGESVD( 'S','O', N/2,N/2, V, LDV, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_CGESVD2 = INT( CDUMMY(1) ) + CALL CUNMLQ( 'R', 'N', N, N, N/2, U, LDU, CDUMMY, + $ V, LDV, CDUMMY,-1,IERR ) + LWRK_CUNMLQ = INT( CDUMMY(1) ) + OPTWRK2 = MAX( LWRK_CGEQP3, N/2+LWRK_CGELQF, + $ N/2+LWRK_CGESVD2, N/2+LWRK_CUNMLQ ) + IF ( CONDA ) OPTWRK2 = MAX( OPTWRK2, LWCON ) + OPTWRK2 = N + OPTWRK2 + OPTWRK = MAX( OPTWRK, OPTWRK2 ) + END IF + END IF + END IF + END IF +* + MINWRK = MAX( 2, MINWRK ) + OPTWRK = MAX( 2, OPTWRK ) + IF ( LCWORK .LT. MINWRK .AND. (.NOT.LQUERY) ) INFO = -19 +* + END IF +* + IF (INFO .EQ. 0 .AND. LRWORK .LT. RMINWRK .AND. .NOT. LQUERY) THEN + INFO = -21 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGESVDQ', -INFO ) + RETURN + ELSE IF ( LQUERY ) THEN +* +* Return optimal workspace +* + IWORK(1) = IMINWRK + CWORK(1) = OPTWRK + CWORK(2) = MINWRK + RWORK(1) = RMINWRK + RETURN + END IF +* +* Quick return if the matrix is void. +* + IF( ( M.EQ.0 ) .OR. ( N.EQ.0 ) ) THEN +* .. all output is void. + RETURN + END IF +* + BIG = SLAMCH('O') + ASCALED = .FALSE. + IF ( ROWPRM ) THEN +* .. reordering the rows in decreasing sequence in the +* ell-infinity norm - this enhances numerical robustness in +* the case of differently scaled rows. + DO 1904 p = 1, M +* RWORK(p) = ABS( A(p,ICAMAX(N,A(p,1),LDA)) ) +* [[CLANGE will return NaN if an entry of the p-th row is Nan]] + RWORK(p) = CLANGE( 'M', 1, N, A(p,1), LDA, RDUMMY ) +* .. check for NaN's and Inf's + IF ( ( RWORK(p) .NE. RWORK(p) ) .OR. + $ ( (RWORK(p)*ZERO) .NE. ZERO ) ) THEN + INFO = - 8 + CALL XERBLA( 'CGESVDQ', -INFO ) + RETURN + END IF + 1904 CONTINUE + DO 1952 p = 1, M - 1 + q = ISAMAX( M-p+1, RWORK(p), 1 ) + p - 1 + IWORK(N+p) = q + IF ( p .NE. q ) THEN + RTMP = RWORK(p) + RWORK(p) = RWORK(q) + RWORK(q) = RTMP + END IF + 1952 CONTINUE +* + IF ( RWORK(1) .EQ. ZERO ) THEN +* Quick return: A is the M x N zero matrix. 
+ NUMRANK = 0 + CALL SLASET( 'G', N, 1, ZERO, ZERO, S, N ) + IF ( WNTUS ) CALL CLASET('G', M, N, CZERO, CONE, U, LDU) + IF ( WNTUA ) CALL CLASET('G', M, M, CZERO, CONE, U, LDU) + IF ( WNTVA ) CALL CLASET('G', N, N, CZERO, CONE, V, LDV) + IF ( WNTUF ) THEN + CALL CLASET( 'G', N, 1, CZERO, CZERO, CWORK, N ) + CALL CLASET( 'G', M, N, CZERO, CONE, U, LDU ) + END IF + DO 5001 p = 1, N + IWORK(p) = p + 5001 CONTINUE + IF ( ROWPRM ) THEN + DO 5002 p = N + 1, N + M - 1 + IWORK(p) = p - N + 5002 CONTINUE + END IF + IF ( CONDA ) RWORK(1) = -1 + RWORK(2) = -1 + RETURN + END IF +* + IF ( RWORK(1) .GT. BIG / SQRT(REAL(M)) ) THEN +* .. to prevent overflow in the QR factorization, scale the +* matrix by 1/sqrt(M) if too large entry detected + CALL CLASCL('G',0,0,SQRT(REAL(M)),ONE, M,N, A,LDA, IERR) + ASCALED = .TRUE. + END IF + CALL CLASWP( N, A, LDA, 1, M-1, IWORK(N+1), 1 ) + END IF +* +* .. At this stage, preemptive scaling is done only to avoid column +* norms overflows during the QR factorization. The SVD procedure should +* have its own scaling to save the singular values from overflows and +* underflows. That depends on the SVD procedure. +* + IF ( .NOT.ROWPRM ) THEN + RTMP = CLANGE( 'M', M, N, A, LDA, RWORK ) + IF ( ( RTMP .NE. RTMP ) .OR. + $ ( (RTMP*ZERO) .NE. ZERO ) ) THEN + INFO = - 8 + CALL XERBLA( 'CGESVDQ', -INFO ) + RETURN + END IF + IF ( RTMP .GT. BIG / SQRT(REAL(M)) ) THEN +* .. to prevent overflow in the QR factorization, scale the +* matrix by 1/sqrt(M) if too large entry detected + CALL CLASCL('G',0,0, SQRT(REAL(M)),ONE, M,N, A,LDA, IERR) + ASCALED = .TRUE. + END IF + END IF +* +* .. QR factorization with column pivoting +* +* A * P = Q * [ R ] +* [ 0 ] +* + DO 1963 p = 1, N +* .. all columns are free columns + IWORK(p) = 0 + 1963 CONTINUE + CALL CGEQP3( M, N, A, LDA, IWORK, CWORK, CWORK(N+1), LCWORK-N, + $ RWORK, IERR ) +* +* If the user requested accuracy level allows truncation in the +* computed upper triangular factor, the matrix R is examined and, +* if possible, replaced with its leading upper trapezoidal part. +* + EPSLN = SLAMCH('E') + SFMIN = SLAMCH('S') +* SMALL = SFMIN / EPSLN + NR = N +* + IF ( ACCLA ) THEN +* +* Standard absolute error bound suffices. All sigma_i with +* sigma_i < N*EPS*||A||_F are flushed to zero. This is an +* aggressive enforcement of lower numerical rank by introducing a +* backward error of the order of N*EPS*||A||_F. + NR = 1 + RTMP = SQRT(REAL(N))*EPSLN + DO 3001 p = 2, N + IF ( ABS(A(p,p)) .LT. (RTMP*ABS(A(1,1))) ) GO TO 3002 + NR = NR + 1 + 3001 CONTINUE + 3002 CONTINUE +* + ELSEIF ( ACCLM ) THEN +* .. similarly as above, only slightly more gentle (less aggressive). +* Sudden drop on the diagonal of R is used as the criterion for being +* close-to-rank-deficient. The threshold is set to EPSLN=SLAMCH('E'). +* [[This can be made more flexible by replacing this hard-coded value +* with a user specified threshold.]] Also, the values that underflow +* will be truncated. + NR = 1 + DO 3401 p = 2, N + IF ( ( ABS(A(p,p)) .LT. (EPSLN*ABS(A(p-1,p-1))) ) .OR. + $ ( ABS(A(p,p)) .LT. SFMIN ) ) GO TO 3402 + NR = NR + 1 + 3401 CONTINUE + 3402 CONTINUE +* + ELSE +* .. RRQR not authorized to determine numerical rank except in the +* obvious case of zero pivots. +* .. inspect R for exact zeros on the diagonal; +* R(i,i)=0 => R(i:N,i:N)=0. + NR = 1 + DO 3501 p = 2, N + IF ( ABS(A(p,p)) .EQ. ZERO ) GO TO 3502 + NR = NR + 1 + 3501 CONTINUE + 3502 CONTINUE +* + IF ( CONDA ) THEN +* Estimate the scaled condition number of A. 
Use the fact that it is +* the same as the scaled condition number of R. +* .. V is used as workspace + CALL CLACPY( 'U', N, N, A, LDA, V, LDV ) +* Only the leading NR x NR submatrix of the triangular factor +* is considered. Only if NR=N will this give a reliable error +* bound. However, even for NR < N, this can be used on an +* expert level and obtain useful information in the sense of +* perturbation theory. + DO 3053 p = 1, NR + RTMP = SCNRM2( p, V(1,p), 1 ) + CALL CSSCAL( p, ONE/RTMP, V(1,p), 1 ) + 3053 CONTINUE + IF ( .NOT. ( LSVEC .OR. RSVEC ) ) THEN + CALL CPOCON( 'U', NR, V, LDV, ONE, RTMP, + $ CWORK, RWORK, IERR ) + ELSE + CALL CPOCON( 'U', NR, V, LDV, ONE, RTMP, + $ CWORK(N+1), RWORK, IERR ) + END IF + SCONDA = ONE / SQRT(RTMP) +* For NR=N, SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1), +* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA +* See the reference [1] for more details. + END IF +* + ENDIF +* + IF ( WNTUR ) THEN + N1 = NR + ELSE IF ( WNTUS .OR. WNTUF) THEN + N1 = N + ELSE IF ( WNTUA ) THEN + N1 = M + END IF +* + IF ( .NOT. ( RSVEC .OR. LSVEC ) ) THEN +*....................................................................... +* .. only the singular values are requested +*....................................................................... + IF ( RTRANS ) THEN +* +* .. compute the singular values of R**H = [A](1:NR,1:N)**H +* .. set the lower triangle of [A] to [A](1:NR,1:N)**H and +* the upper triangle of [A] to zero. + DO 1146 p = 1, MIN( N, NR ) + A(p,p) = CONJG(A(p,p)) + DO 1147 q = p + 1, N + A(q,p) = CONJG(A(p,q)) + IF ( q .LE. NR ) A(p,q) = CZERO + 1147 CONTINUE + 1146 CONTINUE +* + CALL CGESVD( 'N', 'N', N, NR, A, LDA, S, U, LDU, + $ V, LDV, CWORK, LCWORK, RWORK, INFO ) +* + ELSE +* +* .. compute the singular values of R = [A](1:NR,1:N) +* + IF ( NR .GT. 1 ) + $ CALL CLASET( 'L', NR-1,NR-1, CZERO,CZERO, A(2,1), LDA ) + CALL CGESVD( 'N', 'N', NR, N, A, LDA, S, U, LDU, + $ V, LDV, CWORK, LCWORK, RWORK, INFO ) +* + END IF +* + ELSE IF ( LSVEC .AND. ( .NOT. RSVEC) ) THEN +*....................................................................... +* .. the singular values and the left singular vectors requested +*....................................................................... + IF ( RTRANS ) THEN +* .. apply CGESVD to R**H +* .. copy R**H into [U] and overwrite [U] with the right singular +* vectors of R + DO 1192 p = 1, NR + DO 1193 q = p, N + U(q,p) = CONJG(A(p,q)) + 1193 CONTINUE + 1192 CONTINUE + IF ( NR .GT. 1 ) + $ CALL CLASET( 'U', NR-1,NR-1, CZERO,CZERO, U(1,2), LDU ) +* .. the left singular vectors not computed, the NR right singular +* vectors overwrite [U](1:NR,1:NR) as conjugate transposed. These +* will be pre-multiplied by Q to build the left singular vectors of A. + CALL CGESVD( 'N', 'O', N, NR, U, LDU, S, U, LDU, + $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO ) +* + DO 1119 p = 1, NR + U(p,p) = CONJG(U(p,p)) + DO 1120 q = p + 1, NR + CTMP = CONJG(U(q,p)) + U(q,p) = CONJG(U(p,q)) + U(p,q) = CTMP + 1120 CONTINUE + 1119 CONTINUE +* + ELSE +* .. apply CGESVD to R +* .. copy R into [U] and overwrite [U] with the left singular vectors + CALL CLACPY( 'U', NR, N, A, LDA, U, LDU ) + IF ( NR .GT. 1 ) + $ CALL CLASET( 'L', NR-1, NR-1, CZERO, CZERO, U(2,1), LDU ) +* .. the right singular vectors not computed, the NR left singular +* vectors overwrite [U](1:NR,1:NR) + CALL CGESVD( 'O', 'N', NR, N, U, LDU, S, U, LDU, + $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO ) +* .. now [U](1:NR,1:NR) contains the NR left singular vectors of +* R.
These will be pre-multiplied by Q to build the left singular +* vectors of A. + END IF +* +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. ( .NOT.WNTUF ) ) THEN + CALL CLASET('A', M-NR, NR, CZERO, CZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL CLASET( 'A',NR,N1-NR,CZERO,CZERO,U(1,NR+1), LDU ) + CALL CLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* +* The Q matrix from the first QRF is built into the left singular +* vectors matrix U. +* + IF ( .NOT.WNTUF ) + $ CALL CUNMQR( 'L', 'N', M, N1, N, A, LDA, CWORK, U, + $ LDU, CWORK(N+1), LCWORK-N, IERR ) + IF ( ROWPRM .AND. .NOT.WNTUF ) + $ CALL CLASWP( N1, U, LDU, 1, M-1, IWORK(N+1), -1 ) +* + ELSE IF ( RSVEC .AND. ( .NOT. LSVEC ) ) THEN +*....................................................................... +* .. the singular values and the right singular vectors requested +*....................................................................... + IF ( RTRANS ) THEN +* .. apply CGESVD to R**H +* .. copy R**H into V and overwrite V with the left singular vectors + DO 1165 p = 1, NR + DO 1166 q = p, N + V(q,p) = CONJG(A(p,q)) + 1166 CONTINUE + 1165 CONTINUE + IF ( NR .GT. 1 ) + $ CALL CLASET( 'U', NR-1,NR-1, CZERO,CZERO, V(1,2), LDV ) +* .. the left singular vectors of R**H overwrite V, the right singular +* vectors not computed + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN + CALL CGESVD( 'O', 'N', N, NR, V, LDV, S, U, LDU, + $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO ) +* + DO 1121 p = 1, NR + V(p,p) = CONJG(V(p,p)) + DO 1122 q = p + 1, NR + CTMP = CONJG(V(q,p)) + V(q,p) = CONJG(V(p,q)) + V(p,q) = CTMP + 1122 CONTINUE + 1121 CONTINUE +* + IF ( NR .LT. N ) THEN + DO 1103 p = 1, NR + DO 1104 q = NR + 1, N + V(p,q) = CONJG(V(q,p)) + 1104 CONTINUE + 1103 CONTINUE + END IF + CALL CLAPMT( .FALSE., NR, N, V, LDV, IWORK ) + ELSE +* .. need all N right singular vectors and NR < N +* [!] This is a simple implementation that augments [V](1:N,1:NR) +* by padding a zero block. In the case NR << N, a more efficient +* way is to first use the QR factorization. For more details on +* how to implement this, see the " FULL SVD " branch. + CALL CLASET('G', N, N-NR, CZERO, CZERO, V(1,NR+1), LDV) + CALL CGESVD( 'O', 'N', N, N, V, LDV, S, U, LDU, + $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO ) +* + DO 1123 p = 1, N + V(p,p) = CONJG(V(p,p)) + DO 1124 q = p + 1, N + CTMP = CONJG(V(q,p)) + V(q,p) = CONJG(V(p,q)) + V(p,q) = CTMP + 1124 CONTINUE + 1123 CONTINUE + CALL CLAPMT( .FALSE., N, N, V, LDV, IWORK ) + END IF +* + ELSE +* .. apply CGESVD to R +* .. copy R into V and overwrite V with the right singular vectors + CALL CLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL CLASET( 'L', NR-1, NR-1, CZERO, CZERO, V(2,1), LDV ) +* .. the right singular vectors overwrite V, the NR left singular +* vectors stored in U(1:NR,1:NR) + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN + CALL CGESVD( 'N', 'O', NR, N, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO ) + CALL CLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* .. now [V](1:NR,1:N) contains V(1:N,1:NR)**H + ELSE +* .. need all N right singular vectors and NR < N +* [!] This is a simple implementation that augments [V](1:NR,1:N) +* by padding a zero block. In the case NR << N, a more efficient +* way is to first use the LQ factorization. For more details on +* how to implement this, see the " FULL SVD " branch.
+ CALL CLASET('G', N-NR, N, CZERO,CZERO, V(NR+1,1), LDV) + CALL CGESVD( 'N', 'O', N, N, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO ) + CALL CLAPMT( .FALSE., N, N, V, LDV, IWORK ) + END IF +* .. now [V] contains the adjoint of the matrix of the right singular +* vectors of A. + END IF +* + ELSE +*....................................................................... +* .. FULL SVD requested +*....................................................................... + IF ( RTRANS ) THEN +* +* .. apply CGESVD to R**H [[this option is left for R&D&T]] +* + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN +* .. copy R**H into [V] and overwrite [V] with the left singular +* vectors of R**H + DO 1168 p = 1, NR + DO 1169 q = p, N + V(q,p) = CONJG(A(p,q)) + 1169 CONTINUE + 1168 CONTINUE + IF ( NR .GT. 1 ) + $ CALL CLASET( 'U', NR-1,NR-1, CZERO,CZERO, V(1,2), LDV ) +* +* .. the left singular vectors of R**H overwrite [V], the NR right +* singular vectors of R**H stored in [U](1:NR,1:NR) as conjugate +* transposed + CALL CGESVD( 'O', 'A', N, NR, V, LDV, S, V, LDV, + $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO ) +* .. assemble V + DO 1115 p = 1, NR + V(p,p) = CONJG(V(p,p)) + DO 1116 q = p + 1, NR + CTMP = CONJG(V(q,p)) + V(q,p) = CONJG(V(p,q)) + V(p,q) = CTMP + 1116 CONTINUE + 1115 CONTINUE + IF ( NR .LT. N ) THEN + DO 1101 p = 1, NR + DO 1102 q = NR+1, N + V(p,q) = CONJG(V(q,p)) + 1102 CONTINUE + 1101 CONTINUE + END IF + CALL CLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* + DO 1117 p = 1, NR + U(p,p) = CONJG(U(p,p)) + DO 1118 q = p + 1, NR + CTMP = CONJG(U(q,p)) + U(q,p) = CONJG(U(p,q)) + U(p,q) = CTMP + 1118 CONTINUE + 1117 CONTINUE +* + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL CLASET('A', M-NR,NR, CZERO,CZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL CLASET('A',NR,N1-NR,CZERO,CZERO,U(1,NR+1),LDU) + CALL CLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* + ELSE +* .. need all N right singular vectors and NR < N +* .. copy R**H into [V] and overwrite [V] with the left singular +* vectors of R**H +* [[The optimal ratio N/NR for using QRF instead of padding +* with zeros. Here hard coded to 2; it must be at least +* two due to work space constraints.]] +* OPTRATIO = ILAENV(6, 'CGESVD', 'S' // 'O', NR,N,0,0) +* OPTRATIO = MAX( OPTRATIO, 2 ) + OPTRATIO = 2 + IF ( OPTRATIO*NR .GT. N ) THEN + DO 1198 p = 1, NR + DO 1199 q = p, N + V(q,p) = CONJG(A(p,q)) + 1199 CONTINUE + 1198 CONTINUE + IF ( NR .GT. 1 ) + $ CALL CLASET('U',NR-1,NR-1, CZERO,CZERO, V(1,2),LDV) +* + CALL CLASET('A',N,N-NR,CZERO,CZERO,V(1,NR+1),LDV) + CALL CGESVD( 'O', 'A', N, N, V, LDV, S, V, LDV, + $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO ) +* + DO 1113 p = 1, N + V(p,p) = CONJG(V(p,p)) + DO 1114 q = p + 1, N + CTMP = CONJG(V(q,p)) + V(q,p) = CONJG(V(p,q)) + V(p,q) = CTMP + 1114 CONTINUE + 1113 CONTINUE + CALL CLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x N1), i.e. (M x N) or (M x M). +* + DO 1111 p = 1, N + U(p,p) = CONJG(U(p,p)) + DO 1112 q = p + 1, N + CTMP = CONJG(U(q,p)) + U(q,p) = CONJG(U(p,q)) + U(p,q) = CTMP + 1112 CONTINUE + 1111 CONTINUE +* + IF ( ( N .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL CLASET('A',M-N,N,CZERO,CZERO,U(N+1,1),LDU) + IF ( N .LT. N1 ) THEN + CALL CLASET('A',N,N1-N,CZERO,CZERO,U(1,N+1),LDU) + CALL CLASET('A',M-N,N1-N,CZERO,CONE, + $ U(N+1,N+1), LDU ) + END IF + END IF + ELSE +* .. 
copy R**H into [U] and overwrite [U] with the right +* singular vectors of R + DO 1196 p = 1, NR + DO 1197 q = p, N + U(q,NR+p) = CONJG(A(p,q)) + 1197 CONTINUE + 1196 CONTINUE + IF ( NR .GT. 1 ) + $ CALL CLASET('U',NR-1,NR-1,CZERO,CZERO,U(1,NR+2),LDU) + CALL CGEQRF( N, NR, U(1,NR+1), LDU, CWORK(N+1), + $ CWORK(N+NR+1), LCWORK-N-NR, IERR ) + DO 1143 p = 1, NR + DO 1144 q = 1, N + V(q,p) = CONJG(U(p,NR+q)) + 1144 CONTINUE + 1143 CONTINUE + CALL CLASET('U',NR-1,NR-1,CZERO,CZERO,V(1,2),LDV) + CALL CGESVD( 'S', 'O', NR, NR, V, LDV, S, U, LDU, + $ V,LDV, CWORK(N+NR+1),LCWORK-N-NR,RWORK, INFO ) + CALL CLASET('A',N-NR,NR,CZERO,CZERO,V(NR+1,1),LDV) + CALL CLASET('A',NR,N-NR,CZERO,CZERO,V(1,NR+1),LDV) + CALL CLASET('A',N-NR,N-NR,CZERO,CONE,V(NR+1,NR+1),LDV) + CALL CUNMQR('R','C', N, N, NR, U(1,NR+1), LDU, + $ CWORK(N+1),V,LDV,CWORK(N+NR+1),LCWORK-N-NR,IERR) + CALL CLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL CLASET('A',M-NR,NR,CZERO,CZERO,U(NR+1,1),LDU) + IF ( NR .LT. N1 ) THEN + CALL CLASET('A',NR,N1-NR,CZERO,CZERO,U(1,NR+1),LDU) + CALL CLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1),LDU) + END IF + END IF + END IF + END IF +* + ELSE +* +* .. apply CGESVD to R [[this is the recommended option]] +* + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN +* .. copy R into [V] and overwrite V with the right singular vectors + CALL CLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL CLASET( 'L', NR-1,NR-1, CZERO,CZERO, V(2,1), LDV ) +* .. the right singular vectors of R overwrite [V], the NR left +* singular vectors of R stored in [U](1:NR,1:NR) + CALL CGESVD( 'S', 'O', NR, N, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO ) + CALL CLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* .. now [V](1:NR,1:N) contains V(1:N,1:NR)**H +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL CLASET('A', M-NR,NR, CZERO,CZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL CLASET('A',NR,N1-NR,CZERO,CZERO,U(1,NR+1),LDU) + CALL CLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* + ELSE +* .. need all N right singular vectors and NR < N +* .. the requested number of the left singular vectors +* is then N1 (N or M) +* [[The optimal ratio N/NR for using LQ instead of padding +* with zeros. Here hard coded to 2; it must be at least +* two due to work space constraints.]] +* OPTRATIO = ILAENV(6, 'CGESVD', 'S' // 'O', NR,N,0,0) +* OPTRATIO = MAX( OPTRATIO, 2 ) + OPTRATIO = 2 + IF ( OPTRATIO * NR .GT. N ) THEN + CALL CLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL CLASET('L', NR-1,NR-1, CZERO,CZERO, V(2,1),LDV) +* .. the right singular vectors of R overwrite [V], the NR left +* singular vectors of R stored in [U](1:NR,1:NR) + CALL CLASET('A', N-NR,N, CZERO,CZERO, V(NR+1,1),LDV) + CALL CGESVD( 'S', 'O', N, N, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO ) + CALL CLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. now [V] contains the adjoint of the matrix of the right +* singular vectors of A. The leading N left singular vectors +* are in [U](1:N,1:N) +* .. assemble the left singular vector matrix U of dimensions +* (M x N1), i.e. (M x N) or (M x M). + IF ( ( N .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL CLASET('A',M-N,N,CZERO,CZERO,U(N+1,1),LDU) + IF ( N .LT. 
N1 ) THEN + CALL CLASET('A',N,N1-N,CZERO,CZERO,U(1,N+1),LDU) + CALL CLASET( 'A',M-N,N1-N,CZERO,CONE, + $ U(N+1,N+1), LDU ) + END IF + END IF + ELSE + CALL CLACPY( 'U', NR, N, A, LDA, U(NR+1,1), LDU ) + IF ( NR .GT. 1 ) + $ CALL CLASET('L',NR-1,NR-1,CZERO,CZERO,U(NR+2,1),LDU) + CALL CGELQF( NR, N, U(NR+1,1), LDU, CWORK(N+1), + $ CWORK(N+NR+1), LCWORK-N-NR, IERR ) + CALL CLACPY('L',NR,NR,U(NR+1,1),LDU,V,LDV) + IF ( NR .GT. 1 ) + $ CALL CLASET('U',NR-1,NR-1,CZERO,CZERO,V(1,2),LDV) + CALL CGESVD( 'S', 'O', NR, NR, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+NR+1), LCWORK-N-NR, RWORK, INFO ) + CALL CLASET('A',N-NR,NR,CZERO,CZERO,V(NR+1,1),LDV) + CALL CLASET('A',NR,N-NR,CZERO,CZERO,V(1,NR+1),LDV) + CALL CLASET('A',N-NR,N-NR,CZERO,CONE,V(NR+1,NR+1),LDV) + CALL CUNMLQ('R','N',N,N,NR,U(NR+1,1),LDU,CWORK(N+1), + $ V, LDV, CWORK(N+NR+1),LCWORK-N-NR,IERR) + CALL CLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL CLASET('A',M-NR,NR,CZERO,CZERO,U(NR+1,1),LDU) + IF ( NR .LT. N1 ) THEN + CALL CLASET('A',NR,N1-NR,CZERO,CZERO,U(1,NR+1),LDU) + CALL CLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF + END IF + END IF +* .. end of the "R**H or R" branch + END IF +* +* The Q matrix from the first QRF is built into the left singular +* vectors matrix U. +* + IF ( .NOT. WNTUF ) + $ CALL CUNMQR( 'L', 'N', M, N1, N, A, LDA, CWORK, U, + $ LDU, CWORK(N+1), LCWORK-N, IERR ) + IF ( ROWPRM .AND. .NOT.WNTUF ) + $ CALL CLASWP( N1, U, LDU, 1, M-1, IWORK(N+1), -1 ) +* +* ... end of the "full SVD" branch + END IF +* +* Check whether some singular values are returned as zeros, e.g. +* due to underflow, and update the numerical rank. + p = NR + DO 4001 q = p, 1, -1 + IF ( S(q) .GT. ZERO ) GO TO 4002 + NR = NR - 1 + 4001 CONTINUE + 4002 CONTINUE +* +* .. if numerical rank deficiency is detected, the truncated +* singular values are set to zero. + IF ( NR .LT. N ) CALL SLASET( 'G', N-NR,1, ZERO,ZERO, S(NR+1), N ) +* .. undo scaling; this may cause overflow in the largest singular +* values. + IF ( ASCALED ) + $ CALL SLASCL( 'G',0,0, ONE,SQRT(REAL(M)), NR,1, S, N, IERR ) + IF ( CONDA ) RWORK(1) = SCONDA + RWORK(2) = p - NR +* .. p-NR is the number of singular values that are computed as +* exact zeros in CGESVD() applied to the (possibly truncated) +* full row rank triangular (trapezoidal) factor of A. + NUMRANK = NR +* + RETURN +* +* End of CGESVDQ +* + END diff --git a/lapack-netlib/SRC/cgesvj.f b/lapack-netlib/SRC/cgesvj.f index 2a5ced225..81e40efef 100644 --- a/lapack-netlib/SRC/cgesvj.f +++ b/lapack-netlib/SRC/cgesvj.f @@ -89,12 +89,12 @@ *> Specifies whether to compute the right singular vectors, that *> is, the matrix V: *> = 'V' or 'J': the matrix V is computed and returned in the array V -*> = 'A' : the Jacobi rotations are applied to the MV-by-N +*> = 'A': the Jacobi rotations are applied to the MV-by-N *> array V. In other words, the right singular vector *> matrix V is not computed explicitly; instead it is *> applied to an MV-by-N matrix initially stored in the *> first MV rows of V. -*> = 'N' : the matrix V is not computed and the array V is not +*> = 'N': the matrix V is not computed and the array V is not *> referenced *> \endverbatim *> @@ -116,8 +116,8 @@ *> A is COMPLEX array, dimension (LDA,N) *> On entry, the M-by-N matrix A. *> On exit, -*> If JOBU .EQ. 'U' .OR. JOBU .EQ. 'C': -*> If INFO .EQ. 0 : +*> If JOBU = 'U' .OR. 
JOBU = 'C': +*> If INFO = 0 : *> RANKA orthonormal columns of U are returned in the *> leading RANKA columns of the array A. Here RANKA <= N *> is the number of computed singular values of A that are @@ -127,9 +127,9 @@ *> in the array RWORK as RANKA=NINT(RWORK(2)). Also see the *> descriptions of SVA and RWORK. The computed columns of U *> are mutually numerically orthogonal up to approximately -*> TOL=SQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU.EQ.'C'), +*> TOL=SQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU = 'C'), *> see the description of JOBU. -*> If INFO .GT. 0, +*> If INFO > 0, *> the procedure CGESVJ did not converge in the given number *> of iterations (sweeps). In that case, the computed *> columns of U may not be orthogonal up to TOL. The output @@ -137,8 +137,8 @@ *> values in SVA(1:N)) and V is still a decomposition of the *> input matrix A in the sense that the residual *> || A - SCALE * U * SIGMA * V^* ||_2 / ||A||_2 is small. -*> If JOBU .EQ. 'N': -*> If INFO .EQ. 0 : +*> If JOBU = 'N': +*> If INFO = 0 : *> Note that the left singular vectors are 'for free' in the *> one-sided Jacobi SVD algorithm. However, if only the *> singular values are needed, the level of numerical @@ -147,7 +147,7 @@ *> numerically orthogonal up to approximately M*EPS. Thus, *> on exit, A contains the columns of U scaled with the *> corresponding singular values. -*> If INFO .GT. 0 : +*> If INFO > 0 : *> the procedure CGESVJ did not converge in the given number *> of iterations (sweeps). *> \endverbatim @@ -162,9 +162,9 @@ *> \verbatim *> SVA is REAL array, dimension (N) *> On exit, -*> If INFO .EQ. 0 : +*> If INFO = 0 : *> depending on the value SCALE = RWORK(1), we have: -*> If SCALE .EQ. ONE: +*> If SCALE = ONE: *> SVA(1:N) contains the computed singular values of A. *> During the computation SVA contains the Euclidean column *> norms of the iterated matrices in the array A. @@ -173,7 +173,7 @@ *> factored representation is due to the fact that some of the *> singular values of A might underflow or overflow. *> -*> If INFO .GT. 0 : +*> If INFO > 0 : *> the procedure CGESVJ did not converge in the given number of *> iterations (sweeps) and SCALE*SVA(1:N) may not be accurate. *> \endverbatim @@ -181,7 +181,7 @@ *> \param[in] MV *> \verbatim *> MV is INTEGER -*> If JOBV .EQ. 'A', then the product of Jacobi rotations in CGESVJ +*> If JOBV = 'A', then the product of Jacobi rotations in CGESVJ *> is applied to the first MV rows of V. See the description of JOBV. *> \endverbatim *> @@ -199,16 +199,16 @@ *> \param[in] LDV *> \verbatim *> LDV is INTEGER -*> The leading dimension of the array V, LDV .GE. 1. -*> If JOBV .EQ. 'V', then LDV .GE. max(1,N). -*> If JOBV .EQ. 'A', then LDV .GE. max(1,MV) . +*> The leading dimension of the array V, LDV >= 1. +*> If JOBV = 'V', then LDV >= max(1,N). +*> If JOBV = 'A', then LDV >= max(1,MV) . *> \endverbatim *> *> \param[in,out] CWORK *> \verbatim *> CWORK is COMPLEX array, dimension (max(1,LWORK)) *> Used as workspace. -*> If on entry LWORK .EQ. -1, then a workspace query is assumed and +*> If on entry LWORK = -1, then a workspace query is assumed and *> no computation is done; CWORK(1) is set to the minial (and optimal) *> length of CWORK. *> \endverbatim @@ -223,7 +223,7 @@ *> \verbatim *> RWORK is REAL array, dimension (max(6,LRWORK)) *> On entry, -*> If JOBU .EQ. 'C' : +*> If JOBU = 'C' : *> RWORK(1) = CTOL, where CTOL defines the threshold for convergence. *> The process stops if all columns of A are mutually *> orthogonal up to CTOL*EPS, EPS=SLAMCH('E'). 
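For readers following the JOBU = 'C' convention documented in the hunk above: the caller supplies CTOL in RWORK(1) on entry, and the sweeps stop once the computed columns of U are mutually orthogonal up to CTOL*EPS. A minimal calling sketch (illustrative only, not part of the patch; the matrix data and dimensions are made up):

      PROGRAM GESVJ_CTOL
      IMPLICIT NONE
      INTEGER M, N, LWORK, LRWORK
      PARAMETER ( M = 5, N = 3, LWORK = M+N, LRWORK = 6 )
      COMPLEX A( M, N ), V( N, N ), CWORK( LWORK )
      REAL SVA( N ), RWORK( LRWORK )
      INTEGER I, J, INFO
      EXTERNAL CGESVJ
      INTRINSIC CMPLX, REAL, SQRT
*     An arbitrary test matrix.
      DO 10 J = 1, N
         DO 20 I = 1, M
            A( I, J ) = CMPLX( REAL( I+J ), REAL( I-J ) )
   20    CONTINUE
   10 CONTINUE
*     With JOBU = 'C', CGESVJ reads CTOL from RWORK(1) on entry and
*     iterates until the columns of U are mutually orthogonal up to
*     about CTOL*EPS.
      RWORK( 1 ) = SQRT( REAL( M ) )
      CALL CGESVJ( 'G', 'C', 'V', M, N, A, M, SVA, 0, V, N,
     $             CWORK, LWORK, RWORK, LRWORK, INFO )
      END

Setting CTOL = SQRT(M), as here, reproduces the default tolerance TOL = SQRT(M)*EPS quoted in the documentation above.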
@@ -243,11 +243,11 @@ *> RWORK(5) = max_{i.NE.j} |COS(A(:,i),A(:,j))| in the last sweep. *> This is useful information in cases when CGESVJ did *> not converge, as it can be used to estimate whether -*> the output is stil useful and for post festum analysis. +*> the output is still useful and for post festum analysis. *> RWORK(6) = the largest absolute value over all sines of the *> Jacobi rotation angles in the last sweep. It can be *> useful for a post festum analysis. -*> If on entry LRWORK .EQ. -1, then a workspace query is assumed and +*> If on entry LRWORK = -1, then a workspace query is assumed and *> no computation is done; RWORK(1) is set to the minial (and optimal) *> length of RWORK. *> \endverbatim @@ -261,9 +261,9 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit. -*> < 0 : if INFO = -i, then the i-th argument had an illegal value -*> > 0 : CGESVJ did not converge in the maximal allowed number +*> = 0: successful exit. +*> < 0: if INFO = -i, then the i-th argument had an illegal value +*> > 0: CGESVJ did not converge in the maximal allowed number *> (NSWEEP=30) of sweeps. The output may still be useful. *> See the description of RWORK. *> \endverbatim diff --git a/lapack-netlib/SRC/cgesvxx.f b/lapack-netlib/SRC/cgesvxx.f index 30d1beb33..383e4d011 100644 --- a/lapack-netlib/SRC/cgesvxx.f +++ b/lapack-netlib/SRC/cgesvxx.f @@ -411,7 +411,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -447,14 +447,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -462,9 +462,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. 
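The CGESVDQ documentation above spells out the workspace-query convention at length: passing LIWORK = LCWORK = LRWORK = -1 makes the routine return the required sizes without performing any computation. A minimal sketch of that calling pattern (illustrative only, not part of the patch; the dimensions and job options are arbitrary):

      PROGRAM GESVDQ_QUERY
      IMPLICIT NONE
      INTEGER M, N
      PARAMETER ( M = 6, N = 4 )
      COMPLEX A( M, N ), U( M, M ), V( N, N ), CWQ( 2 )
      REAL S( N ), RWQ( 1 )
      INTEGER IWQ( 1 ), NUMRANK, INFO
      INTEGER LIWORK, LCWORK, LRWORK
      EXTERNAL CGESVDQ
      INTRINSIC INT
*     Workspace query: all three lengths are -1, so no computation is
*     performed and only the required sizes are returned.
      CALL CGESVDQ( 'H', 'P', 'N', 'A', 'A', M, N, A, M, S, U, M,
     $              V, N, NUMRANK, IWQ, -1, CWQ, -1, RWQ, -1, INFO )
      LIWORK = IWQ( 1 )
      LCWORK = INT( CWQ( 1 ) )
      LRWORK = INT( RWQ( 1 ) )
*     A second call with IWORK/CWORK/RWORK of at least these sizes
*     (allocated however the application prefers) performs the
*     actual decomposition.
      END

On return from the query, IWORK(1) holds the minimal LIWORK, CWORK(1) and CWORK(2) the optimal and minimal LCWORK, and RWORK(1) the minimal LRWORK, exactly as documented in the parameter descriptions above.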
diff --git a/lapack-netlib/SRC/cgetsls.f b/lapack-netlib/SRC/cgetsls.f index e7c5d8120..01de3c984 100644 --- a/lapack-netlib/SRC/cgetsls.f +++ b/lapack-netlib/SRC/cgetsls.f @@ -1,3 +1,5 @@ +*> \brief \b CGETSLS +* * Definition: * =========== * @@ -259,7 +261,7 @@ TSZM = INT( TQ( 1 ) ) LWM = INT( WORKQ( 1 ) ) CALL CGEMLQ( 'L', TRANS, N, NRHS, M, A, LDA, TQ, - $ TSZO, B, LDB, WORKQ, -1, INFO2 ) + $ TSZM, B, LDB, WORKQ, -1, INFO2 ) LWM = MAX( LWM, INT( WORKQ( 1 ) ) ) WSIZEO = TSZO + LWO WSIZEM = TSZM + LWM diff --git a/lapack-netlib/SRC/cggesx.f b/lapack-netlib/SRC/cggesx.f index 74169ff80..acc4eda36 100644 --- a/lapack-netlib/SRC/cggesx.f +++ b/lapack-netlib/SRC/cggesx.f @@ -120,10 +120,10 @@ *> \verbatim *> SENSE is CHARACTER*1 *> Determines which reciprocal condition numbers are computed. -*> = 'N' : None are computed; -*> = 'E' : Computed for average of selected eigenvalues only; -*> = 'V' : Computed for selected deflating subspaces only; -*> = 'B' : Computed for both. +*> = 'N': None are computed; +*> = 'E': Computed for average of selected eigenvalues only; +*> = 'V': Computed for selected deflating subspaces only; +*> = 'B': Computed for both. *> If SENSE = 'E', 'V', or 'B', SORT must equal 'S'. *> \endverbatim *> diff --git a/lapack-netlib/SRC/cgsvj0.f b/lapack-netlib/SRC/cgsvj0.f index 80e67a06e..810df3367 100644 --- a/lapack-netlib/SRC/cgsvj0.f +++ b/lapack-netlib/SRC/cgsvj0.f @@ -117,7 +117,7 @@ *> \param[in] MV *> \verbatim *> MV is INTEGER -*> If JOBV .EQ. 'A', then MV rows of V are post-multipled by a +*> If JOBV = 'A', then MV rows of V are post-multipled by a *> sequence of Jacobi rotations. *> If JOBV = 'N', then MV is not referenced. *> \endverbatim @@ -125,9 +125,9 @@ *> \param[in,out] V *> \verbatim *> V is COMPLEX array, dimension (LDV,N) -*> If JOBV .EQ. 'V' then N rows of V are post-multipled by a +*> If JOBV = 'V' then N rows of V are post-multipled by a *> sequence of Jacobi rotations. -*> If JOBV .EQ. 'A' then MV rows of V are post-multipled by a +*> If JOBV = 'A' then MV rows of V are post-multipled by a *> sequence of Jacobi rotations. *> If JOBV = 'N', then V is not referenced. *> \endverbatim @@ -136,8 +136,8 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V, LDV >= 1. -*> If JOBV = 'V', LDV .GE. N. -*> If JOBV = 'A', LDV .GE. MV. +*> If JOBV = 'V', LDV >= N. +*> If JOBV = 'A', LDV >= MV. *> \endverbatim *> *> \param[in] EPS @@ -157,7 +157,7 @@ *> TOL is REAL *> TOL is the threshold for Jacobi rotations. For a pair *> A(:,p), A(:,q) of pivot columns, the Jacobi rotation is -*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL. +*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) > TOL. *> \endverbatim *> *> \param[in] NSWEEP @@ -175,14 +175,14 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> LWORK is the dimension of WORK. LWORK .GE. M. +*> LWORK is the dimension of WORK. LWORK >= M. *> \endverbatim *> *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit. -*> < 0 : if INFO = -i, then the i-th argument had an illegal value +*> = 0: successful exit. +*> < 0: if INFO = -i, then the i-th argument had an illegal value *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/cgsvj1.f b/lapack-netlib/SRC/cgsvj1.f index bebcd5c45..06b417cf2 100644 --- a/lapack-netlib/SRC/cgsvj1.f +++ b/lapack-netlib/SRC/cgsvj1.f @@ -61,7 +61,7 @@ *> In terms of the columns of A, the first N1 columns are rotated 'against' *> the remaining N-N1 columns, trying to increase the angle between the *> corresponding subspaces. 
The off-diagonal block is N1-by(N-N1) and it is -*> tiled using quadratic tiles of side KBL. Here, KBL is a tunning parmeter. +*> tiled using quadratic tiles of side KBL. Here, KBL is a tunning parameter. *> The number of sweeps is given in NSWEEP and the orthogonality threshold *> is given in TOL. *> \endverbatim @@ -147,7 +147,7 @@ *> \param[in] MV *> \verbatim *> MV is INTEGER -*> If JOBV .EQ. 'A', then MV rows of V are post-multipled by a +*> If JOBV = 'A', then MV rows of V are post-multipled by a *> sequence of Jacobi rotations. *> If JOBV = 'N', then MV is not referenced. *> \endverbatim @@ -155,9 +155,9 @@ *> \param[in,out] V *> \verbatim *> V is COMPLEX array, dimension (LDV,N) -*> If JOBV .EQ. 'V' then N rows of V are post-multipled by a +*> If JOBV = 'V' then N rows of V are post-multipled by a *> sequence of Jacobi rotations. -*> If JOBV .EQ. 'A' then MV rows of V are post-multipled by a +*> If JOBV = 'A' then MV rows of V are post-multipled by a *> sequence of Jacobi rotations. *> If JOBV = 'N', then V is not referenced. *> \endverbatim @@ -166,8 +166,8 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V, LDV >= 1. -*> If JOBV = 'V', LDV .GE. N. -*> If JOBV = 'A', LDV .GE. MV. +*> If JOBV = 'V', LDV >= N. +*> If JOBV = 'A', LDV >= MV. *> \endverbatim *> *> \param[in] EPS @@ -187,7 +187,7 @@ *> TOL is REAL *> TOL is the threshold for Jacobi rotations. For a pair *> A(:,p), A(:,q) of pivot columns, the Jacobi rotation is -*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL. +*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) > TOL. *> \endverbatim *> *> \param[in] NSWEEP @@ -205,14 +205,14 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> LWORK is the dimension of WORK. LWORK .GE. M. +*> LWORK is the dimension of WORK. LWORK >= M. *> \endverbatim *> *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit. -*> < 0 : if INFO = -i, then the i-th argument had an illegal value +*> = 0: successful exit. +*> < 0: if INFO = -i, then the i-th argument had an illegal value *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/chb2st_kernels.f b/lapack-netlib/SRC/chb2st_kernels.f index 25c9ab717..01ea217bb 100644 --- a/lapack-netlib/SRC/chb2st_kernels.f +++ b/lapack-netlib/SRC/chb2st_kernels.f @@ -1,26 +1,26 @@ *> \brief \b CHB2ST_KERNELS * * @generated from zhb2st_kernels.f, fortran z -> c, Wed Dec 7 08:22:40 2016 -* +* * =========== DOCUMENTATION =========== * -* Online html documentation available at -* http://www.netlib.org/lapack/explore-html/ +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ * *> \htmlonly -*> Download CHB2ST_KERNELS + dependencies -*> -*> [TGZ] -*> -*> [ZIP] -*> +*> Download CHB2ST_KERNELS + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> *> [TXT] -*> \endhtmlonly +*> \endhtmlonly * * Definition: * =========== * -* SUBROUTINE CHB2ST_KERNELS( UPLO, WANTZ, TTYPE, +* SUBROUTINE CHB2ST_KERNELS( UPLO, WANTZ, TTYPE, * ST, ED, SWEEP, N, NB, IB, * A, LDA, V, TAU, LDVT, WORK) * @@ -32,9 +32,9 @@ * INTEGER TTYPE, ST, ED, SWEEP, N, NB, IB, LDA, LDVT * .. * .. Array Arguments .. -* COMPLEX A( LDA, * ), V( * ), +* COMPLEX A( LDA, * ), V( * ), * TAU( * ), WORK( * ) -* +* *> \par Purpose: * ============= *> @@ -124,7 +124,7 @@ *> LDVT is INTEGER. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array. Workspace of size nb. *> \endverbatim @@ -147,7 +147,7 @@ *> http://doi.acm.org/10.1145/2063384.2063394 *> *> A. Haidar, J. Kurzak, P. 
Luszczek, 2013. -*> An improved parallel singular value algorithm and its implementation +*> An improved parallel singular value algorithm and its implementation *> for multicore hardware, In Proceedings of 2013 International Conference *> for High Performance Computing, Networking, Storage and Analysis (SC '13). *> Denver, Colorado, USA, 2013. @@ -155,16 +155,16 @@ *> http://doi.acm.org/10.1145/2503210.2503292 *> *> A. Haidar, R. Solca, S. Tomov, T. Schulthess and J. Dongarra. -*> A novel hybrid CPU-GPU generalized eigensolver for electronic structure +*> A novel hybrid CPU-GPU generalized eigensolver for electronic structure *> calculations based on fine-grained memory aware tasks. *> International Journal of High Performance Computing Applications. *> Volume 28 Issue 2, Pages 196-209, May 2014. -*> http://hpc.sagepub.com/content/28/2/196 +*> http://hpc.sagepub.com/content/28/2/196 *> *> \endverbatim *> * ===================================================================== - SUBROUTINE CHB2ST_KERNELS( UPLO, WANTZ, TTYPE, + SUBROUTINE CHB2ST_KERNELS( UPLO, WANTZ, TTYPE, $ ST, ED, SWEEP, N, NB, IB, $ A, LDA, V, TAU, LDVT, WORK) * @@ -181,7 +181,7 @@ INTEGER TTYPE, ST, ED, SWEEP, N, NB, IB, LDA, LDVT * .. * .. Array Arguments .. - COMPLEX A( LDA, * ), V( * ), + COMPLEX A( LDA, * ), V( * ), $ TAU( * ), WORK( * ) * .. * @@ -195,8 +195,8 @@ * .. Local Scalars .. LOGICAL UPPER INTEGER I, J1, J2, LM, LN, VPOS, TAUPOS, - $ DPOS, OFDPOS, AJETER - COMPLEX CTMP + $ DPOS, OFDPOS, AJETER + COMPLEX CTMP * .. * .. External Subroutines .. EXTERNAL CLARFG, CLARFX, CLARFY @@ -209,7 +209,7 @@ * .. * .. * .. Executable Statements .. -* +* AJETER = IB + LDVT UPPER = LSAME( UPLO, 'U' ) @@ -240,10 +240,10 @@ V( VPOS ) = ONE DO 10 I = 1, LM-1 V( VPOS+I ) = CONJG( A( OFDPOS-I, ST+I ) ) - A( OFDPOS-I, ST+I ) = ZERO + A( OFDPOS-I, ST+I ) = ZERO 10 CONTINUE CTMP = CONJG( A( OFDPOS, ST ) ) - CALL CLARFG( LM, CTMP, V( VPOS+1 ), 1, + CALL CLARFG( LM, CTMP, V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) A( OFDPOS, ST ) = CTMP * @@ -281,14 +281,14 @@ * V( VPOS ) = ONE DO 30 I = 1, LM-1 - V( VPOS+I ) = + V( VPOS+I ) = $ CONJG( A( DPOS-NB-I, J1+I ) ) A( DPOS-NB-I, J1+I ) = ZERO 30 CONTINUE CTMP = CONJG( A( DPOS-NB, J1 ) ) CALL CLARFG( LM, CTMP, V( VPOS+1 ), 1, TAU( TAUPOS ) ) A( DPOS-NB, J1 ) = CTMP -* +* CALL CLARFX( 'Right', LN-1, LM, V( VPOS ), $ TAU( TAUPOS ), $ A( DPOS-NB+1, J1 ), LDA-1, WORK) @@ -296,9 +296,9 @@ ENDIF * * Lower case -* +* ELSE -* +* IF( WANTZ ) THEN VPOS = MOD( SWEEP-1, 2 ) * N + ST TAUPOS = MOD( SWEEP-1, 2 ) * N + ST @@ -313,9 +313,9 @@ V( VPOS ) = ONE DO 20 I = 1, LM-1 V( VPOS+I ) = A( OFDPOS+I, ST-1 ) - A( OFDPOS+I, ST-1 ) = ZERO + A( OFDPOS+I, ST-1 ) = ZERO 20 CONTINUE - CALL CLARFG( LM, A( OFDPOS, ST-1 ), V( VPOS+1 ), 1, + CALL CLARFG( LM, A( OFDPOS, ST-1 ), V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) * LM = ED - ST + 1 @@ -342,7 +342,7 @@ LM = J2-J1+1 * IF( LM.GT.0) THEN - CALL CLARFX( 'Right', LM, LN, V( VPOS ), + CALL CLARFX( 'Right', LM, LN, V( VPOS ), $ TAU( TAUPOS ), A( DPOS+NB, ST ), $ LDA-1, WORK) * @@ -359,13 +359,13 @@ V( VPOS+I ) = A( DPOS+NB+I, ST ) A( DPOS+NB+I, ST ) = ZERO 40 CONTINUE - CALL CLARFG( LM, A( DPOS+NB, ST ), V( VPOS+1 ), 1, + CALL CLARFG( LM, A( DPOS+NB, ST ), V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) * - CALL CLARFX( 'Left', LM, LN-1, V( VPOS ), + CALL CLARFX( 'Left', LM, LN-1, V( VPOS ), $ CONJG( TAU( TAUPOS ) ), $ A( DPOS+NB-1, ST+1 ), LDA-1, WORK) - + ENDIF ENDIF ENDIF @@ -374,4 +374,4 @@ * * END OF CHB2ST_KERNELS * - END + END diff --git a/lapack-netlib/SRC/checon_3.f 
b/lapack-netlib/SRC/checon_3.f index 6427dd594..5d9ed97e9 100644 --- a/lapack-netlib/SRC/checon_3.f +++ b/lapack-netlib/SRC/checon_3.f @@ -19,7 +19,7 @@ * =========== * * SUBROUTINE CHECON_3( UPLO, N, A, LDA, E, IPIV, ANORM, RCOND, -* WORK, IWORK, INFO ) +* WORK, INFO ) * * .. Scalar Arguments .. * CHARACTER UPLO @@ -27,7 +27,7 @@ * REAL ANORM, RCOND * .. * .. Array Arguments .. -* INTEGER IPIV( * ), IWORK( * ) +* INTEGER IPIV( * ) * COMPLEX A( LDA, * ), E ( * ), WORK( * ) * .. * @@ -129,11 +129,6 @@ *> WORK is COMPLEX array, dimension (2*N) *> \endverbatim *> -*> \param[out] IWORK -*> \verbatim -*> IWORK is INTEGER array, dimension (N) -*> \endverbatim -*> *> \param[out] INFO *> \verbatim *> INFO is INTEGER diff --git a/lapack-netlib/SRC/cheevr.f b/lapack-netlib/SRC/cheevr.f index 0b055baf6..c5deb1166 100644 --- a/lapack-netlib/SRC/cheevr.f +++ b/lapack-netlib/SRC/cheevr.f @@ -210,7 +210,7 @@ *> eigenvalues are computed to high relative accuracy when *> possible in future releases. The current code does not *> make any guarantees about high relative accuracy, but -*> furutre releases will. See J. Barlow and J. Demmel, +*> future releases will. See J. Barlow and J. Demmel, *> "Computing Accurate Eigensystems of Scaled Diagonally *> Dominant Matrices", LAPACK Working Note #7, for a discussion *> of which matrices define their eigenvalues to high relative diff --git a/lapack-netlib/SRC/cheevr_2stage.f b/lapack-netlib/SRC/cheevr_2stage.f index 20a1cb3f3..1489a322e 100644 --- a/lapack-netlib/SRC/cheevr_2stage.f +++ b/lapack-netlib/SRC/cheevr_2stage.f @@ -217,7 +217,7 @@ *> eigenvalues are computed to high relative accuracy when *> possible in future releases. The current code does not *> make any guarantees about high relative accuracy, but -*> furutre releases will. See J. Barlow and J. Demmel, +*> future releases will. See J. Barlow and J. Demmel, *> "Computing Accurate Eigensystems of Scaled Diagonally *> Dominant Matrices", LAPACK Working Note #7, for a discussion *> of which matrices define their eigenvalues to high relative diff --git a/lapack-netlib/SRC/chegs2.f b/lapack-netlib/SRC/chegs2.f index 68d2f6625..55a895fc3 100644 --- a/lapack-netlib/SRC/chegs2.f +++ b/lapack-netlib/SRC/chegs2.f @@ -97,6 +97,7 @@ *> B is COMPLEX array, dimension (LDB,N) *> The triangular factor from the Cholesky factorization of B, *> as returned by CPOTRF. +*> B is modified by the routine but restored on exit. *> \endverbatim *> *> \param[in] LDB diff --git a/lapack-netlib/SRC/chegst.f b/lapack-netlib/SRC/chegst.f index 2f933729c..b3fdff2d5 100644 --- a/lapack-netlib/SRC/chegst.f +++ b/lapack-netlib/SRC/chegst.f @@ -97,6 +97,7 @@ *> B is COMPLEX array, dimension (LDB,N) *> The triangular factor from the Cholesky factorization of B, *> as returned by CPOTRF. +*> B is modified by the routine but restored on exit. *> \endverbatim *> *> \param[in] LDB diff --git a/lapack-netlib/SRC/cherfsx.f b/lapack-netlib/SRC/cherfsx.f index 4ed2c99f7..76cef7cd1 100644 --- a/lapack-netlib/SRC/cherfsx.f +++ b/lapack-netlib/SRC/cherfsx.f @@ -102,7 +102,7 @@ *> \param[in] A *> \verbatim *> A is COMPLEX array, dimension (LDA,N) -*> The symmetric matrix A. If UPLO = 'U', the leading N-by-N +*> The Hermitian matrix A. If UPLO = 'U', the leading N-by-N *> upper triangular part of A contains the upper triangular *> part of the matrix A, and the strictly lower triangular *> part of A is not referenced. If UPLO = 'L', the leading @@ -270,7 +270,7 @@ *> information as described below. 
There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -306,14 +306,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -321,9 +321,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/chesv_aa.f b/lapack-netlib/SRC/chesv_aa.f index 470f910bc..b934e624b 100644 --- a/lapack-netlib/SRC/chesv_aa.f +++ b/lapack-netlib/SRC/chesv_aa.f @@ -42,7 +42,7 @@ *> matrices. *> *> Aasen's algorithm is used to factor A as -*> A = U * T * U**H, if UPLO = 'U', or +*> A = U**H * T * U, if UPLO = 'U', or *> A = L * T * L**H, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is Hermitian and tridiagonal. The factored form @@ -86,7 +86,7 @@ *> *> On exit, if INFO = 0, the tridiagonal matrix T and the *> multipliers used to obtain the factor U or L from the -*> factorization A = U*T*U**H or A = L*T*L**H as computed by +*> factorization A = U**H*T*U or A = L*T*L**H as computed by *> CHETRF_AA. *> \endverbatim *> @@ -230,7 +230,7 @@ RETURN END IF * -* Compute the factorization A = U*T*U**H or A = L*T*L**H. +* Compute the factorization A = U**H*T*U or A = L*T*L**H. * CALL CHETRF_AA( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) IF( INFO.EQ.0 ) THEN diff --git a/lapack-netlib/SRC/chesv_aa_2stage.f b/lapack-netlib/SRC/chesv_aa_2stage.f index 05f6b7bb7..ab5786d57 100644 --- a/lapack-netlib/SRC/chesv_aa_2stage.f +++ b/lapack-netlib/SRC/chesv_aa_2stage.f @@ -43,7 +43,7 @@ *> matrices. *> *> Aasen's 2-stage algorithm is used to factor A as -*> A = U * T * U**H, if UPLO = 'U', or +*> A = U**H * T * U, if UPLO = 'U', or *> A = L * T * L**H, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is Hermitian and band. The matrix T is @@ -257,7 +257,7 @@ END IF * * -* Compute the factorization A = U*T*U**H or A = L*T*L**H. +* Compute the factorization A = U**H*T*U or A = L*T*L**H. 
* CALL CHETRF_AA_2STAGE( UPLO, N, A, LDA, TB, LTB, IPIV, IPIV2, $ WORK, LWORK, INFO ) diff --git a/lapack-netlib/SRC/chesvxx.f b/lapack-netlib/SRC/chesvxx.f index 3f4466d41..c59e72bbf 100644 --- a/lapack-netlib/SRC/chesvxx.f +++ b/lapack-netlib/SRC/chesvxx.f @@ -46,7 +46,7 @@ *> *> CHESVXX uses the diagonal pivoting factorization to compute the *> solution to a complex system of linear equations A * X = B, where -*> A is an N-by-N symmetric matrix and X and B are N-by-NRHS +*> A is an N-by-N Hermitian matrix and X and B are N-by-NRHS *> matrices. *> *> If requested, both normwise and maximum componentwise error bounds @@ -88,7 +88,7 @@ *> A = L * D * L**T, if UPLO = 'L', *> *> where U (or L) is a product of permutation and unit upper (lower) -*> triangular matrices, and D is symmetric and block diagonal with +*> triangular matrices, and D is Hermitian and block diagonal with *> 1-by-1 and 2-by-2 diagonal blocks. *> *> 3. If some D(i,i)=0, so that D is exactly singular, then the @@ -161,7 +161,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX array, dimension (LDA,N) -*> The symmetric matrix A. If UPLO = 'U', the leading N-by-N +*> The Hermitian matrix A. If UPLO = 'U', the leading N-by-N *> upper triangular part of A contains the upper triangular *> part of the matrix A, and the strictly lower triangular *> part of A is not referenced. If UPLO = 'L', the leading @@ -378,7 +378,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -414,14 +414,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -429,9 +429,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. 
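The PARAMS handling spelled out in this CHESVXX hunk is shared by the other *SVXX expert drivers and the *RFSX refinement routines edited below. A minimal caller-side sketch, using only what the parameter description itself guarantees (the helper name is illustrative, not a LAPACK routine):

      SUBROUTINE SET_DEFAULT_PARAMS( PARAMS, NPARAMS )
*     Fill PARAMS so every expert-driver parameter falls back to its
*     default: the documentation above states that any entry < 0.0
*     is replaced by the default value for that position.
      IMPLICIT NONE
      INTEGER            NPARAMS
      REAL               PARAMS( NPARAMS )
      INTEGER            I
      DO 10 I = 1, NPARAMS
         PARAMS( I ) = -1.0E+0
   10 CONTINUE
      RETURN
      END

Alternatively, passing NPARAMS <= 0 skips the PARAMS array entirely, per the "If <= 0, the PARAMS array is never referenced" clause above.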
diff --git a/lapack-netlib/SRC/chetf2_rk.f b/lapack-netlib/SRC/chetf2_rk.f index 38a0ce373..80e2f61b7 100644 --- a/lapack-netlib/SRC/chetf2_rk.f +++ b/lapack-netlib/SRC/chetf2_rk.f @@ -322,7 +322,7 @@ * * Factorize A as U*D*U**H using the upper triangle of A * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = CZERO @@ -676,7 +676,7 @@ * * Factorize A as L*D*L**H using the lower triangle of A * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = CZERO * diff --git a/lapack-netlib/SRC/chetrd_2stage.f b/lapack-netlib/SRC/chetrd_2stage.f index e7370a4dd..4575a5e90 100644 --- a/lapack-netlib/SRC/chetrd_2stage.f +++ b/lapack-netlib/SRC/chetrd_2stage.f @@ -123,23 +123,22 @@ *> *> \param[out] HOUS2 *> \verbatim -*> HOUS2 is COMPLEX array, dimension LHOUS2, that -*> store the Householder representation of the stage2 +*> HOUS2 is COMPLEX array, dimension (LHOUS2) +*> Stores the Householder representation of the stage2 *> band to tridiagonal. *> \endverbatim *> *> \param[in] LHOUS2 *> \verbatim *> LHOUS2 is INTEGER -*> The dimension of the array HOUS2. LHOUS2 = MAX(1, dimension) +*> The dimension of the array HOUS2. *> If LWORK = -1, or LHOUS2=-1, *> then a query is assumed; the routine *> only calculates the optimal size of the HOUS2 array, returns *> this value as the first entry of the HOUS2 array, and no error *> message related to LHOUS2 is issued by XERBLA. -*> LHOUS2 = MAX(1, dimension) where -*> dimension = 4*N if VECT='N' -*> not available now if VECT='H' +*> If VECT='N', LHOUS2 = max(1, 4*n); +*> if VECT='V', option not yet available. *> \endverbatim *> *> \param[out] WORK @@ -151,7 +150,7 @@ *> \verbatim *> LWORK is INTEGER *> The dimension of the array WORK. LWORK = MAX(1, dimension) -*> If LWORK = -1, or LHOUS2=-1, +*> If LWORK = -1, or LHOUS2 = -1, *> then a workspace query is assumed; the routine *> only calculates the optimal size of the WORK array, returns *> this value as the first entry of the WORK array, and no error diff --git a/lapack-netlib/SRC/chetrd_hb2st.F b/lapack-netlib/SRC/chetrd_hb2st.F index 43da45640..a3d8259d3 100644 --- a/lapack-netlib/SRC/chetrd_hb2st.F +++ b/lapack-netlib/SRC/chetrd_hb2st.F @@ -50,9 +50,9 @@ * Arguments: * ========== * -*> \param[in] STAGE +*> \param[in] STAGE1 *> \verbatim -*> STAGE is CHARACTER*1 +*> STAGE1 is CHARACTER*1 *> = 'N': "No": to mention that the stage 1 of the reduction *> from dense to band using the chetrd_he2hb routine *> was not called before this routine to reproduce AB. diff --git a/lapack-netlib/SRC/chetrd_he2hb.f b/lapack-netlib/SRC/chetrd_he2hb.f index e334532fe..e85c1fd01 100644 --- a/lapack-netlib/SRC/chetrd_he2hb.f +++ b/lapack-netlib/SRC/chetrd_he2hb.f @@ -363,7 +363,7 @@ * * * Set the workspace of the triangular matrix T to zero once such a -* way everytime T is generated the upper/lower portion will be always zero +* way every time T is generated the upper/lower portion will be always zero * CALL CLASET( "A", LDT, KD, ZERO, ZERO, WORK( TPOS ), LDT ) * diff --git a/lapack-netlib/SRC/chetrf_aa.f b/lapack-netlib/SRC/chetrf_aa.f index 2c5564893..c6f548d42 100644 --- a/lapack-netlib/SRC/chetrf_aa.f +++ b/lapack-netlib/SRC/chetrf_aa.f @@ -37,7 +37,7 @@ *> CHETRF_AA computes the factorization of a complex hermitian matrix A *> using the Aasen's algorithm. 
The form of the factorization is *> -*> A = U*T*U**H or A = L*T*L**H +*> A = U**H*T*U or A = L*T*L**H *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a hermitian tridiagonal matrix. @@ -223,7 +223,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**H using the upper triangle of A +* Factorize A as U**H*D*U using the upper triangle of A * ..................................................... * * copy first row A(1, 1:N) into H(1:n) (stored in WORK(1:N)) @@ -256,7 +256,7 @@ $ A( MAX(1, J), J+1 ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J @@ -376,7 +376,7 @@ $ A( J+1, MAX(1, J) ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J diff --git a/lapack-netlib/SRC/chetrf_aa_2stage.f b/lapack-netlib/SRC/chetrf_aa_2stage.f index ce34d73cc..d2e0e0023 100644 --- a/lapack-netlib/SRC/chetrf_aa_2stage.f +++ b/lapack-netlib/SRC/chetrf_aa_2stage.f @@ -38,7 +38,7 @@ *> CHETRF_AA_2STAGE computes the factorization of a real hermitian matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a hermitian band matrix with the @@ -277,7 +277,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... 
* DO J = 0, NT-1 @@ -453,14 +453,17 @@ c END IF * > Apply pivots to previous columns of L CALL CSWAP( K-1, A( (J+1)*NB+1, I1 ), 1, $ A( (J+1)*NB+1, I2 ), 1 ) -* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL CSWAP( I2-I1-1, A( I1, I1+1 ), LDA, - $ A( I1+1, I2 ), 1 ) +* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) + IF( I2.GT.(I1+1) ) THEN + CALL CSWAP( I2-I1-1, A( I1, I1+1 ), LDA, + $ A( I1+1, I2 ), 1 ) + CALL CLACGV( I2-I1-1, A( I1+1, I2 ), 1 ) + END IF CALL CLACGV( I2-I1, A( I1, I1+1 ), LDA ) - CALL CLACGV( I2-I1-1, A( I1+1, I2 ), 1 ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL CSWAP( N-I2, A( I1, I2+1 ), LDA, - $ A( I2, I2+1 ), LDA ) + IF( I2.LT.N ) + $ CALL CSWAP( N-I2, A( I1, I2+1 ), LDA, + $ A( I2, I2+1 ), LDA ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) @@ -630,14 +633,17 @@ c END IF * > Apply pivots to previous columns of L CALL CSWAP( K-1, A( I1, (J+1)*NB+1 ), LDA, $ A( I2, (J+1)*NB+1 ), LDA ) -* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL CSWAP( I2-I1-1, A( I1+1, I1 ), 1, - $ A( I2, I1+1 ), LDA ) +* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) + IF( I2.GT.(I1+1) ) THEN + CALL CSWAP( I2-I1-1, A( I1+1, I1 ), 1, + $ A( I2, I1+1 ), LDA ) + CALL CLACGV( I2-I1-1, A( I2, I1+1 ), LDA ) + END IF CALL CLACGV( I2-I1, A( I1+1, I1 ), 1 ) - CALL CLACGV( I2-I1-1, A( I2, I1+1 ), LDA ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL CSWAP( N-I2, A( I2+1, I1 ), 1, - $ A( I2+1, I2 ), 1 ) + IF( I2.LT.N ) + $ CALL CSWAP( N-I2, A( I2+1, I1 ), 1, + $ A( I2+1, I2 ), 1 ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) diff --git a/lapack-netlib/SRC/chetri2.f b/lapack-netlib/SRC/chetri2.f index 722d13008..1e18202cf 100644 --- a/lapack-netlib/SRC/chetri2.f +++ b/lapack-netlib/SRC/chetri2.f @@ -62,7 +62,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX array, dimension (LDA,N) -*> On entry, the NB diagonal matrix D and the multipliers +*> On entry, the block diagonal matrix D and the multipliers *> used to obtain the factor U or L as computed by CHETRF. *> *> On exit, if INFO = 0, the (symmetric) inverse of the original @@ -82,7 +82,7 @@ *> \param[in] IPIV *> \verbatim *> IPIV is INTEGER array, dimension (N) -*> Details of the interchanges and the NB structure of D +*> Details of the interchanges and the block structure of D *> as determined by CHETRF. *> \endverbatim *> diff --git a/lapack-netlib/SRC/chetrs_aa.f b/lapack-netlib/SRC/chetrs_aa.f index 50e5692db..877517031 100644 --- a/lapack-netlib/SRC/chetrs_aa.f +++ b/lapack-netlib/SRC/chetrs_aa.f @@ -37,7 +37,7 @@ *> \verbatim *> *> CHETRS_AA solves a system of linear equations A*X = B with a complex -*> hermitian matrix A using the factorization A = U*T*U**H or +*> hermitian matrix A using the factorization A = U**H*T*U or *> A = L*T*L**H computed by CHETRF_AA. *> \endverbatim * @@ -49,7 +49,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**H; +*> = 'U': Upper triangular, form is A = U**H*T*U; *> = 'L': Lower triangular, form is A = L*T*L**H. *> \endverbatim *> @@ -97,14 +97,16 @@ *> The leading dimension of the array B. LDB >= max(1,N). *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim -*> WORK is DOUBLE array, dimension (MAX(1,LWORK)) +*> WORK is COMPLEX array, dimension (MAX(1,LWORK)) *> \endverbatim *> *> \param[in] LWORK *> \verbatim -*> LWORK is INTEGER, LWORK >= MAX(1,3*N-2). 
+*> LWORK is INTEGER +*> The dimension of the array WORK. LWORK >= max(1,3*N-2). +*> \endverbatim *> *> \param[out] INFO *> \verbatim @@ -198,24 +200,31 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**H*T*U. +* +* 1) Forward substitution with U**H +* + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B * -* P**T * B + K = 1 + DO WHILE ( K.LE.N ) + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + K = K + 1 + END DO * - K = 1 - DO WHILE ( K.LE.N ) - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - K = K + 1 - END DO +* Compute U**H \ B -> B [ (U**H \P**T * B) ] * -* Compute (U \P**T * B) -> B [ (U \P**T * B) ] + CALL CTRSM( 'L', 'U', 'C', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB) + END IF * - CALL CTRSM('L', 'U', 'C', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) +* 2) Solve with triangular matrix T * -* Compute T \ B -> B [ T \ (U \P**T * B) ] +* Compute T \ B -> B [ T \ (U**H \P**T * B) ] * CALL CLACPY( 'F', 1, N, A(1, 1), LDA+1, WORK(N), 1) IF( N.GT.1 ) THEN @@ -226,65 +235,82 @@ CALL CGTSV(N, NRHS, WORK(1), WORK(N), WORK(2*N), B, LDB, $ INFO) * -* Compute (U**T \ B) -> B [ U**T \ (T \ (U \P**T * B) ) ] +* 3) Backward substitution with U +* + IF( N.GT.1 ) THEN * - CALL CTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B(2, 1), LDB) +* Compute U \ B -> B [ U \ (T \ (U**H \P**T * B) ) ] * -* Pivot, P * B [ P * (U**T \ (T \ (U \P**T * B) )) ] + CALL CTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B(2, 1), LDB) * - K = N - DO WHILE ( K.GE.1 ) - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - K = K - 1 - END DO +* Pivot, P * B -> B [ P * (U \ (T \ (U**H \P**T * B) )) ] +* + K = N + DO WHILE ( K.GE.1 ) + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + K = K - 1 + END DO + END IF * ELSE * -* Solve A*X = B, where A = L*T*L**T. +* Solve A*X = B, where A = L*T*L**H. 
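*     The restructured lower branch mirrors the upper one: the new
*     IF( N.GT.1 ) guards skip the CTRSM calls (and the pivoting,
*     which only permutes rows 2:N) for 1-by-1 systems, where only
*     the CGTSV solve with T is meaningful.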
* -* Pivot, P**T * B +* 1) Forward substitution with L * - K = 1 - DO WHILE ( K.LE.N ) - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - K = K + 1 - END DO + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B +* + K = 1 + DO WHILE ( K.LE.N ) + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + K = K + 1 + END DO * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute L \ B -> B [ (L \P**T * B) ] +* + CALL CTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1), + $ LDA, B(2, 1), LDB ) + END IF * - CALL CTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1), LDA, - $ B(2, 1), LDB) +* 2) Solve with triangular matrix T * * Compute T \ B -> B [ T \ (L \P**T * B) ] * CALL CLACPY( 'F', 1, N, A(1, 1), LDA+1, WORK(N), 1) IF( N.GT.1 ) THEN - CALL CLACPY( 'F', 1, N-1, A( 2, 1 ), LDA+1, WORK( 1 ), 1) + CALL CLACPY( 'F', 1, N-1, A( 2, 1 ), LDA+1, WORK( 1 ), 1 ) CALL CLACPY( 'F', 1, N-1, A( 2, 1 ), LDA+1, WORK( 2*N ), 1) CALL CLACGV( N-1, WORK( 2*N ), 1 ) END IF CALL CGTSV(N, NRHS, WORK(1), WORK(N), WORK(2*N), B, LDB, $ INFO) * -* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] +* 3) Backward substitution with L**H * - CALL CTRSM( 'L', 'L', 'C', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) + IF( N.GT.1 ) THEN +* +* Compute (L**H \ B) -> B [ L**H \ (T \ (L \P**T * B) ) ] * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] + CALL CTRSM( 'L', 'L', 'C', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB ) * - K = N - DO WHILE ( K.GE.1 ) - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - K = K - 1 - END DO +* Pivot, P * B -> B [ P * (L**H \ (T \ (L \P**T * B) )) ] +* + K = N + DO WHILE ( K.GE.1 ) + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + K = K - 1 + END DO + END IF * END IF * diff --git a/lapack-netlib/SRC/chetrs_aa_2stage.f b/lapack-netlib/SRC/chetrs_aa_2stage.f index 05d09275b..979d80a7c 100644 --- a/lapack-netlib/SRC/chetrs_aa_2stage.f +++ b/lapack-netlib/SRC/chetrs_aa_2stage.f @@ -38,7 +38,7 @@ *> \verbatim *> *> CHETRS_AA_2STAGE solves a system of linear equations A*X = B with a real -*> hermitian matrix A using the factorization A = U*T*U**T or +*> hermitian matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by CHETRF_AA_2STAGE. *> \endverbatim * @@ -50,7 +50,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -210,15 +210,15 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. * IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL CLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (U**T \P**T * B) -> B [ (U**T \P**T * B) ] +* Compute (U**T \ B) -> B [ (U**T \P**T * B) ] * CALL CTRSM( 'L', 'U', 'C', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) diff --git a/lapack-netlib/SRC/chgeqz.f b/lapack-netlib/SRC/chgeqz.f index 73d35621c..1616840ec 100644 --- a/lapack-netlib/SRC/chgeqz.f +++ b/lapack-netlib/SRC/chgeqz.f @@ -743,8 +743,14 @@ * * Exceptional shift. Chosen for no particularly good reason. * - ESHIFT = ESHIFT + (ASCALE*H(ILAST,ILAST-1))/ - $ (BSCALE*T(ILAST-1,ILAST-1)) + IF( ( IITER / 20 )*20.EQ.IITER .AND. 
+ $ BSCALE*ABS1(T( ILAST, ILAST )).GT.SAFMIN ) THEN + ESHIFT = ESHIFT + ( ASCALE*H( ILAST, + $ ILAST ) )/( BSCALE*T( ILAST, ILAST ) ) + ELSE + ESHIFT = ESHIFT + ( ASCALE*H( ILAST, + $ ILAST-1 ) )/( BSCALE*T( ILAST-1, ILAST-1 ) ) + END IF SHIFT = ESHIFT END IF * diff --git a/lapack-netlib/SRC/chseqr.f b/lapack-netlib/SRC/chseqr.f index 34bf49249..cfcf725b2 100644 --- a/lapack-netlib/SRC/chseqr.f +++ b/lapack-netlib/SRC/chseqr.f @@ -69,7 +69,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -86,7 +86,7 @@ *> set by a previous call to CGEBAL, and then passed to ZGEHRD *> when the matrix output by CGEBAL is reduced to Hessenberg *> form. Otherwise ILO and IHI should be set to 1 and N -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -98,17 +98,17 @@ *> triangular matrix T from the Schur decomposition (the *> Schur form). If INFO = 0 and JOB = 'E', the contents of *> H are unspecified on exit. (The output value of H when -*> INFO.GT.0 is given under the description of INFO below.) +*> INFO > 0 is given under the description of INFO below.) *> *> Unlike earlier versions of CHSEQR, this subroutine may -*> explicitly H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 +*> explicitly H(i,j) = 0 for i > j and j = 1, 2, ... ILO-1 *> or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). *> \endverbatim *> *> \param[out] W @@ -131,7 +131,7 @@ *> if INFO = 0, Z contains Q*Z. *> Normally Q is the unitary matrix generated by CUNGHR *> after the call to CGEHRD which formed the Hessenberg matrix -*> H. (The output value of Z when INFO.GT.0 is given under +*> H. (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -139,7 +139,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if COMPZ = 'I' or -*> COMPZ = 'V', then LDZ.GE.MAX(1,N). Otherwize, LDZ.GE.1. +*> COMPZ = 'V', then LDZ >= MAX(1,N). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -152,7 +152,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient and delivers very good and sometimes *> optimal performance. However, LWORK as large as 11*N *> may be required for optimal performance. A workspace @@ -170,21 +170,21 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .LT. 0: if INFO = -i, the i-th argument had an illegal +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal *> value -*> .GT. 0: if INFO = i, CHSEQR failed to compute all of -*> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR -*> and WI contain those eigenvalues which have been +*> > 0: if INFO = i, CHSEQR failed to compute all of +*> the eigenvalues. Elements 1:ilo-1 and i+1:n of W +*> contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 
0 and JOB = 'E', then on exit, the +*> If INFO > 0 and JOB = 'E', then on exit, the *> remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and JOB = 'S', then on exit +*> If INFO > 0 and JOB = 'S', then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -192,19 +192,19 @@ *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and COMPZ = 'V', then on exit +*> If INFO > 0 and COMPZ = 'V', then on exit *> *> (final value of Z) = (initial value of Z)*U *> *> where U is the unitary matrix in (*) (regard- *> less of the value of JOB.) *> -*> If INFO .GT. 0 and COMPZ = 'I', then on exit +*> If INFO > 0 and COMPZ = 'I', then on exit *> (final value of Z) = U *> where U is the unitary matrix in (*) (regard- *> less of the value of JOB.) *> -*> If INFO .GT. 0 and COMPZ = 'N', then Z is not +*> If INFO > 0 and COMPZ = 'N', then Z is not *> accessed. *> \endverbatim * @@ -244,8 +244,8 @@ *> This depends on ILO, IHI and NS. NS is the *> number of simultaneous shifts returned *> by ILAENV(ISPEC=15). (See ISPEC=15 below.) -*> The default for (IHI-ILO+1).LE.500 is NS. -*> The default for (IHI-ILO+1).GT.500 is 3*NS/2. +*> The default for (IHI-ILO+1) <= 500 is NS. +*> The default for (IHI-ILO+1) > 500 is 3*NS/2. *> *> ISPEC=14: Nibble crossover point. (See IPARMQ for *> details.) Default: 14% of deflation window @@ -323,8 +323,8 @@ PARAMETER ( NTINY = 11 ) * * ==== NL allocates some local workspace to help small matrices -* . through a rare CLAHQR failure. NL .GT. NTINY = 11 is -* . required and NL .LE. NMIN = ILAENV(ISPEC=12,...) is recom- +* . through a rare CLAHQR failure. NL > NTINY = 11 is +* . required and NL <= NMIN = ILAENV(ISPEC=12,...) is recom- * . mended. (The default value of NMIN is 75.) Using NL = 49 * . allows up to six simultaneous shifts and a 16-by-16 * . deflation window. ==== diff --git a/lapack-netlib/SRC/cla_gbrcond_c.f b/lapack-netlib/SRC/cla_gbrcond_c.f index 123aee26e..c382ac210 100644 --- a/lapack-netlib/SRC/cla_gbrcond_c.f +++ b/lapack-netlib/SRC/cla_gbrcond_c.f @@ -132,13 +132,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/cla_gbrcond_x.f b/lapack-netlib/SRC/cla_gbrcond_x.f index d04aa7fb8..46991ea14 100644 --- a/lapack-netlib/SRC/cla_gbrcond_x.f +++ b/lapack-netlib/SRC/cla_gbrcond_x.f @@ -125,13 +125,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/cla_gbrfsx_extended.f b/lapack-netlib/SRC/cla_gbrfsx_extended.f index 888ecd4f7..9f066137b 100644 --- a/lapack-netlib/SRC/cla_gbrfsx_extended.f +++ b/lapack-netlib/SRC/cla_gbrfsx_extended.f @@ -65,19 +65,19 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. 
-*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] TRANS_TYPE *> \verbatim *> TRANS_TYPE is INTEGER *> Specifies the transposition operation on A. -*> The value is defined by ILATRANS(T) where T is a CHARACTER and -*> T = 'N': No transpose +*> The value is defined by ILATRANS(T) where T is a CHARACTER and T +*> = 'N': No transpose *> = 'T': Transpose *> = 'C': Conjugate transpose *> \endverbatim @@ -269,7 +269,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/cla_gercond_c.f b/lapack-netlib/SRC/cla_gercond_c.f index aabdc0bb9..1a2e8230e 100644 --- a/lapack-netlib/SRC/cla_gercond_c.f +++ b/lapack-netlib/SRC/cla_gercond_c.f @@ -21,7 +21,7 @@ * REAL FUNCTION CLA_GERCOND_C( TRANS, N, A, LDA, AF, LDAF, IPIV, C, * CAPPLY, INFO, WORK, RWORK ) * -* .. Scalar Aguments .. +* .. Scalar Arguments .. * CHARACTER TRANS * LOGICAL CAPPLY * INTEGER N, LDA, LDAF, INFO @@ -114,13 +114,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. @@ -147,7 +147,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * -* .. Scalar Aguments .. +* .. Scalar Arguments .. CHARACTER TRANS LOGICAL CAPPLY INTEGER N, LDA, LDAF, INFO diff --git a/lapack-netlib/SRC/cla_gercond_x.f b/lapack-netlib/SRC/cla_gercond_x.f index 6dce99f62..46e9b039f 100644 --- a/lapack-netlib/SRC/cla_gercond_x.f +++ b/lapack-netlib/SRC/cla_gercond_x.f @@ -107,13 +107,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/cla_gerfsx_extended.f b/lapack-netlib/SRC/cla_gerfsx_extended.f index 2e0596334..d231733e6 100644 --- a/lapack-netlib/SRC/cla_gerfsx_extended.f +++ b/lapack-netlib/SRC/cla_gerfsx_extended.f @@ -65,19 +65,19 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] TRANS_TYPE *> \verbatim *> TRANS_TYPE is INTEGER *> Specifies the transposition operation on A. 
-*> The value is defined by ILATRANS(T) where T is a CHARACTER and -*> T = 'N': No transpose +*> The value is defined by ILATRANS(T) where T is a CHARACTER and T +*> = 'N': No transpose *> = 'T': Transpose *> = 'C': Conjugate transpose *> \endverbatim @@ -257,7 +257,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERRS_C is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERRS_C is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERRS_C(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/cla_hercond_c.f b/lapack-netlib/SRC/cla_hercond_c.f index a5ebaf8a2..5f26822af 100644 --- a/lapack-netlib/SRC/cla_hercond_c.f +++ b/lapack-netlib/SRC/cla_hercond_c.f @@ -110,13 +110,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/cla_hercond_x.f b/lapack-netlib/SRC/cla_hercond_x.f index f0004102f..91c80a668 100644 --- a/lapack-netlib/SRC/cla_hercond_x.f +++ b/lapack-netlib/SRC/cla_hercond_x.f @@ -103,13 +103,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/cla_herfsx_extended.f b/lapack-netlib/SRC/cla_herfsx_extended.f index c69589dfa..d1aa8462c 100644 --- a/lapack-netlib/SRC/cla_herfsx_extended.f +++ b/lapack-netlib/SRC/cla_herfsx_extended.f @@ -66,11 +66,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -254,7 +254,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/cla_porcond_c.f b/lapack-netlib/SRC/cla_porcond_c.f index 7a2bcfe63..c2356590f 100644 --- a/lapack-netlib/SRC/cla_porcond_c.f +++ b/lapack-netlib/SRC/cla_porcond_c.f @@ -102,13 +102,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. 
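The WORK/RWORK reclassification in these condition estimators is caller-visible: both arrays are pure scratch, so nothing needs to be initialized before the call. A hedged sketch against the CLA_PORCOND_C interface documented above (the wrapper name and the automatic arrays are illustrative, not part of LAPACK):

      REAL FUNCTION PO_RCOND_C( UPLO, N, A, LDA, AF, LDAF, C, INFO )
      IMPLICIT NONE
      CHARACTER          UPLO
      INTEGER            N, LDA, LDAF, INFO
      COMPLEX            A( LDA, * ), AF( LDAF, * )
      REAL               C( * )
      REAL               CLA_PORCOND_C
      EXTERNAL           CLA_PORCOND_C
*     Scratch only: declared locally and never initialized, which is
*     exactly what the [out] intent documents.
      COMPLEX            WORK( 2*N )
      REAL               RWORK( N )
      PO_RCOND_C = CLA_PORCOND_C( UPLO, N, A, LDA, AF, LDAF, C,
     $             .TRUE., INFO, WORK, RWORK )
      END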
diff --git a/lapack-netlib/SRC/cla_porcond_x.f b/lapack-netlib/SRC/cla_porcond_x.f index f0844ec89..a5ff3aa61 100644 --- a/lapack-netlib/SRC/cla_porcond_x.f +++ b/lapack-netlib/SRC/cla_porcond_x.f @@ -95,13 +95,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/cla_porfsx_extended.f b/lapack-netlib/SRC/cla_porfsx_extended.f index 3a3409c9e..545bdc445 100644 --- a/lapack-netlib/SRC/cla_porfsx_extended.f +++ b/lapack-netlib/SRC/cla_porfsx_extended.f @@ -65,11 +65,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -246,7 +246,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/cla_porpvgrw.f b/lapack-netlib/SRC/cla_porpvgrw.f index bd2e7af1c..f10299c5a 100644 --- a/lapack-netlib/SRC/cla_porpvgrw.f +++ b/lapack-netlib/SRC/cla_porpvgrw.f @@ -85,7 +85,7 @@ *> The leading dimension of the array AF. LDAF >= max(1,N). *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension (2*N) *> \endverbatim diff --git a/lapack-netlib/SRC/cla_syrcond_c.f b/lapack-netlib/SRC/cla_syrcond_c.f index fc52bf23b..e59e83aa6 100644 --- a/lapack-netlib/SRC/cla_syrcond_c.f +++ b/lapack-netlib/SRC/cla_syrcond_c.f @@ -110,13 +110,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/cla_syrcond_x.f b/lapack-netlib/SRC/cla_syrcond_x.f index f8fb566e7..3edf58f83 100644 --- a/lapack-netlib/SRC/cla_syrcond_x.f +++ b/lapack-netlib/SRC/cla_syrcond_x.f @@ -103,13 +103,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is REAL array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/cla_syrfsx_extended.f b/lapack-netlib/SRC/cla_syrfsx_extended.f index 5d2fa0cbb..92243abcb 100644 --- a/lapack-netlib/SRC/cla_syrfsx_extended.f +++ b/lapack-netlib/SRC/cla_syrfsx_extended.f @@ -66,11 +66,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. 
-*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -254,7 +254,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/cla_syrpvgrw.f b/lapack-netlib/SRC/cla_syrpvgrw.f index ccea462c7..15e55ea7d 100644 --- a/lapack-netlib/SRC/cla_syrpvgrw.f +++ b/lapack-netlib/SRC/cla_syrpvgrw.f @@ -102,7 +102,7 @@ *> as determined by CSYTRF. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension (2*N) *> \endverbatim diff --git a/lapack-netlib/SRC/cla_wwaddw.f b/lapack-netlib/SRC/cla_wwaddw.f index 9267c6df2..08e45ac79 100644 --- a/lapack-netlib/SRC/cla_wwaddw.f +++ b/lapack-netlib/SRC/cla_wwaddw.f @@ -36,7 +36,7 @@ *> CLA_WWADDW adds a vector W into a doubled-single vector (X, Y). *> *> This works for all extant IBM's hex and binary floating point -*> arithmetics, but not for decimal. +*> arithmetic, but not for decimal. *> \endverbatim * * Arguments: diff --git a/lapack-netlib/SRC/clahef_aa.f b/lapack-netlib/SRC/clahef_aa.f index 88bc3d216..934aa92f9 100644 --- a/lapack-netlib/SRC/clahef_aa.f +++ b/lapack-netlib/SRC/clahef_aa.f @@ -288,8 +288,9 @@ * * Swap A(I1, I2+1:N) with A(I2, I2+1:N) * - CALL CSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, - $ A( J1+I2-1, I2+1 ), LDA ) + IF( I2.LT.M ) + $ CALL CSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, + $ A( J1+I2-1, I2+1 ), LDA ) * * Swap A(I1, I1) with A(I2,I2) * @@ -329,13 +330,15 @@ * Compute L(J+2, J+1) = WORK( 3:N ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:N, J) = L(J+2:N, J+1) * - IF( A( K, J+1 ).NE.ZERO ) THEN - ALPHA = ONE / A( K, J+1 ) - CALL CCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) - CALL CSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) - ELSE - CALL CLASET( 'Full', 1, M-J-1, ZERO, ZERO, - $ A( K, J+2 ), LDA) + IF( J.LT.(M-1) ) THEN + IF( A( K, J+1 ).NE.ZERO ) THEN + ALPHA = ONE / A( K, J+1 ) + CALL CCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) + CALL CSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) + ELSE + CALL CLASET( 'Full', 1, M-J-1, ZERO, ZERO, + $ A( K, J+2 ), LDA) + END IF END IF END IF J = J + 1 @@ -440,8 +443,9 @@ * * Swap A(I2+1:N, I1) with A(I2+1:N, I2) * - CALL CSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, - $ A( I2+1, J1+I2-1 ), 1 ) + IF( I2.LT.M ) + $ CALL CSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, + $ A( I2+1, J1+I2-1 ), 1 ) * * Swap A(I1, I1) with A(I2, I2) * @@ -481,13 +485,15 @@ * Compute L(J+2, J+1) = WORK( 3:N ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:N, J) = L(J+2:N, J+1) * - IF( A( J+1, K ).NE.ZERO ) THEN - ALPHA = ONE / A( J+1, K ) - CALL CCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) - CALL CSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) - ELSE - CALL CLASET( 'Full', M-J-1, 1, ZERO, ZERO, - $ A( J+2, K ), LDA ) + IF( J.LT.(M-1) ) THEN + IF( A( J+1, K ).NE.ZERO ) THEN + ALPHA = ONE / A( J+1, K ) + CALL CCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) + CALL CSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) + ELSE + CALL CLASET( 'Full', M-J-1, 1, ZERO, ZERO, + 
$ A( J+2, K ), LDA ) + END IF END IF END IF J = J + 1 diff --git a/lapack-netlib/SRC/clahef_rk.f b/lapack-netlib/SRC/clahef_rk.f index 4d9dfbe8e..cc4603e9b 100644 --- a/lapack-netlib/SRC/clahef_rk.f +++ b/lapack-netlib/SRC/clahef_rk.f @@ -331,7 +331,7 @@ * of A and working backwards, and compute the matrix W = U12*D * for use in updating A11 (note that conjg(W) is actually stored) * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = CZERO @@ -789,7 +789,7 @@ * of A and working forwards, and compute the matrix W = L21*D * for use in updating A22 (note that conjg(W) is actually stored) * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = CZERO * diff --git a/lapack-netlib/SRC/clahqr.f b/lapack-netlib/SRC/clahqr.f index de2b3938b..ef50b5a56 100644 --- a/lapack-netlib/SRC/clahqr.f +++ b/lapack-netlib/SRC/clahqr.f @@ -138,26 +138,26 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: if INFO = i, CLAHQR failed to compute all the +*> = 0: successful exit +*> > 0: if INFO = i, CLAHQR failed to compute all the *> eigenvalues ILO to IHI in a total of 30 iterations *> per eigenvalue; elements i+1:ihi of W contain *> those eigenvalues which have been successfully *> computed. *> -*> If INFO .GT. 0 and WANTT is .FALSE., then on exit, +*> If INFO > 0 and WANTT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the *> eigenvalues of the upper Hessenberg matrix -*> rows and columns ILO thorugh INFO of the final, +*> rows and columns ILO through INFO of the final, *> output value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> (*) (initial value of H)*U = U*(final value of H) -*> where U is an orthognal matrix. The final +*> where U is an orthogonal matrix. The final *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> (final value of Z) = (initial value of Z)*U *> where U is the orthogonal matrix in (*) *> (regardless of the value of WANTT.) diff --git a/lapack-netlib/SRC/clamswlq.f b/lapack-netlib/SRC/clamswlq.f index f2f9ab7f9..f6909b666 100644 --- a/lapack-netlib/SRC/clamswlq.f +++ b/lapack-netlib/SRC/clamswlq.f @@ -1,3 +1,4 @@ +*> \brief \b CLAMSWLQ * * Definition: * =========== diff --git a/lapack-netlib/SRC/clamtsqr.f b/lapack-netlib/SRC/clamtsqr.f index 77d09a573..c71e4aa7d 100644 --- a/lapack-netlib/SRC/clamtsqr.f +++ b/lapack-netlib/SRC/clamtsqr.f @@ -1,3 +1,4 @@ +*> \brief \b CLAMTSQR * * Definition: * =========== diff --git a/lapack-netlib/SRC/clangb.f b/lapack-netlib/SRC/clangb.f index 14a163ea7..9818360fe 100644 --- a/lapack-netlib/SRC/clangb.f +++ b/lapack-netlib/SRC/clangb.f @@ -130,6 +130,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER KL, KU, LDAB, N @@ -147,14 +148,17 @@ * .. * .. Local Scalars .. INTEGER I, J, K, L - REAL SCALE, SUM, VALUE, TEMP + REAL SUM, VALUE, TEMP +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. 
Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT @@ -207,15 +211,22 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N L = MAX( 1, J-KU ) K = KU + 1 - J + L - CALL CLASSQ( MIN( N, J+KL )-L+1, AB( K, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( MIN( N, J+KL )-L+1, AB( K, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANGB = VALUE diff --git a/lapack-netlib/SRC/clange.f b/lapack-netlib/SRC/clange.f index 50f705a18..00895c8bc 100644 --- a/lapack-netlib/SRC/clange.f +++ b/lapack-netlib/SRC/clange.f @@ -120,6 +120,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER LDA, M, N @@ -137,14 +138,17 @@ * .. * .. Local Scalars .. INTEGER I, J - REAL SCALE, SUM, VALUE, TEMP + REAL SUM, VALUE, TEMP +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT @@ -196,13 +200,19 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N - CALL CLASSQ( M, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( M, A( 1, J ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANGE = VALUE diff --git a/lapack-netlib/SRC/clanhb.f b/lapack-netlib/SRC/clanhb.f index 2b034b19b..f78de23df 100644 --- a/lapack-netlib/SRC/clanhb.f +++ b/lapack-netlib/SRC/clanhb.f @@ -137,6 +137,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER K, LDAB, N @@ -154,14 +155,17 @@ * .. * .. Local Scalars .. INTEGER I, J, L - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, REAL, SQRT @@ -233,39 +237,57 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
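*     SCOMBSSQ( SSQ, COLSSQ ) merges two ( scale, sum-of-squares )
*     pairs: on return SSQ(1)**2 * SSQ(2) equals the old
*     SSQ(1)**2*SSQ(2) + COLSSQ(1)**2*COLSSQ(2), with SSQ(1) set to
*     the larger of the two scales so the combination cannot
*     overflow.  This is what lets each column be summed separately.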
+* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( K.GT.0 ) THEN IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL CLASSQ( MIN( J-1, K ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE L = K + 1 ELSE DO 120 J = 1, N - 1 - CALL CLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE L = 1 END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) ELSE L = 1 END IF +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE DO 130 J = 1, N IF( REAL( AB( L, J ) ).NE.ZERO ) THEN ABSA = ABS( REAL( AB( L, J ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + CALL SCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANHB = VALUE diff --git a/lapack-netlib/SRC/clanhe.f b/lapack-netlib/SRC/clanhe.f index 101d778eb..33d6c8b01 100644 --- a/lapack-netlib/SRC/clanhe.f +++ b/lapack-netlib/SRC/clanhe.f @@ -129,6 +129,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER LDA, N @@ -146,14 +147,17 @@ * .. * .. Local Scalars .. INTEGER I, J - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, REAL, SQRT @@ -223,31 +227,48 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL CLASSQ( J-1, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( J-1, A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL CLASSQ( N-J, A( J+1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( N-J, A( J+1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* DO 130 I = 1, N IF( REAL( A( I, I ) ).NE.ZERO ) THEN ABSA = ABS( REAL( A( I, I ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( SSQ( 1 ).LT.ABSA ) THEN + SSQ( 2 ) = ONE + SSQ( 2 )*( SSQ( 1 ) / ABSA )**2 + SSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + SSQ( 2 ) = SSQ( 2 ) + ( ABSA / SSQ( 1 ) )**2 END IF END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANHE = VALUE diff --git a/lapack-netlib/SRC/clanhp.f b/lapack-netlib/SRC/clanhp.f index c8927d503..e0e23abc7 100644 --- a/lapack-netlib/SRC/clanhp.f +++ b/lapack-netlib/SRC/clanhp.f @@ -122,6 +122,7 @@ * -- Univ. of California Berkeley, Univ. 
of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER N @@ -139,14 +140,17 @@ * .. * .. Local Scalars .. INTEGER I, J, K - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, REAL, SQRT @@ -225,31 +229,48 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE K = 2 IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL CLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( J-1, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + J 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL CLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( N-J, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 120 CONTINUE END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* K = 1 + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE DO 130 I = 1, N IF( REAL( AP( K ) ).NE.ZERO ) THEN ABSA = ABS( REAL( AP( K ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF IF( LSAME( UPLO, 'U' ) ) THEN @@ -258,7 +279,8 @@ K = K + N - I + 1 END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + CALL SCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANHP = VALUE diff --git a/lapack-netlib/SRC/clanhs.f b/lapack-netlib/SRC/clanhs.f index 35623b73d..661b4f901 100644 --- a/lapack-netlib/SRC/clanhs.f +++ b/lapack-netlib/SRC/clanhs.f @@ -114,6 +114,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER LDA, N @@ -131,14 +132,17 @@ * .. * .. Local Scalars .. INTEGER I, J - REAL SCALE, SUM, VALUE + REAL SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT @@ -190,13 +194,20 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N - CALL CLASSQ( MIN( N, J+1 ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( MIN( N, J+1 ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANHS = VALUE diff --git a/lapack-netlib/SRC/clansb.f b/lapack-netlib/SRC/clansb.f index fbc50674c..1085fd880 100644 --- a/lapack-netlib/SRC/clansb.f +++ b/lapack-netlib/SRC/clansb.f @@ -135,6 +135,7 @@ * -- Univ. of California Berkeley, Univ. 
of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER K, LDAB, N @@ -152,14 +153,17 @@ * .. * .. Local Scalars .. INTEGER I, J, L - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT @@ -227,29 +231,47 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( K.GT.0 ) THEN IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL CLASSQ( MIN( J-1, K ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE L = K + 1 ELSE DO 120 J = 1, N - 1 - CALL CLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE L = 1 END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) ELSE L = 1 END IF - CALL CLASSQ( N, AB( L, 1 ), LDAB, SCALE, SUM ) - VALUE = SCALE*SQRT( SUM ) +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( N, AB( L, 1 ), LDAB, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANSB = VALUE diff --git a/lapack-netlib/SRC/clansp.f b/lapack-netlib/SRC/clansp.f index fd64366c6..628dc0a75 100644 --- a/lapack-netlib/SRC/clansp.f +++ b/lapack-netlib/SRC/clansp.f @@ -120,6 +120,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER N @@ -137,14 +138,17 @@ * .. * .. Local Scalars .. INTEGER I, J, K - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, AIMAG, REAL, SQRT @@ -219,40 +223,57 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
+* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE K = 2 IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL CLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( J-1, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + J 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL CLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( N-J, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 120 CONTINUE END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* K = 1 + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE DO 130 I = 1, N IF( REAL( AP( K ) ).NE.ZERO ) THEN ABSA = ABS( REAL( AP( K ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF IF( AIMAG( AP( K ) ).NE.ZERO ) THEN ABSA = ABS( AIMAG( AP( K ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF IF( LSAME( UPLO, 'U' ) ) THEN @@ -261,7 +282,8 @@ K = K + N - I + 1 END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + CALL SCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANSP = VALUE diff --git a/lapack-netlib/SRC/clansy.f b/lapack-netlib/SRC/clansy.f index 3aa787410..537fb7ba9 100644 --- a/lapack-netlib/SRC/clansy.f +++ b/lapack-netlib/SRC/clansy.f @@ -128,6 +128,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER LDA, N @@ -145,14 +146,17 @@ * .. * .. Local Scalars .. INTEGER I, J - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT @@ -218,21 +222,39 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
+* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL CLASSQ( J-1, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( J-1, A( 1, J ), 1, COLSSQ(1), COLSSQ(2) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL CLASSQ( N-J, A( J+1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( N-J, A( J+1, J ), 1, COLSSQ(1), COLSSQ(2) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE END IF - SUM = 2*SUM - CALL CLASSQ( N, A, LDA+1, SCALE, SUM ) - VALUE = SCALE*SQRT( SUM ) + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( N, A, LDA+1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANSY = VALUE diff --git a/lapack-netlib/SRC/clantb.f b/lapack-netlib/SRC/clantb.f index 4b4361c79..8066d0ef6 100644 --- a/lapack-netlib/SRC/clantb.f +++ b/lapack-netlib/SRC/clantb.f @@ -146,6 +146,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER K, LDAB, N @@ -164,14 +165,17 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J, L - REAL SCALE, SUM, VALUE + REAL SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT @@ -313,46 +317,61 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N IF( K.GT.0 ) THEN DO 280 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL CLASSQ( MIN( J-1, K ), - $ AB( MAX( K+2-J, 1 ), J ), 1, SCALE, - $ SUM ) + $ AB( MAX( K+2-J, 1 ), J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 280 CONTINUE END IF ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 290 J = 1, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL CLASSQ( MIN( J, K+1 ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 290 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N IF( K.GT.0 ) THEN DO 300 J = 1, N - 1 - CALL CLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 300 CONTINUE END IF ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 310 J = 1, N - CALL CLASSQ( MIN( N-J+1, K+1 ), AB( 1, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( MIN( N-J+1, K+1 ), AB( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 310 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANTB = VALUE diff --git a/lapack-netlib/SRC/clantp.f b/lapack-netlib/SRC/clantp.f index 148ac5436..b0c48eb46 100644 --- a/lapack-netlib/SRC/clantp.f +++ b/lapack-netlib/SRC/clantp.f @@ -130,6 +130,7 @@ * -- Univ. of California Berkeley, Univ. 
of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER N @@ -148,14 +149,17 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J, K - REAL SCALE, SUM, VALUE + REAL SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT @@ -308,45 +312,64 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N K = 2 DO 280 J = 2, N - CALL CLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( J-1, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + J 280 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE K = 1 DO 290 J = 1, N - CALL CLASSQ( J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( J, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + J 290 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N K = 2 DO 300 J = 1, N - 1 - CALL CLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( N-J, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 300 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE K = 1 DO 310 J = 1, N - CALL CLASSQ( N-J+1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( N-J+1, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 310 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANTP = VALUE diff --git a/lapack-netlib/SRC/clantr.f b/lapack-netlib/SRC/clantr.f index 4e1843d3d..3b361cc97 100644 --- a/lapack-netlib/SRC/clantr.f +++ b/lapack-netlib/SRC/clantr.f @@ -147,6 +147,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER LDA, M, N @@ -165,14 +166,17 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J - REAL SCALE, SUM, VALUE + REAL SUM, VALUE +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. * .. External Subroutines .. - EXTERNAL CLASSQ + EXTERNAL CLASSQ, SCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT @@ -283,7 +287,7 @@ END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - DO 210 I = 1, N + DO 210 I = 1, MIN( M, N ) WORK( I ) = ONE 210 CONTINUE DO 220 I = N + 1, M @@ -313,38 +317,56 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
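Two of the initializations above deserve a note. In CLANSP/CLANSY each stored strictly off-diagonal entry stands for two entries of the full matrix, which is why SSQ( 2 ) is doubled before the diagonal pass:

   normF(A)**2 = 2 * ( sum over i < j of abs(a(i,j))**2 )
               + sum over i of abs(a(i,i))**2.

In the triangular routines with DIAG = 'U' the unit diagonal is not stored, so the running pair starts at SSQ( 1 ) = ONE, SSQ( 2 ) = N (or MIN( M, N )): that is, SSQ(1)**2 * SSQ(2) already accounts for the implicit ones on the diagonal.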
* IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = MIN( M, N ) + SSQ( 1 ) = ONE + SSQ( 2 ) = MIN( M, N ) DO 290 J = 2, N - CALL CLASSQ( MIN( M, J-1 ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( MIN( M, J-1 ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 290 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 300 J = 1, N - CALL CLASSQ( MIN( M, J ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( MIN( M, J ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 300 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = MIN( M, N ) + SSQ( 1 ) = ONE + SSQ( 2 ) = MIN( M, N ) DO 310 J = 1, N - CALL CLASSQ( M-J, A( MIN( M, J+1 ), J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( M-J, A( MIN( M, J+1 ), J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 310 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 320 J = 1, N - CALL CLASSQ( M-J+1, A( J, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL CLASSQ( M-J+1, A( J, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 320 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * CLANTR = VALUE diff --git a/lapack-netlib/SRC/claqps.f b/lapack-netlib/SRC/claqps.f index f47e852a0..d0b7efcd5 100644 --- a/lapack-netlib/SRC/claqps.f +++ b/lapack-netlib/SRC/claqps.f @@ -127,7 +127,7 @@ *> \param[in,out] AUXV *> \verbatim *> AUXV is COMPLEX array, dimension (NB) -*> Auxiliar vector. +*> Auxiliary vector. *> \endverbatim *> *> \param[in,out] F diff --git a/lapack-netlib/SRC/claqr0.f b/lapack-netlib/SRC/claqr0.f index b61c9f1e9..2f0ea20db 100644 --- a/lapack-netlib/SRC/claqr0.f +++ b/lapack-netlib/SRC/claqr0.f @@ -66,7 +66,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -78,12 +78,12 @@ *> \verbatim *> IHI is INTEGER *> It is assumed that H is already upper triangular in rows -*> and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, +*> and columns 1:ILO-1 and IHI+1:N and, if ILO > 1, *> H(ILO,ILO-1) is zero. ILO and IHI are normally set by a *> previous call to CGEBAL, and then passed to CGEHRD when the *> matrix output by CGEBAL is reduced to Hessenberg form. *> Otherwise, ILO and IHI should be set to 1 and N, -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -95,17 +95,17 @@ *> contains the upper triangular matrix T from the Schur *> decomposition (the Schur form). If INFO = 0 and WANT is *> .FALSE., then the contents of H are unspecified on exit. -*> (The output value of H when INFO.GT.0 is given under the +*> (The output value of H when INFO > 0 is given under the *> description of INFO below.) *> -*> This subroutine may explicitly set H(i,j) = 0 for i.GT.j and +*> This subroutine may explicitly set H(i,j) = 0 for i > j and *> j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). 
*> \endverbatim
*>
*> \param[out] W
@@ -127,7 +127,7 @@
*> IHIZ is INTEGER
*> Specify the rows of Z to which transformations must be
*> applied if WANTZ is .TRUE..
-*> 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N.
+*> 1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
*> \endverbatim
*>
*> \param[in,out] Z
@@ -137,7 +137,7 @@
*> If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is
*> replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the
*> orthogonal Schur factor of H(ILO:IHI,ILO:IHI).
-*> (The output value of Z when INFO.GT.0 is given under
+*> (The output value of Z when INFO > 0 is given under
*> the description of INFO below.)
*> \endverbatim
*>
@@ -145,7 +145,7 @@
*> \verbatim
*> LDZ is INTEGER
*> The leading dimension of the array Z. if WANTZ is .TRUE.
-*> then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1.
+*> then LDZ >= MAX(1,IHIZ). Otherwise, LDZ >= 1.
*> \endverbatim
*>
*> \param[out] WORK
@@ -158,7 +158,7 @@
*> \param[in] LWORK
*> \verbatim
*> LWORK is INTEGER
-*> The dimension of the array WORK. LWORK .GE. max(1,N)
+*> The dimension of the array WORK. LWORK >= max(1,N)
*> is sufficient, but LWORK typically as large as 6*N may
*> be required for optimal performance. A workspace query
*> to determine the optimal workspace size is recommended.
@@ -174,19 +174,19 @@
*> \param[out] INFO
*> \verbatim
*> INFO is INTEGER
-*> = 0: successful exit
-*> .GT. 0: if INFO = i, CLAQR0 failed to compute all of
+*> = 0: successful exit
+*> > 0: if INFO = i, CLAQR0 failed to compute all of
*> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR
*> and WI contain those eigenvalues which have been
*> successfully computed. (Failures are rare.)
*>
-*> If INFO .GT. 0 and WANT is .FALSE., then on exit,
+*> If INFO > 0 and WANTT is .FALSE., then on exit,
*> the remaining unconverged eigenvalues are the eigen-
*> values of the upper Hessenberg matrix rows and
*> columns ILO through INFO of the final, output
*> value of H.
*>
-*> If INFO .GT. 0 and WANTT is .TRUE., then on exit
+*> If INFO > 0 and WANTT is .TRUE., then on exit
*>
*> (*) (initial value of H)*U = U*(final value of H)
*>
@@ -194,7 +194,7 @@
*> value of H is upper Hessenberg and triangular in
*> rows and columns INFO+1 through IHI.
*>
-*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit
+*> If INFO > 0 and WANTZ is .TRUE., then on exit
*>
*> (final value of Z(ILO:IHI,ILOZ:IHIZ)
*> = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U
@@ -202,7 +202,7 @@
*> where U is the unitary matrix in (*) (regard-
*> less of the value of WANTT.)
*>
-*> If INFO .GT. 0 and WANTZ is .FALSE., then Z is not
+*> If INFO > 0 and WANTZ is .FALSE., then Z is not
*> accessed.
*> \endverbatim
*
@@ -639,7 +639,7 @@
END IF
END IF
*
-* ==== Use up to NS of the the smallest magnatiude
+* ==== Use up to NS of the smallest magnitude
* . shifts. If there aren't NS shifts available,
* . then use them all, possibly dropping one to
* . make the number of shifts even. ====
diff --git a/lapack-netlib/SRC/claqr1.f b/lapack-netlib/SRC/claqr1.f
index 977947196..87d53871a 100644
--- a/lapack-netlib/SRC/claqr1.f
+++ b/lapack-netlib/SRC/claqr1.f
@@ -64,7 +64,7 @@
*> \verbatim
*> LDH is INTEGER
*> The leading dimension of H as declared in
-*> the calling procedure. LDH.GE.N
+*> the calling procedure. LDH >= N
*> \endverbatim
*>
*> \param[in] S1
diff --git a/lapack-netlib/SRC/claqr2.f b/lapack-netlib/SRC/claqr2.f
index 03e9760cf..fc282b2d6 100644
--- a/lapack-netlib/SRC/claqr2.f
+++ b/lapack-netlib/SRC/claqr2.f
@@ -102,7 +102,7 @@
*> \param[in] NW
*> \verbatim
*> NW is INTEGER
-*> Deflation window size. 1 .LE. NW .LE.
(KBOT-KTOP+1). +*> Deflation window size. 1 <= NW <= (KBOT-KTOP+1). *> \endverbatim *> *> \param[in,out] H @@ -120,7 +120,7 @@ *> \verbatim *> LDH is INTEGER *> Leading dimension of H just as declared in the calling -*> subroutine. N .LE. LDH +*> subroutine. N <= LDH *> \endverbatim *> *> \param[in] ILOZ @@ -132,7 +132,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -148,7 +148,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of Z just as declared in the -*> calling subroutine. 1 .LE. LDZ. +*> calling subroutine. 1 <= LDZ. *> \endverbatim *> *> \param[out] NS @@ -185,13 +185,13 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of V just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[in] NH *> \verbatim *> NH is INTEGER -*> The number of columns of T. NH.GE.NW. +*> The number of columns of T. NH >= NW. *> \endverbatim *> *> \param[out] T @@ -203,14 +203,14 @@ *> \verbatim *> LDT is INTEGER *> The leading dimension of T just as declared in the -*> calling subroutine. NW .LE. LDT +*> calling subroutine. NW <= LDT *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> The number of rows of work array WV available for -*> workspace. NV.GE.NW. +*> workspace. NV >= NW. *> \endverbatim *> *> \param[out] WV @@ -222,7 +222,7 @@ *> \verbatim *> LDWV is INTEGER *> The leading dimension of W just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/claqr3.f b/lapack-netlib/SRC/claqr3.f index 660a58376..84d57d4d6 100644 --- a/lapack-netlib/SRC/claqr3.f +++ b/lapack-netlib/SRC/claqr3.f @@ -99,7 +99,7 @@ *> \param[in] NW *> \verbatim *> NW is INTEGER -*> Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). +*> Deflation window size. 1 <= NW <= (KBOT-KTOP+1). *> \endverbatim *> *> \param[in,out] H @@ -117,7 +117,7 @@ *> \verbatim *> LDH is INTEGER *> Leading dimension of H just as declared in the calling -*> subroutine. N .LE. LDH +*> subroutine. N <= LDH *> \endverbatim *> *> \param[in] ILOZ @@ -129,7 +129,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -145,7 +145,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of Z just as declared in the -*> calling subroutine. 1 .LE. LDZ. +*> calling subroutine. 1 <= LDZ. *> \endverbatim *> *> \param[out] NS @@ -182,13 +182,13 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of V just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[in] NH *> \verbatim *> NH is INTEGER -*> The number of columns of T. NH.GE.NW. +*> The number of columns of T. NH >= NW. *> \endverbatim *> *> \param[out] T @@ -200,14 +200,14 @@ *> \verbatim *> LDT is INTEGER *> The leading dimension of T just as declared in the -*> calling subroutine. NW .LE. LDT +*> calling subroutine. NW <= LDT *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> The number of rows of work array WV available for -*> workspace. NV.GE.NW. +*> workspace. NV >= NW. 
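The LWORK descriptions for CLAQR0/CLAQR4 above recommend a workspace query. A sketch of the usual convention (the surrounding declarations are assumed; only the LWORK = -1 call and the readback are shown):

*     Query the optimal workspace size first (LWORK = -1), then
*     call again with at least the returned size.
      CALL CLAQR0( WANTT, WANTZ, N, ILO, IHI, H, LDH, W, ILOZ,
     $             IHIZ, Z, LDZ, WORK, -1, INFO )
      LWKOPT = INT( WORK( 1 ) )

On a query call the routine performs no computation and simply writes its optimal workspace size into WORK( 1 ).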
*> \endverbatim *> *> \param[out] WV @@ -219,7 +219,7 @@ *> \verbatim *> LDWV is INTEGER *> The leading dimension of W just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/claqr4.f b/lapack-netlib/SRC/claqr4.f index 647fa6774..fba286df7 100644 --- a/lapack-netlib/SRC/claqr4.f +++ b/lapack-netlib/SRC/claqr4.f @@ -74,7 +74,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -86,12 +86,12 @@ *> \verbatim *> IHI is INTEGER *> It is assumed that H is already upper triangular in rows -*> and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, +*> and columns 1:ILO-1 and IHI+1:N and, if ILO > 1, *> H(ILO,ILO-1) is zero. ILO and IHI are normally set by a *> previous call to CGEBAL, and then passed to CGEHRD when the *> matrix output by CGEBAL is reduced to Hessenberg form. *> Otherwise, ILO and IHI should be set to 1 and N, -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -103,17 +103,17 @@ *> contains the upper triangular matrix T from the Schur *> decomposition (the Schur form). If INFO = 0 and WANT is *> .FALSE., then the contents of H are unspecified on exit. -*> (The output value of H when INFO.GT.0 is given under the +*> (The output value of H when INFO > 0 is given under the *> description of INFO below.) *> -*> This subroutine may explicitly set H(i,j) = 0 for i.GT.j and +*> This subroutine may explicitly set H(i,j) = 0 for i > j and *> j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). *> \endverbatim *> *> \param[out] W @@ -135,7 +135,7 @@ *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be *> applied if WANTZ is .TRUE.. -*> 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. +*> 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -145,7 +145,7 @@ *> If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is *> replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the *> orthogonal Schur factor of H(ILO:IHI,ILO:IHI). -*> (The output value of Z when INFO.GT.0 is given under +*> (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -153,7 +153,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if WANTZ is .TRUE. -*> then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. +*> then LDZ >= MAX(1,IHIZ). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -166,7 +166,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient, but LWORK typically as large as 6*N may *> be required for optimal performance. A workspace query *> to determine the optimal workspace size is recommended. @@ -182,19 +182,19 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: if INFO = i, CLAQR4 failed to compute all of +*> = 0: successful exit +*> > 0: if INFO = i, CLAQR4 failed to compute all of *> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR *> and WI contain those eigenvalues which have been *> successfully computed. 
(Failures are rare.)
*>
-*> If INFO .GT. 0 and WANT is .FALSE., then on exit,
+*> If INFO > 0 and WANTT is .FALSE., then on exit,
*> the remaining unconverged eigenvalues are the eigen-
*> values of the upper Hessenberg matrix rows and
*> columns ILO through INFO of the final, output
*> value of H.
*>
-*> If INFO .GT. 0 and WANTT is .TRUE., then on exit
+*> If INFO > 0 and WANTT is .TRUE., then on exit
*>
*> (*) (initial value of H)*U = U*(final value of H)
*>
@@ -202,7 +202,7 @@
*> value of H is upper Hessenberg and triangular in
*> rows and columns INFO+1 through IHI.
*>
-*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit
+*> If INFO > 0 and WANTZ is .TRUE., then on exit
*>
*> (final value of Z(ILO:IHI,ILOZ:IHIZ)
*> = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U
@@ -210,7 +210,7 @@
*> where U is the unitary matrix in (*) (regard-
*> less of the value of WANTT.)
*>
-*> If INFO .GT. 0 and WANTZ is .FALSE., then Z is not
+*> If INFO > 0 and WANTZ is .FALSE., then Z is not
*> accessed.
*> \endverbatim
*
@@ -643,7 +643,7 @@
END IF
END IF
*
-* ==== Use up to NS of the the smallest magnatiude
+* ==== Use up to NS of the smallest magnitude
* . shifts. If there aren't NS shifts available,
* . then use them all, possibly dropping one to
* . make the number of shifts even. ====
diff --git a/lapack-netlib/SRC/claqr5.f b/lapack-netlib/SRC/claqr5.f
index 4c897895d..e4317a3ad 100644
--- a/lapack-netlib/SRC/claqr5.f
+++ b/lapack-netlib/SRC/claqr5.f
@@ -125,7 +125,7 @@
*> \verbatim
*> LDH is INTEGER
*> LDH is the leading dimension of H just as declared in the
-*> calling procedure. LDH.GE.MAX(1,N).
+*> calling procedure. LDH >= MAX(1,N).
*> \endverbatim
*>
*> \param[in] ILOZ
@@ -137,7 +137,7 @@
*> \verbatim
*> IHIZ is INTEGER
*> Specify the rows of Z to which transformations must be
-*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N
+*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N
*> \endverbatim
*>
*> \param[in,out] Z
@@ -153,7 +153,7 @@
*> \verbatim
*> LDZ is INTEGER
*> LDA is the leading dimension of Z just as declared in
-*> the calling procedure. LDZ.GE.N.
+*> the calling procedure. LDZ >= N.
*> \endverbatim
*>
*> \param[out] V
@@ -165,7 +165,7 @@
*> \verbatim
*> LDV is INTEGER
*> LDV is the leading dimension of V as declared in the
-*> calling procedure. LDV.GE.3.
+*> calling procedure. LDV >= 3.
*> \endverbatim
*>
*> \param[out] U
@@ -177,33 +177,14 @@
*> \verbatim
*> LDU is INTEGER
*> LDU is the leading dimension of U just as declared in the
-*> in the calling subroutine. LDU.GE.3*NSHFTS-3.
-*> \endverbatim
-*>
-*> \param[in] NH
-*> \verbatim
-*> NH is INTEGER
-*> NH is the number of columns in array WH available for
-*> workspace. NH.GE.1.
-*> \endverbatim
-*>
-*> \param[out] WH
-*> \verbatim
-*> WH is COMPLEX array, dimension (LDWH,NH)
-*> \endverbatim
-*>
-*> \param[in] LDWH
-*> \verbatim
-*> LDWH is INTEGER
-*> Leading dimension of WH just as declared in the
-*> calling procedure. LDWH.GE.3*NSHFTS-3.
+*> in the calling subroutine. LDU >= 3*NSHFTS-3.
*> \endverbatim
*>
*> \param[in] NV
*> \verbatim
*> NV is INTEGER
*> NV is the number of rows in WV agailable for workspace.
-*> NV.GE.1.
+*> NV >= 1.
*> \endverbatim
*>
*> \param[out] WV
@@ -215,9 +196,28 @@
*> \verbatim
*> LDWV is INTEGER
*> LDWV is the leading dimension of WV as declared in the
-*> in the calling subroutine. LDWV.GE.NV.
+*> in the calling subroutine. LDWV >= NV.
*> \endverbatim
*
+*> \param[in] NH
+*> \verbatim
+*> NH is INTEGER
+*> NH is the number of columns in array WH available for
+*> workspace. NH >= 1.
+*> \endverbatim +*> +*> \param[out] WH +*> \verbatim +*> WH is COMPLEX array, dimension (LDWH,NH) +*> \endverbatim +*> +*> \param[in] LDWH +*> \verbatim +*> LDWH is INTEGER +*> Leading dimension of WH just as declared in the +*> calling procedure. LDWH >= 3*NSHFTS-3. +*> \endverbatim +*> * Authors: * ======== * diff --git a/lapack-netlib/SRC/clarfb.f b/lapack-netlib/SRC/clarfb.f index 8fdd5c89c..a4d429c09 100644 --- a/lapack-netlib/SRC/clarfb.f +++ b/lapack-netlib/SRC/clarfb.f @@ -92,6 +92,8 @@ *> K is INTEGER *> The order of the matrix T (= the number of elementary *> reflectors whose product defines the block reflector). +*> If SIDE = 'L', M >= K >= 0; +*> if SIDE = 'R', N >= K >= 0. *> \endverbatim *> *> \param[in] V diff --git a/lapack-netlib/SRC/clarfx.f b/lapack-netlib/SRC/clarfx.f index 1111c80f7..ad284883d 100644 --- a/lapack-netlib/SRC/clarfx.f +++ b/lapack-netlib/SRC/clarfx.f @@ -94,7 +94,7 @@ *> \param[in] LDC *> \verbatim *> LDC is INTEGER -*> The leading dimension of the array C. LDA >= max(1,M). +*> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/clarfy.f b/lapack-netlib/SRC/clarfy.f index a5743858c..fccd136a8 100644 --- a/lapack-netlib/SRC/clarfy.f +++ b/lapack-netlib/SRC/clarfy.f @@ -103,7 +103,7 @@ * *> \date December 2016 * -*> \ingroup complex_eig +*> \ingroup complexOTHERauxiliary * * ===================================================================== SUBROUTINE CLARFY( UPLO, N, V, INCV, TAU, C, LDC, WORK ) diff --git a/lapack-netlib/SRC/clargv.f b/lapack-netlib/SRC/clargv.f index ba53cae6f..36c5108df 100644 --- a/lapack-netlib/SRC/clargv.f +++ b/lapack-netlib/SRC/clargv.f @@ -200,7 +200,7 @@ FS = FS*SAFMN2 GS = GS*SAFMN2 SCALE = SCALE*SAFMN2 - IF( SCALE.GE.SAFMX2 ) + IF( SCALE.GE.SAFMX2 .AND. COUNT .LT. 20 ) $ GO TO 10 ELSE IF( SCALE.LE.SAFMN2 ) THEN IF( G.EQ.CZERO ) THEN diff --git a/lapack-netlib/SRC/clarrv.f b/lapack-netlib/SRC/clarrv.f index 72fe1f948..a45f55ac3 100644 --- a/lapack-netlib/SRC/clarrv.f +++ b/lapack-netlib/SRC/clarrv.f @@ -143,7 +143,7 @@ *> RTOL2 is REAL *> Parameters for bisection. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) +*> RIGHT-LEFT < MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) *> \endverbatim *> *> \param[in,out] W diff --git a/lapack-netlib/SRC/clartg.f b/lapack-netlib/SRC/clartg.f index da9a1cdef..baa68b657 100644 --- a/lapack-netlib/SRC/clartg.f +++ b/lapack-netlib/SRC/clartg.f @@ -161,7 +161,7 @@ FS = FS*SAFMN2 GS = GS*SAFMN2 SCALE = SCALE*SAFMN2 - IF( SCALE.GE.SAFMX2 ) + IF( SCALE.GE.SAFMX2 .AND. COUNT .LT. 20) $ GO TO 10 ELSE IF( SCALE.LE.SAFMN2 ) THEN IF( G.EQ.CZERO.OR.SISNAN( ABS( G ) ) ) THEN diff --git a/lapack-netlib/SRC/classq.f b/lapack-netlib/SRC/classq.f index 28398596f..92e407ff3 100644 --- a/lapack-netlib/SRC/classq.f +++ b/lapack-netlib/SRC/classq.f @@ -41,7 +41,7 @@ *> where x( i ) = abs( X( 1 + ( i - 1 )*INCX ) ). The value of sumsq is *> assumed to be at least unity and the value of ssq will then satisfy *> -*> 1.0 .le. ssq .le. ( sumsq + 2*n ). +*> 1.0 <= ssq <= ( sumsq + 2*n ). *> *> scale is assumed to be non-negative and scl returns the value *> @@ -65,7 +65,7 @@ *> *> \param[in] X *> \verbatim -*> X is COMPLEX array, dimension (N) +*> X is COMPLEX array, dimension (1+(N-1)*INCX) *> The vector x as described above. *> x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. 
*> \endverbatim
diff --git a/lapack-netlib/SRC/claswlq.f b/lapack-netlib/SRC/claswlq.f
index 5fa2276e8..dcbdc0d52 100644
--- a/lapack-netlib/SRC/claswlq.f
+++ b/lapack-netlib/SRC/claswlq.f
@@ -1,3 +1,4 @@
+*> \brief \b CLASWLQ
*
* Definition:
* ===========
@@ -18,9 +19,20 @@
*>
*> \verbatim
*>
-*> CLASWLQ computes a blocked Short-Wide LQ factorization of a
-*> M-by-N matrix A, where N >= M:
-*> A = L * Q
+*> CLASWLQ computes a blocked Tall-Skinny LQ factorization of
+*> a complex M-by-N matrix A for M <= N:
+*>
+*> A = ( L 0 ) * Q,
+*>
+*> where:
+*>
+*> Q is an N-by-N orthogonal matrix, stored on exit in an implicit
+*> form in the elements above the diagonal of the array A and in
+*> the elements of the array T;
+*> L is a lower-triangular M-by-M matrix stored on exit in
+*> the elements on and below the diagonal of the array A.
+*> 0 is an M-by-(N-M) zero matrix, if M < N, and is not stored.
+*>
*> \endverbatim
*
* Arguments:
@@ -150,7 +162,7 @@
SUBROUTINE CLASWLQ( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK,
$ INFO)
*
-* -- LAPACK computational routine (version 3.7.1) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. --
* June 2017
diff --git a/lapack-netlib/SRC/clasyf_aa.f b/lapack-netlib/SRC/clasyf_aa.f
index 1bc96ee1b..a44a8f5b1 100644
--- a/lapack-netlib/SRC/clasyf_aa.f
+++ b/lapack-netlib/SRC/clasyf_aa.f
@@ -84,7 +84,7 @@
*>
*> \param[in,out] A
*> \verbatim
-*> A is REAL array, dimension (LDA,M) for
+*> A is COMPLEX array, dimension (LDA,M) for
*> the first panel, while dimension (LDA,M+1) for the
*> remaining panels.
*>
@@ -112,7 +112,7 @@
*>
*> \param[in,out] H
*> \verbatim
-*> H is REAL workspace, dimension (LDH,NB).
+*> H is COMPLEX workspace, dimension (LDH,NB).
*>
*> \endverbatim
*>
@@ -124,7 +124,7 @@
*>
*> \param[out] WORK
*> \verbatim
-*> WORK is REAL workspace, dimension (M).
+*> WORK is COMPLEX workspace, dimension (M).
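Returning to the CLASWLQ interface shown above: callers typically size WORK with a query first. A usage sketch (MB, NB and the array sizing are illustrative choices; the LWORK = -1 query convention is taken from the routine's full documentation, which this hunk does not show):

*     Factor a short-wide A (M <= N); on exit L sits in the lower
*     triangle of A, and Q is implicit in A and T.
      CALL CLASWLQ( M, N, MB, NB, A, LDA, T, LDT, WORK, -1, INFO )
      LWORK = INT( WORK( 1 ) )
*     ... ensure WORK has at least LWORK elements ...
      CALL CLASWLQ( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK,
     $              INFO )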
*> \endverbatim *> * @@ -284,8 +284,9 @@ * * Swap A(I1, I2+1:M) with A(I2, I2+1:M) * - CALL CSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, - $ A( J1+I2-1, I2+1 ), LDA ) + IF( I2.LT.M ) + $ CALL CSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, + $ A( J1+I2-1, I2+1 ), LDA ) * * Swap A(I1, I1) with A(I2,I2) * @@ -325,13 +326,15 @@ * Compute L(J+2, J+1) = WORK( 3:M ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:M, J) = L(J+2:M, J+1) * - IF( A( K, J+1 ).NE.ZERO ) THEN - ALPHA = ONE / A( K, J+1 ) - CALL CCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) - CALL CSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) - ELSE - CALL CLASET( 'Full', 1, M-J-1, ZERO, ZERO, - $ A( K, J+2 ), LDA) + IF( J.LT.(M-1) ) THEN + IF( A( K, J+1 ).NE.ZERO ) THEN + ALPHA = ONE / A( K, J+1 ) + CALL CCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) + CALL CSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) + ELSE + CALL CLASET( 'Full', 1, M-J-1, ZERO, ZERO, + $ A( K, J+2 ), LDA) + END IF END IF END IF J = J + 1 @@ -432,8 +435,9 @@ * * Swap A(I2+1:M, I1) with A(I2+1:M, I2) * - CALL CSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, - $ A( I2+1, J1+I2-1 ), 1 ) + IF( I2.LT.M ) + $ CALL CSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, + $ A( I2+1, J1+I2-1 ), 1 ) * * Swap A(I1, I1) with A(I2, I2) * @@ -473,13 +477,15 @@ * Compute L(J+2, J+1) = WORK( 3:M ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:M, J) = L(J+2:M, J+1) * - IF( A( J+1, K ).NE.ZERO ) THEN - ALPHA = ONE / A( J+1, K ) - CALL CCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) - CALL CSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) - ELSE - CALL CLASET( 'Full', M-J-1, 1, ZERO, ZERO, - $ A( J+2, K ), LDA ) + IF( J.LT.(M-1) ) THEN + IF( A( J+1, K ).NE.ZERO ) THEN + ALPHA = ONE / A( J+1, K ) + CALL CCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) + CALL CSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) + ELSE + CALL CLASET( 'Full', M-J-1, 1, ZERO, ZERO, + $ A( J+2, K ), LDA ) + END IF END IF END IF J = J + 1 diff --git a/lapack-netlib/SRC/clasyf_rk.f b/lapack-netlib/SRC/clasyf_rk.f index 0700c5cc2..bd7a0fb45 100644 --- a/lapack-netlib/SRC/clasyf_rk.f +++ b/lapack-netlib/SRC/clasyf_rk.f @@ -330,7 +330,7 @@ * of A and working backwards, and compute the matrix W = U12*D * for use in updating A11 * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = CZERO @@ -658,7 +658,7 @@ * of A and working forwards, and compute the matrix W = L21*D * for use in updating A22 * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = CZERO * diff --git a/lapack-netlib/SRC/clatdf.f b/lapack-netlib/SRC/clatdf.f index 357f66422..557830d1c 100644 --- a/lapack-netlib/SRC/clatdf.f +++ b/lapack-netlib/SRC/clatdf.f @@ -261,7 +261,7 @@ * * Solve for U- part, lockahead for RHS(N) = +-1. This is not done * In BSOLVE and will hopefully give us a better estimate because -* any ill-conditioning of the original matrix is transfered to U +* any ill-conditioning of the original matrix is transferred to U * and not to L. U(N, N) is an approximation to sigma_min(LU). * CALL CCOPY( N-1, RHS, 1, WORK, 1 ) diff --git a/lapack-netlib/SRC/clatsqr.f b/lapack-netlib/SRC/clatsqr.f index dab5774c1..e9c6d77c2 100644 --- a/lapack-netlib/SRC/clatsqr.f +++ b/lapack-netlib/SRC/clatsqr.f @@ -1,3 +1,4 @@ +*> \brief \b CLATSQR * * Definition: * =========== @@ -18,9 +19,23 @@ *> *> \verbatim *> -*> SLATSQR computes a blocked Tall-Skinny QR factorization of -*> an M-by-N matrix A, where M >= N: -*> A = Q * R . 
+*> CLATSQR computes a blocked Tall-Skinny QR factorization of
+*> a complex M-by-N matrix A for M >= N:
+*>
+*> A = Q * ( R ),
+*> ( 0 )
+*>
+*> where:
+*>
+*> Q is an M-by-M orthogonal matrix, stored on exit in an implicit
+*> form in the elements below the diagonal of the array A and in
+*> the elements of the array T;
+*>
+*> R is an upper-triangular N-by-N matrix, stored on exit in
+*> the elements on and above the diagonal of the array A.
+*>
+*> 0 is an (M-N)-by-N zero matrix, and is not stored.
+*>
*> \endverbatim
*
* Arguments:
@@ -149,10 +164,10 @@
SUBROUTINE CLATSQR( M, N, MB, NB, A, LDA, T, LDT, WORK,
$ LWORK, INFO)
*
-* -- LAPACK computational routine (version 3.7.0) --
+* -- LAPACK computational routine (version 3.9.0) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. --
-* December 2016
+* November 2019
*
* .. Scalar Arguments ..
INTEGER INFO, LDA, M, N, MB, NB, LDT, LWORK
diff --git a/lapack-netlib/SRC/claunhr_col_getrfnp.f b/lapack-netlib/SRC/claunhr_col_getrfnp.f
new file mode 100644
index 000000000..66b9c0407
--- /dev/null
+++ b/lapack-netlib/SRC/claunhr_col_getrfnp.f
@@ -0,0 +1,248 @@
+*> \brief \b CLAUNHR_COL_GETRFNP
+*
+* =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
+*
+*> \htmlonly
+*> Download CLAUNHR_COL_GETRFNP + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
+*> [TXT]
+*> \endhtmlonly
+*
+* Definition:
+* ===========
+*
+* SUBROUTINE CLAUNHR_COL_GETRFNP( M, N, A, LDA, D, INFO )
+*
+* .. Scalar Arguments ..
+* INTEGER INFO, LDA, M, N
+* ..
+* .. Array Arguments ..
+* COMPLEX A( LDA, * ), D( * )
+* ..
+*
+*
+*> \par Purpose:
+* =============
+*>
+*> \verbatim
+*>
+*> CLAUNHR_COL_GETRFNP computes the modified LU factorization without
+*> pivoting of a complex general M-by-N matrix A. The factorization has
+*> the form:
+*>
+*> A - S = L * U,
+*>
+*> where:
+*> S is an m-by-n diagonal sign matrix with the diagonal D, so that
+*> D(i) = S(i,i), 1 <= i <= min(M,N). The diagonal D is constructed
+*> as D(i)=-SIGN(A(i,i)), where A(i,i) is the value after performing
+*> i-1 steps of Gaussian elimination. This means that the diagonal
+*> element at each step of "modified" Gaussian elimination is
+*> at least one in absolute value (so that division-by-zero is
+*> not possible during the division by the diagonal element);
+*>
+*> L is an M-by-N lower triangular matrix with unit diagonal elements
+*> (lower trapezoidal if M > N);
+*>
+*> and U is an M-by-N upper triangular matrix
+*> (upper trapezoidal if M < N).
+*>
+*> This routine is an auxiliary routine used in the Householder
+*> reconstruction routine CUNHR_COL. In CUNHR_COL, this routine is
+*> applied to an M-by-N matrix A with orthonormal columns, where each
+*> element is bounded by one in absolute value. With the choice of
+*> the matrix S above, one can show that the diagonal element at each
+*> step of Gaussian elimination is the largest (in absolute value) in
+*> the column on or below the diagonal, so that no pivoting is required
+*> for numerical stability [1].
+*>
+*> For more details on the Householder reconstruction algorithm,
+*> including the modified LU factorization, see [1].
+*>
+*> This is the blocked right-looking version of the algorithm,
+*> calling Level 3 BLAS to update the submatrix. To factorize a block,
+*> this routine calls the recursive routine CLAUNHR_COL_GETRFNP2.
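A one-step numeric illustration of the sign construction described above, under the routine's stated assumption that the input entries are bounded by one in absolute value: if the current diagonal value is A(1,1) = 0.3, then D(1) = -SIGN(1.0, 0.3) = -1.0, and the pivot becomes U(1,1) = A(1,1) - D(1) = 1.3. In general abs(U(i,i)) = abs(A(i,i)) + 1 >= 1, since D(i) always carries the opposite sign, so the later division by the pivot is safe.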
+*> +*> [1] "Reconstructing Householder vectors from tall-skinny QR", +*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen, +*> E. Solomonik, J. Parallel Distrib. Comput., +*> vol. 85, pp. 3-31, 2015. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is COMPLEX array, dimension (LDA,N) +*> On entry, the M-by-N matrix to be factored. +*> On exit, the factors L and U from the factorization +*> A-S=L*U; the unit diagonal elements of L are not stored. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is COMPLEX array, dimension min(M,N) +*> The diagonal elements of the diagonal M-by-N sign matrix S, +*> D(i) = S(i,i), where 1 <= i <= min(M,N). The elements can be +*> only ( +1.0, 0.0 ) or (-1.0, 0.0 ). +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup complexGEcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE CLAUNHR_COL_GETRFNP( M, N, A, LDA, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. + COMPLEX A( LDA, * ), D( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + COMPLEX CONE + PARAMETER ( CONE = ( 1.0E+0, 0.0E+0 ) ) +* .. +* .. Local Scalars .. + INTEGER IINFO, J, JB, NB +* .. +* .. External Subroutines .. + EXTERNAL CGEMM, CLAUNHR_COL_GETRFNP2, CTRSM, XERBLA +* .. +* .. External Functions .. + INTEGER ILAENV + EXTERNAL ILAENV +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters. +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN + INFO = -2 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -4 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CLAUNHR_COL_GETRFNP', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) + $ RETURN +* +* Determine the block size for this environment. +* + + NB = ILAENV( 1, 'CLAUNHR_COL_GETRFNP', ' ', M, N, -1, -1 ) + + IF( NB.LE.1 .OR. NB.GE.MIN( M, N ) ) THEN +* +* Use unblocked code. +* + CALL CLAUNHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) + ELSE +* +* Use blocked code. +* + DO J = 1, MIN( M, N ), NB + JB = MIN( MIN( M, N )-J+1, NB ) +* +* Factor diagonal and subdiagonal blocks. 
+*
+ CALL CLAUNHR_COL_GETRFNP2( M-J+1, JB, A( J, J ), LDA,
+ $ D( J ), IINFO )
+*
+ IF( J+JB.LE.N ) THEN
+*
+* Compute block row of U.
+*
+ CALL CTRSM( 'Left', 'Lower', 'No transpose', 'Unit', JB,
+ $ N-J-JB+1, CONE, A( J, J ), LDA, A( J, J+JB ),
+ $ LDA )
+ IF( J+JB.LE.M ) THEN
+*
+* Update trailing submatrix.
+*
+ CALL CGEMM( 'No transpose', 'No transpose', M-J-JB+1,
+ $ N-J-JB+1, JB, -CONE, A( J+JB, J ), LDA,
+ $ A( J, J+JB ), LDA, CONE, A( J+JB, J+JB ),
+ $ LDA )
+ END IF
+ END IF
+ END DO
+ END IF
+ RETURN
+*
+* End of CLAUNHR_COL_GETRFNP
+*
+ END
diff --git a/lapack-netlib/SRC/claunhr_col_getrfnp2.f b/lapack-netlib/SRC/claunhr_col_getrfnp2.f
new file mode 100644
index 000000000..82fc329ee
--- /dev/null
+++ b/lapack-netlib/SRC/claunhr_col_getrfnp2.f
@@ -0,0 +1,314 @@
+*> \brief \b CLAUNHR_COL_GETRFNP2
+*
+* =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
+*
+*> \htmlonly
+*> Download CLAUNHR_COL_GETRFNP2 + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
+*> [TXT]
+*> \endhtmlonly
+*
+* Definition:
+* ===========
+*
+* RECURSIVE SUBROUTINE CLAUNHR_COL_GETRFNP2( M, N, A, LDA, D, INFO )
+*
+* .. Scalar Arguments ..
+* INTEGER INFO, LDA, M, N
+* ..
+* .. Array Arguments ..
+* COMPLEX A( LDA, * ), D( * )
+* ..
+*
+*
+*> \par Purpose:
+* =============
+*>
+*> \verbatim
+*>
+*> CLAUNHR_COL_GETRFNP2 computes the modified LU factorization without
+*> pivoting of a complex general M-by-N matrix A. The factorization has
+*> the form:
+*>
+*> A - S = L * U,
+*>
+*> where:
+*> S is an m-by-n diagonal sign matrix with the diagonal D, so that
+*> D(i) = S(i,i), 1 <= i <= min(M,N). The diagonal D is constructed
+*> as D(i)=-SIGN(A(i,i)), where A(i,i) is the value after performing
+*> i-1 steps of Gaussian elimination. This means that the diagonal
+*> element at each step of "modified" Gaussian elimination is at
+*> least one in absolute value (so that division-by-zero is not
+*> possible during the division by the diagonal element);
+*>
+*> L is an M-by-N lower triangular matrix with unit diagonal elements
+*> (lower trapezoidal if M > N);
+*>
+*> and U is an M-by-N upper triangular matrix
+*> (upper trapezoidal if M < N).
+*>
+*> This routine is an auxiliary routine used in the Householder
+*> reconstruction routine CUNHR_COL. In CUNHR_COL, this routine is
+*> applied to an M-by-N matrix A with orthonormal columns, where each
+*> element is bounded by one in absolute value. With the choice of
+*> the matrix S above, one can show that the diagonal element at each
+*> step of Gaussian elimination is the largest (in absolute value) in
+*> the column on or below the diagonal, so that no pivoting is required
+*> for numerical stability [1].
+*>
+*> For more details on the Householder reconstruction algorithm,
+*> including the modified LU factorization, see [1].
+*>
+*> This is the recursive version of the LU factorization algorithm.
+*> Denote A - S by B. The algorithm divides the matrix B into four
+*> submatrices:
+*>
+*> [ B11 | B12 ] where B11 is n1 by n1,
+*> B = [ -----|----- ] B21 is (m-n1) by n1,
+*> [ B21 | B22 ] B12 is n1 by n2,
+*> B22 is (m-n1) by n2,
+*> with n1 = min(m,n)/2, n2 = n-n1.
+*>
+*>
+*> The subroutine calls itself to factor B11, solves for B21,
+*> solves for B12, updates B22, then calls itself to factor B22.
+*>
+*> For more details on the recursive LU algorithm, see [2].
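In terms of the partition above, the routine body (further below) realizes the factorization with one recursion and three BLAS calls; writing B11 = L11 * U11 for the factored leading block:

   B21 := B21 * inv( U11 )    (CTRSM, side 'R', upper, non-unit),
   B12 := inv( L11 ) * B12    (CTRSM, side 'L', lower, unit diagonal),
   B22 := B22 - B21 * B12     (CGEMM, the Schur complement update),

after which the recursive call factors the updated B22. Multiplying the block factors back together reproduces B, which is exactly B = L * U in block form.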
+*>
+*> CLAUNHR_COL_GETRFNP2 is called to factorize a block by the blocked
+*> routine CLAUNHR_COL_GETRFNP, which uses blocked code calling
+*> Level 3 BLAS to update the submatrix. However, CLAUNHR_COL_GETRFNP2
+*> is self-sufficient and can be used without CLAUNHR_COL_GETRFNP.
+*>
+*> [1] "Reconstructing Householder vectors from tall-skinny QR",
+*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen,
+*> E. Solomonik, J. Parallel Distrib. Comput.,
+*> vol. 85, pp. 3-31, 2015.
+*>
+*> [2] "Recursion leads to automatic variable blocking for dense linear
+*> algebra algorithms", F. Gustavson, IBM J. of Res. and Dev.,
+*> vol. 41, no. 6, pp. 737-755, 1997.
+*> \endverbatim
+*
+* Arguments:
+* ==========
+*
+*> \param[in] M
+*> \verbatim
+*> M is INTEGER
+*> The number of rows of the matrix A. M >= 0.
+*> \endverbatim
+*>
+*> \param[in] N
+*> \verbatim
+*> N is INTEGER
+*> The number of columns of the matrix A. N >= 0.
+*> \endverbatim
+*>
+*> \param[in,out] A
+*> \verbatim
+*> A is COMPLEX array, dimension (LDA,N)
+*> On entry, the M-by-N matrix to be factored.
+*> On exit, the factors L and U from the factorization
+*> A-S=L*U; the unit diagonal elements of L are not stored.
+*> \endverbatim
+*>
+*> \param[in] LDA
+*> \verbatim
+*> LDA is INTEGER
+*> The leading dimension of the array A. LDA >= max(1,M).
+*> \endverbatim
+*>
+*> \param[out] D
+*> \verbatim
+*> D is COMPLEX array, dimension min(M,N)
+*> The diagonal elements of the diagonal M-by-N sign matrix S,
+*> D(i) = S(i,i), where 1 <= i <= min(M,N). The elements can be
+*> only ( +1.0, 0.0 ) or (-1.0, 0.0 ).
+*> \endverbatim
+*>
+*> \param[out] INFO
+*> \verbatim
+*> INFO is INTEGER
+*> = 0: successful exit
+*> < 0: if INFO = -i, the i-th argument had an illegal value
+*> \endverbatim
+*>
+* Authors:
+* ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+*
+*> \date November 2019
+*
+*> \ingroup complexGEcomputational
+*
+*> \par Contributors:
+* ==================
+*>
+*> \verbatim
+*>
+*> November 2019, Igor Kozachenko,
+*> Computer Science Division,
+*> University of California, Berkeley
+*>
+*> \endverbatim
+*
+* =====================================================================
+ RECURSIVE SUBROUTINE CLAUNHR_COL_GETRFNP2( M, N, A, LDA, D, INFO )
+ IMPLICIT NONE
+*
+* -- LAPACK computational routine (version 3.9.0) --
+* -- LAPACK is a software package provided by Univ. of Tennessee, --
+* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+* November 2019
+*
+* .. Scalar Arguments ..
+ INTEGER INFO, LDA, M, N
+* ..
+* .. Array Arguments ..
+ COMPLEX A( LDA, * ), D( * )
+* ..
+*
+* =====================================================================
+*
+* .. Parameters ..
+ REAL ONE
+ PARAMETER ( ONE = 1.0E+0 )
+ COMPLEX CONE
+ PARAMETER ( CONE = ( 1.0E+0, 0.0E+0 ) )
+* ..
+* .. Local Scalars ..
+ REAL SFMIN
+ INTEGER I, IINFO, N1, N2
+ COMPLEX Z
+* ..
+* .. External Functions ..
+ REAL SLAMCH
+ EXTERNAL SLAMCH
+* ..
+* .. External Subroutines ..
+ EXTERNAL CGEMM, CSCAL, CTRSM, XERBLA
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC ABS, REAL, CMPLX, AIMAG, SIGN, MAX, MIN
+* ..
+* .. Statement Functions ..
+ REAL CABS1
+* ..
+* .. Statement Function definitions ..
+ CABS1( Z ) = ABS( REAL( Z ) ) + ABS( AIMAG( Z ) )
+* ..
+* .. Executable Statements ..
+* +* Test the input parameters +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN + INFO = -2 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -4 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CLAUNHR_COL_GETRFNP2', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) + $ RETURN + + IF ( M.EQ.1 ) THEN +* +* One row case, (also recursion termination case), +* use unblocked code +* +* Transfer the sign +* + D( 1 ) = CMPLX( -SIGN( ONE, REAL( A( 1, 1 ) ) ) ) +* +* Construct the row of U +* + A( 1, 1 ) = A( 1, 1 ) - D( 1 ) +* + ELSE IF( N.EQ.1 ) THEN +* +* One column case, (also recursion termination case), +* use unblocked code +* +* Transfer the sign +* + D( 1 ) = CMPLX( -SIGN( ONE, REAL( A( 1, 1 ) ) ) ) +* +* Construct the row of U +* + A( 1, 1 ) = A( 1, 1 ) - D( 1 ) +* +* Scale the elements 2:M of the column +* +* Determine machine safe minimum +* + SFMIN = SLAMCH('S') +* +* Construct the subdiagonal elements of L +* + IF( CABS1( A( 1, 1 ) ) .GE. SFMIN ) THEN + CALL CSCAL( M-1, CONE / A( 1, 1 ), A( 2, 1 ), 1 ) + ELSE + DO I = 2, M + A( I, 1 ) = A( I, 1 ) / A( 1, 1 ) + END DO + END IF +* + ELSE +* +* Divide the matrix B into four submatrices +* + N1 = MIN( M, N ) / 2 + N2 = N-N1 + +* +* Factor B11, recursive call +* + CALL CLAUNHR_COL_GETRFNP2( N1, N1, A, LDA, D, IINFO ) +* +* Solve for B21 +* + CALL CTRSM( 'R', 'U', 'N', 'N', M-N1, N1, CONE, A, LDA, + $ A( N1+1, 1 ), LDA ) +* +* Solve for B12 +* + CALL CTRSM( 'L', 'L', 'N', 'U', N1, N2, CONE, A, LDA, + $ A( 1, N1+1 ), LDA ) +* +* Update B22, i.e. compute the Schur complement +* B22 := B22 - B21*B12 +* + CALL CGEMM( 'N', 'N', M-N1, N2, N1, -CONE, A( N1+1, 1 ), LDA, + $ A( 1, N1+1 ), LDA, CONE, A( N1+1, N1+1 ), LDA ) +* +* Factor B22, recursive call +* + CALL CLAUNHR_COL_GETRFNP2( M-N1, N2, A( N1+1, N1+1 ), LDA, + $ D( N1+1 ), IINFO ) +* + END IF + RETURN +* +* End of CLAUNHR_COL_GETRFNP2 +* + END diff --git a/lapack-netlib/SRC/cporfsx.f b/lapack-netlib/SRC/cporfsx.f index 872bad36c..3a2db7135 100644 --- a/lapack-netlib/SRC/cporfsx.f +++ b/lapack-netlib/SRC/cporfsx.f @@ -44,7 +44,7 @@ *> \verbatim *> *> CPORFSX improves the computed solution to a system of linear -*> equations when the coefficient matrix is symmetric positive +*> equations when the coefficient matrix is Hermitian positive *> definite, and provides error bounds and backward error estimates *> for the solution. In addition to normwise error bound, the code *> provides maximum componentwise error bound if possible. See @@ -103,7 +103,7 @@ *> \param[in] A *> \verbatim *> A is COMPLEX array, dimension (LDA,N) -*> The symmetric matrix A. If UPLO = 'U', the leading N-by-N +*> The Hermitian matrix A. If UPLO = 'U', the leading N-by-N *> upper triangular part of A contains the upper triangular part *> of the matrix A, and the strictly lower triangular part of A *> is not referenced. If UPLO = 'L', the leading N-by-N lower @@ -134,7 +134,7 @@ *> \param[in,out] S *> \verbatim *> S is REAL array, dimension (N) -*> The row scale factors for A. If EQUED = 'Y', A is multiplied on +*> The scale factors for A. If EQUED = 'Y', A is multiplied on *> the left and right by diag(S). S is an input argument if FACT = *> 'F'; otherwise, S is an output argument. If FACT = 'F' and EQUED *> = 'Y', each element of S must be positive. If S is output, each @@ -262,7 +262,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. 
If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -298,14 +298,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -313,9 +313,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/cposvxx.f b/lapack-netlib/SRC/cposvxx.f index 64d1b67fa..57c2d3feb 100644 --- a/lapack-netlib/SRC/cposvxx.f +++ b/lapack-netlib/SRC/cposvxx.f @@ -45,7 +45,7 @@ *> *> CPOSVXX uses the Cholesky factorization A = U**T*U or A = L*L**T *> to compute the solution to a complex system of linear equations -*> A * X = B, where A is an N-by-N symmetric positive definite matrix +*> A * X = B, where A is an N-by-N Hermitian positive definite matrix *> and X and B are N-by-NRHS matrices. *> *> If requested, both normwise and maximum componentwise error bounds @@ -157,7 +157,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX array, dimension (LDA,N) -*> On entry, the symmetric matrix A, except if FACT = 'F' and EQUED = +*> On entry, the Hermitian matrix A, except if FACT = 'F' and EQUED = *> 'Y', then A must contain the equilibrated matrix *> diag(S)*A*diag(S). If UPLO = 'U', the leading N-by-N upper *> triangular part of A contains the upper triangular part of the @@ -365,7 +365,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -401,14 +401,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. 
Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -416,9 +416,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/cpotrf2.f b/lapack-netlib/SRC/cpotrf2.f index 789843c41..ed4f12cba 100644 --- a/lapack-netlib/SRC/cpotrf2.f +++ b/lapack-netlib/SRC/cpotrf2.f @@ -24,7 +24,7 @@ *> *> \verbatim *> -*> CPOTRF2 computes the Cholesky factorization of a real symmetric +*> CPOTRF2 computes the Cholesky factorization of a Hermitian *> positive definite matrix A using the recursive algorithm. *> *> The factorization has the form @@ -63,7 +63,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX array, dimension (LDA,N) -*> On entry, the symmetric matrix A. If UPLO = 'U', the leading +*> On entry, the Hermitian matrix A. If UPLO = 'U', the leading *> N-by-N upper triangular part of A contains the upper *> triangular part of the matrix A, and the strictly lower *> triangular part of A is not referenced. If UPLO = 'L', the diff --git a/lapack-netlib/SRC/cstemr.f b/lapack-netlib/SRC/cstemr.f index 22ac842c9..8fb8131d8 100644 --- a/lapack-netlib/SRC/cstemr.f +++ b/lapack-netlib/SRC/cstemr.f @@ -250,13 +250,13 @@ *> \param[in,out] TRYRAC *> \verbatim *> TRYRAC is LOGICAL -*> If TRYRAC.EQ..TRUE., indicates that the code should check whether +*> If TRYRAC = .TRUE., indicates that the code should check whether *> the tridiagonal matrix defines its eigenvalues to high relative *> accuracy. If so, the code uses relative-accuracy preserving *> algorithms that might be (a bit) slower depending on the matrix. *> If the matrix does not define its eigenvalues to high relative *> accuracy, the code can uses possibly faster algorithms. -*> If TRYRAC.EQ..FALSE., the code is not required to guarantee +*> If TRYRAC = .FALSE., the code is not required to guarantee *> relatively accurate eigenvalues and can use the fastest possible *> techniques. *> On exit, a .TRUE. TRYRAC will be set to .FALSE. if the matrix diff --git a/lapack-netlib/SRC/csycon_3.f b/lapack-netlib/SRC/csycon_3.f index 47d52dd15..5c1cb0491 100644 --- a/lapack-netlib/SRC/csycon_3.f +++ b/lapack-netlib/SRC/csycon_3.f @@ -19,7 +19,7 @@ * =========== * * SUBROUTINE CSYCON_3( UPLO, N, A, LDA, E, IPIV, ANORM, RCOND, -* WORK, IWORK, INFO ) +* WORK, INFO ) * * .. Scalar Arguments .. * CHARACTER UPLO @@ -27,7 +27,7 @@ * REAL ANORM, RCOND * .. * .. Array Arguments .. -* INTEGER IPIV( * ), IWORK( * ) +* INTEGER IPIV( * ) * COMPLEX A( LDA, * ), E ( * ), WORK( * ) * .. 
* @@ -129,11 +129,6 @@ *> WORK is COMPLEX array, dimension (2*N) *> \endverbatim *> -*> \param[out] IWORK -*> \verbatim -*> IWORK is INTEGER array, dimension (N) -*> \endverbatim -*> *> \param[out] INFO *> \verbatim *> INFO is INTEGER diff --git a/lapack-netlib/SRC/csyconvf.f b/lapack-netlib/SRC/csyconvf.f index 77ecf46b5..fd5a5e47f 100644 --- a/lapack-netlib/SRC/csyconvf.f +++ b/lapack-netlib/SRC/csyconvf.f @@ -294,7 +294,7 @@ * * Convert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in factorization order where i decreases from N to 1 * I = N @@ -347,7 +347,7 @@ * * Revert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in reverse factorization order where i increases from 1 to N * I = 1 @@ -438,7 +438,7 @@ * * Convert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in factorization order where k increases from 1 to N * I = 1 @@ -491,7 +491,7 @@ * * Revert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in reverse factorization order where i decreases from N to 1 * I = N diff --git a/lapack-netlib/SRC/csyconvf_rook.f b/lapack-netlib/SRC/csyconvf_rook.f index 1146a97c5..7ede26863 100644 --- a/lapack-netlib/SRC/csyconvf_rook.f +++ b/lapack-netlib/SRC/csyconvf_rook.f @@ -285,7 +285,7 @@ * * Convert PERMUTATIONS * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in factorization order where i decreases from N to 1 * I = N @@ -336,7 +336,7 @@ * * Revert PERMUTATIONS * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in reverse factorization order where i increases from 1 to N * I = 1 @@ -426,7 +426,7 @@ * * Convert PERMUTATIONS * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in factorization order where i increases from 1 to N * I = 1 @@ -477,7 +477,7 @@ * * Revert PERMUTATIONS * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in reverse factorization order where i decreases from N to 1 * I = N diff --git a/lapack-netlib/SRC/csyrfsx.f b/lapack-netlib/SRC/csyrfsx.f index 7323ba8eb..4d1bc3ccc 100644 --- a/lapack-netlib/SRC/csyrfsx.f +++ b/lapack-netlib/SRC/csyrfsx.f @@ -271,7 +271,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -307,14 +307,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 
0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -322,9 +322,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/csysv_aa.f b/lapack-netlib/SRC/csysv_aa.f index 87be734cc..2081644b1 100644 --- a/lapack-netlib/SRC/csysv_aa.f +++ b/lapack-netlib/SRC/csysv_aa.f @@ -42,7 +42,7 @@ *> matrices. *> *> Aasen's algorithm is used to factor A as -*> A = U * T * U**T, if UPLO = 'U', or +*> A = U**T * T * U, if UPLO = 'U', or *> A = L * T * L**T, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is symmetric tridiagonal. The factored @@ -75,7 +75,7 @@ *> *> \param[in,out] A *> \verbatim -*> A is REAL array, dimension (LDA,N) +*> A is COMPLEX array, dimension (LDA,N) *> On entry, the symmetric matrix A. If UPLO = 'U', the leading *> N-by-N upper triangular part of A contains the upper *> triangular part of the matrix A, and the strictly lower @@ -86,7 +86,7 @@ *> *> On exit, if INFO = 0, the tridiagonal matrix T and the *> multipliers used to obtain the factor U or L from the -*> factorization A = U*T*U**T or A = L*T*L**T as computed by +*> factorization A = U**T*T*U or A = L*T*L**T as computed by *> CSYTRF. *> \endverbatim *> @@ -106,7 +106,7 @@ *> *> \param[in,out] B *> \verbatim -*> B is REAL array, dimension (LDB,NRHS) +*> B is COMPLEX array, dimension (LDB,NRHS) *> On entry, the N-by-NRHS right hand side matrix B. *> On exit, if INFO = 0, the N-by-NRHS solution matrix X. *> \endverbatim @@ -119,7 +119,7 @@ *> *> \param[out] WORK *> \verbatim -*> WORK is REAL array, dimension (MAX(1,LWORK)) +*> WORK is COMPLEX array, dimension (MAX(1,LWORK)) *> On exit, if INFO = 0, WORK(1) returns the optimal LWORK. *> \endverbatim *> @@ -230,7 +230,7 @@ RETURN END IF * -* Compute the factorization A = U*T*U**T or A = L*T*L**T. +* Compute the factorization A = U**T*T*U or A = L*T*L**T. * CALL CSYTRF_AA( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) IF( INFO.EQ.0 ) THEN diff --git a/lapack-netlib/SRC/csysv_aa_2stage.f b/lapack-netlib/SRC/csysv_aa_2stage.f index a13349824..c5c328c63 100644 --- a/lapack-netlib/SRC/csysv_aa_2stage.f +++ b/lapack-netlib/SRC/csysv_aa_2stage.f @@ -43,8 +43,8 @@ *> matrices. *> *> Aasen's 2-stage algorithm is used to factor A as -*> A = U * T * U**H, if UPLO = 'U', or -*> A = L * T * L**H, if UPLO = 'L', +*> A = U**T * T * U, if UPLO = 'U', or +*> A = L * T * L**T, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is symmetric and band. The matrix T is *> then LU-factored with partial pivoting. The factored form of A @@ -257,7 +257,7 @@ END IF * * -* Compute the factorization A = U*T*U**H or A = L*T*L**H. +* Compute the factorization A = U**T*T*U or A = L*T*L**T. 
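*
*     [Editorial note, not in the source] For a complex symmetric
*     (not Hermitian) matrix the plain transpose U**T appears in the
*     factorization, which is why these hunks replace U**H/L**H with
*     U**T/L**T; the conjugate-transpose forms belong to the
*     Hermitian counterpart CHESV_AA_2STAGE.
*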
* CALL CSYTRF_AA_2STAGE( UPLO, N, A, LDA, TB, LTB, IPIV, IPIV2, $ WORK, LWORK, INFO ) diff --git a/lapack-netlib/SRC/csysvxx.f b/lapack-netlib/SRC/csysvxx.f index 2fd2c8771..7a9aee105 100644 --- a/lapack-netlib/SRC/csysvxx.f +++ b/lapack-netlib/SRC/csysvxx.f @@ -378,7 +378,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -414,14 +414,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -429,9 +429,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/csytf2_rk.f b/lapack-netlib/SRC/csytf2_rk.f index 3b5e53a03..7e39c2dfd 100644 --- a/lapack-netlib/SRC/csytf2_rk.f +++ b/lapack-netlib/SRC/csytf2_rk.f @@ -321,7 +321,7 @@ * * Factorize A as U*D*U**T using the upper triangle of A * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = CZERO @@ -632,7 +632,7 @@ * * Factorize A as L*D*L**T using the lower triangle of A * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = CZERO * diff --git a/lapack-netlib/SRC/csytrf.f b/lapack-netlib/SRC/csytrf.f index c389725e9..af913b8f4 100644 --- a/lapack-netlib/SRC/csytrf.f +++ b/lapack-netlib/SRC/csytrf.f @@ -43,7 +43,7 @@ *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and D is symmetric and block diagonal with -*> with 1-by-1 and 2-by-2 diagonal blocks. +*> 1-by-1 and 2-by-2 diagonal blocks. *> *> This is the blocked version of the algorithm, calling Level 3 BLAS. *> \endverbatim diff --git a/lapack-netlib/SRC/csytrf_aa.f b/lapack-netlib/SRC/csytrf_aa.f index 2f185b0c7..427235bda 100644 --- a/lapack-netlib/SRC/csytrf_aa.f +++ b/lapack-netlib/SRC/csytrf_aa.f @@ -37,7 +37,7 @@ *> CSYTRF_AA computes the factorization of a complex symmetric matrix A *> using the Aasen's algorithm. 
The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a complex symmetric tridiagonal matrix. @@ -63,7 +63,7 @@ *> *> \param[in,out] A *> \verbatim -*> A is REAL array, dimension (LDA,N) +*> A is COMPLEX array, dimension (LDA,N) *> On entry, the symmetric matrix A. If UPLO = 'U', the leading *> N-by-N upper triangular part of A contains the upper *> triangular part of the matrix A, and the strictly lower @@ -94,7 +94,7 @@ *> *> \param[out] WORK *> \verbatim -*> WORK is REAL array, dimension (MAX(1,LWORK)) +*> WORK is COMPLEX array, dimension (MAX(1,LWORK)) *> On exit, if INFO = 0, WORK(1) returns the optimal LWORK. *> \endverbatim *> @@ -223,7 +223,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... * * Copy first row A(1, 1:N) into H(1:n) (stored in WORK(1:N)) @@ -256,7 +256,7 @@ $ A( MAX(1, J), J+1 ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J @@ -375,7 +375,7 @@ $ A( J+1, MAX(1, J) ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J diff --git a/lapack-netlib/SRC/csytrf_aa_2stage.f b/lapack-netlib/SRC/csytrf_aa_2stage.f index 0d0bd156c..0946d61b0 100644 --- a/lapack-netlib/SRC/csytrf_aa_2stage.f +++ b/lapack-netlib/SRC/csytrf_aa_2stage.f @@ -38,7 +38,7 @@ *> CSYTRF_AA_2STAGE computes the factorization of a complex symmetric matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a complex symmetric band matrix with the @@ -275,7 +275,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... 
* DO J = 0, NT-1 @@ -448,12 +448,14 @@ c END IF * > Apply pivots to previous columns of L CALL CSWAP( K-1, A( (J+1)*NB+1, I1 ), 1, $ A( (J+1)*NB+1, I2 ), 1 ) -* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL CSWAP( I2-I1-1, A( I1, I1+1 ), LDA, - $ A( I1+1, I2 ), 1 ) +* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) + IF( I2.GT.(I1+1) ) + $ CALL CSWAP( I2-I1-1, A( I1, I1+1 ), LDA, + $ A( I1+1, I2 ), 1 ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL CSWAP( N-I2, A( I1, I2+1 ), LDA, - $ A( I2, I2+1 ), LDA ) + IF( I2.LT.N ) + $ CALL CSWAP( N-I2, A( I1, I2+1 ), LDA, + $ A( I2, I2+1 ), LDA ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) @@ -637,11 +639,13 @@ c END IF CALL CSWAP( K-1, A( I1, (J+1)*NB+1 ), LDA, $ A( I2, (J+1)*NB+1 ), LDA ) * > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL CSWAP( I2-I1-1, A( I1+1, I1 ), 1, - $ A( I2, I1+1 ), LDA ) + IF( I2.GT.(I1+1) ) + $ CALL CSWAP( I2-I1-1, A( I1+1, I1 ), 1, + $ A( I2, I1+1 ), LDA ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL CSWAP( N-I2, A( I2+1, I1 ), 1, - $ A( I2+1, I2 ), 1 ) + IF( I2.LT.N ) + $ CALL CSWAP( N-I2, A( I2+1, I1 ), 1, + $ A( I2+1, I2 ), 1 ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) diff --git a/lapack-netlib/SRC/csytri2.f b/lapack-netlib/SRC/csytri2.f index 4bd8e4f99..8bee149c4 100644 --- a/lapack-netlib/SRC/csytri2.f +++ b/lapack-netlib/SRC/csytri2.f @@ -62,7 +62,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX array, dimension (LDA,N) -*> On entry, the NB diagonal matrix D and the multipliers +*> On entry, the block diagonal matrix D and the multipliers *> used to obtain the factor U or L as computed by CSYTRF. *> *> On exit, if INFO = 0, the (symmetric) inverse of the original @@ -82,7 +82,7 @@ *> \param[in] IPIV *> \verbatim *> IPIV is INTEGER array, dimension (N) -*> Details of the interchanges and the NB structure of D +*> Details of the interchanges and the block structure of D *> as determined by CSYTRF. *> \endverbatim *> diff --git a/lapack-netlib/SRC/csytrs2.f b/lapack-netlib/SRC/csytrs2.f index 1002b5461..93f2d6a1b 100644 --- a/lapack-netlib/SRC/csytrs2.f +++ b/lapack-netlib/SRC/csytrs2.f @@ -36,7 +36,7 @@ *> *> \verbatim *> -*> CSYTRS2 solves a system of linear equations A*X = B with a COMPLEX +*> CSYTRS2 solves a system of linear equations A*X = B with a complex *> symmetric matrix A using the factorization A = U*D*U**T or *> A = L*D*L**T computed by CSYTRF and converted by CSYCONV. *> \endverbatim diff --git a/lapack-netlib/SRC/csytrs_aa.f b/lapack-netlib/SRC/csytrs_aa.f index 7cf950492..981f8722a 100644 --- a/lapack-netlib/SRC/csytrs_aa.f +++ b/lapack-netlib/SRC/csytrs_aa.f @@ -37,7 +37,7 @@ *> \verbatim *> *> CSYTRS_AA solves a system of linear equations A*X = B with a complex -*> symmetric matrix A using the factorization A = U*T*U**T or +*> symmetric matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by CSYTRF_AA. *> \endverbatim * @@ -49,7 +49,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -68,7 +68,7 @@ *> *> \param[in] A *> \verbatim -*> A is REAL array, dimension (LDA,N) +*> A is COMPLEX array, dimension (LDA,N) *> Details of factors computed by CSYTRF_AA. 
*> \endverbatim *> @@ -86,7 +86,7 @@ *> *> \param[in,out] B *> \verbatim -*> B is REAL array, dimension (LDB,NRHS) +*> B is COMPLEX array, dimension (LDB,NRHS) *> On entry, the right hand side matrix B. *> On exit, the solution matrix X. *> \endverbatim @@ -97,14 +97,16 @@ *> The leading dimension of the array B. LDB >= max(1,N). *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim -*> WORK is DOUBLE array, dimension (MAX(1,LWORK)) +*> WORK is COMPLEX array, dimension (MAX(1,LWORK)) *> \endverbatim *> *> \param[in] LWORK *> \verbatim -*> LWORK is INTEGER, LWORK >= MAX(1,3*N-2). +*> LWORK is INTEGER +*> The dimension of the array WORK. LWORK >= max(1,3*N-2). +*> \endverbatim *> *> \param[out] INFO *> \verbatim @@ -198,22 +200,29 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. +* +* 1) Forward substitution with U**T +* + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B * -* Pivot, P**T * B + DO K = 1, N + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO * - DO K = 1, N - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO +* Compute U**T \ B -> B [ (U**T \P**T * B) ] * -* Compute (U \P**T * B) -> B [ (U \P**T * B) ] + CALL CTRSM( 'L', 'U', 'T', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB) + END IF * - CALL CTRSM('L', 'U', 'T', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) +* 2) Solve with triangular matrix T * -* Compute T \ B -> B [ T \ (U \P**T * B) ] +* Compute T \ B -> B [ T \ (U**T \P**T * B) ] * CALL CLACPY( 'F', 1, N, A( 1, 1 ), LDA+1, WORK( N ), 1) IF( N.GT.1 ) THEN @@ -223,35 +232,48 @@ CALL CGTSV( N, NRHS, WORK( 1 ), WORK( N ), WORK( 2*N ), B, LDB, $ INFO ) * -* Compute (U**T \ B) -> B [ U**T \ (T \ (U \P**T * B) ) ] +* 3) Backward substitution with U +* + IF( N.GT.1 ) THEN * - CALL CTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) +* Compute U \ B -> B [ U \ (T \ (U**T \P**T * B) ) ] * -* Pivot, P * B [ P * (U**T \ (T \ (U \P**T * B) )) ] + CALL CTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB) * - DO K = N, 1, -1 - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO +* Pivot, P * B -> B [ P * (U**T \ (T \ (U \P**T * B) )) ] +* + DO K = N, 1, -1 + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO + END IF * ELSE * * Solve A*X = B, where A = L*T*L**T. 
* -* Pivot, P**T * B +* 1) Forward substitution with L * - DO K = 1, N - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B +* + DO K = 1, N + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO +* +* Compute L \ B -> B [ (L \P**T * B) ] +* + CALL CTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB) + END IF * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* 2) Solve with triangular matrix T * - CALL CTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) * * Compute T \ B -> B [ T \ (L \P**T * B) ] * @@ -263,18 +285,23 @@ CALL CGTSV( N, NRHS, WORK( 1 ), WORK(N), WORK( 2*N ), B, LDB, $ INFO) * -* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] +* 3) Backward substitution with L**T +* + IF( N.GT.1 ) THEN * - CALL CTRSM( 'L', 'L', 'T', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) +* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] + CALL CTRSM( 'L', 'L', 'T', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB) * - DO K = N, 1, -1 - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO +* Pivot, P * B -> B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* + DO K = N, 1, -1 + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL CSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO + END IF * END IF * diff --git a/lapack-netlib/SRC/csytrs_aa_2stage.f b/lapack-netlib/SRC/csytrs_aa_2stage.f index d025c08fe..581910933 100644 --- a/lapack-netlib/SRC/csytrs_aa_2stage.f +++ b/lapack-netlib/SRC/csytrs_aa_2stage.f @@ -36,7 +36,7 @@ *> \verbatim *> *> CSYTRS_AA_2STAGE solves a system of linear equations A*X = B with a complex -*> symmetric matrix A using the factorization A = U*T*U**T or +*> symmetric matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by CSYTRF_AA_2STAGE. *> \endverbatim * @@ -48,7 +48,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -208,15 +208,15 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. 
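*
*     [Editorial note, not in the source] With A = U**T*T*U and the
*     permutation P recorded in IPIV, the solution is assembled as
*        X = P * inv(U) * inv(T) * inv(U**T) * P**T * B,
*     i.e. pivot forward, solve with U**T, solve with the band
*     matrix T, solve with U, and pivot back -- the sequence
*     implemented below.
*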
* IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL CLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (U**T \P**T * B) -> B [ (U**T \P**T * B) ] +* Compute (U**T \ B) -> B [ (U**T \P**T * B) ] * CALL CTRSM( 'L', 'U', 'T', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) @@ -234,7 +234,7 @@ CALL CTRSM( 'L', 'U', 'N', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (U \ (T \ (U**T \P**T * B) )) ] +* Pivot, P * B -> B [ P * (U \ (T \ (U**T \P**T * B) )) ] * CALL CLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * @@ -246,11 +246,11 @@ * IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL CLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute (L \ B) -> B [ (L \P**T * B) ] * CALL CTRSM( 'L', 'L', 'N', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) @@ -268,7 +268,7 @@ CALL CTRSM( 'L', 'L', 'T', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* Pivot, P * B -> B [ P * (L**T \ (T \ (L \P**T * B) )) ] * CALL CLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * diff --git a/lapack-netlib/SRC/ctgsy2.f b/lapack-netlib/SRC/ctgsy2.f index 66a8980d0..5ccdfb1e1 100644 --- a/lapack-netlib/SRC/ctgsy2.f +++ b/lapack-netlib/SRC/ctgsy2.f @@ -67,7 +67,7 @@ *> R * B**H + L * E**H = scale * -F *> *> This case is used to compute an estimate of Dif[(A, D), (B, E)] = -*> = sigma_min(Z) using reverse communicaton with CLACON. +*> = sigma_min(Z) using reverse communication with CLACON. *> *> CTGSY2 also (IJOB >= 1) contributes to the computation in CTGSYL *> of an upper bound on the separation between to matrix pairs. Then @@ -81,7 +81,7 @@ *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 -*> = 'N', solve the generalized Sylvester equation (1). +*> = 'N': solve the generalized Sylvester equation (1). *> = 'T': solve the 'transposed' system (3). *> \endverbatim *> @@ -89,13 +89,13 @@ *> \verbatim *> IJOB is INTEGER *> Specifies what kind of functionality to be performed. -*> =0: solve (1) only. -*> =1: A contribution from this subsystem to a Frobenius -*> norm-based estimate of the separation between two matrix -*> pairs is computed. (look ahead strategy is used). -*> =2: A contribution from this subsystem to a Frobenius -*> norm-based estimate of the separation between two matrix -*> pairs is computed. (SGECON on sub-systems is used.) +*> = 0: solve (1) only. +*> = 1: A contribution from this subsystem to a Frobenius +*> norm-based estimate of the separation between two matrix +*> pairs is computed. (look ahead strategy is used). +*> = 2: A contribution from this subsystem to a Frobenius +*> norm-based estimate of the separation between two matrix +*> pairs is computed. (SGECON on sub-systems is used.) *> Not referenced if TRANS = 'T'. 
*> \endverbatim *> diff --git a/lapack-netlib/SRC/ctplqt.f b/lapack-netlib/SRC/ctplqt.f index cb4d419b9..39893df48 100644 --- a/lapack-netlib/SRC/ctplqt.f +++ b/lapack-netlib/SRC/ctplqt.f @@ -1,3 +1,5 @@ +*> \brief \b CTPLQT +* * Definition: * =========== * diff --git a/lapack-netlib/SRC/ctplqt2.f b/lapack-netlib/SRC/ctplqt2.f index b16d6149a..d18452aec 100644 --- a/lapack-netlib/SRC/ctplqt2.f +++ b/lapack-netlib/SRC/ctplqt2.f @@ -1,3 +1,5 @@ +*> \brief \b CTPLQT2 +* * Definition: * =========== * diff --git a/lapack-netlib/SRC/ctpmlqt.f b/lapack-netlib/SRC/ctpmlqt.f index cb5f033ca..5899a5335 100644 --- a/lapack-netlib/SRC/ctpmlqt.f +++ b/lapack-netlib/SRC/ctpmlqt.f @@ -1,3 +1,5 @@ +*> \brief \b CTPMLQT +* * Definition: * =========== * @@ -77,7 +79,7 @@ *> *> \param[in] V *> \verbatim -*> V is COMPLEX array, dimension (LDA,K) +*> V is COMPLEX array, dimension (LDV,K) *> The i-th row must contain the vector which defines the *> elementary reflector H(i), for i = 1,2,...,k, as returned by *> DTPLQT in B. See Further Details. diff --git a/lapack-netlib/SRC/ctpmqrt.f b/lapack-netlib/SRC/ctpmqrt.f index fd3d1b109..8d4a36ca8 100644 --- a/lapack-netlib/SRC/ctpmqrt.f +++ b/lapack-netlib/SRC/ctpmqrt.f @@ -94,7 +94,7 @@ *> *> \param[in] V *> \verbatim -*> V is COMPLEX array, dimension (LDA,K) +*> V is COMPLEX array, dimension (LDV,K) *> The i-th column must contain the vector which defines the *> elementary reflector H(i), for i = 1,2,...,k, as returned by *> CTPQRT in B. See Further Details. diff --git a/lapack-netlib/SRC/ctprfb.f b/lapack-netlib/SRC/ctprfb.f index 1538deb56..0f45edaf8 100644 --- a/lapack-netlib/SRC/ctprfb.f +++ b/lapack-netlib/SRC/ctprfb.f @@ -152,8 +152,8 @@ *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. -*> If SIDE = 'L', LDC >= max(1,K); -*> If SIDE = 'R', LDC >= max(1,M). +*> If SIDE = 'L', LDA >= max(1,K); +*> If SIDE = 'R', LDA >= max(1,M). *> \endverbatim *> *> \param[in,out] B diff --git a/lapack-netlib/SRC/cungtsqr.f b/lapack-netlib/SRC/cungtsqr.f new file mode 100644 index 000000000..bc5305cf9 --- /dev/null +++ b/lapack-netlib/SRC/cungtsqr.f @@ -0,0 +1,307 @@ +*> \brief \b CUNGTSQR +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download CUNGTSQR + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> +* Definition: +* =========== +* +* SUBROUTINE CUNGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK, +* $ INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, LDT, LWORK, M, N, MB, NB +* .. +* .. Array Arguments .. +* COMPLEX A( LDA, * ), T( LDT, * ), WORK( * ) +* .. +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> CUNGTSQR generates an M-by-N complex matrix Q_out with orthonormal +*> columns, which are the first N columns of a product of complex unitary +*> matrices of order M which are returned by CLATSQR +*> +*> Q_out = first_N_columns_of( Q(1)_in * Q(2)_in * ... * Q(k)_in ). +*> +*> See the documentation for CLATSQR. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. M >= N >= 0. +*> \endverbatim +*> +*> \param[in] MB +*> \verbatim +*> MB is INTEGER +*> The row block size used by CLATSQR to return +*> arrays A and T. MB > N. +*> (Note that if MB > M, then M is used instead of MB +*> as the row block size).
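+*> [Editorial example] With M = 1000, N = 100 and MB = 300, CLATSQR
+*> splits A into MAX( 1, CEIL((M-N)/(MB-N)) ) = CEIL(900/200) = 5
+*> row blocks; this is the NIRB count used below to size T.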
+*> \endverbatim +*> +*> \param[in] NB +*> \verbatim +*> NB is INTEGER +*> The column block size used by CLATSQR to return +*> arrays A and T. NB >= 1. +*> (Note that if NB > N, then N is used instead of NB +*> as the column block size). +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is COMPLEX array, dimension (LDA,N) +*> +*> On entry: +*> +*> The elements on and above the diagonal are not accessed. +*> The elements below the diagonal represent the unit +*> lower-trapezoidal blocked matrix V computed by CLATSQR +*> that defines the input matrices Q_in(k) (ones on the +*> diagonal are not stored) (same format as the output A +*> below the diagonal in CLATSQR). +*> +*> On exit: +*> +*> The array A contains an M-by-N orthonormal matrix Q_out, +*> i.e. the columns of A are orthogonal unit vectors. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[in] T +*> \verbatim +*> T is COMPLEX array, +*> dimension (LDT, N * NIRB) +*> where NIRB = Number_of_input_row_blocks +*> = MAX( 1, CEIL((M-N)/(MB-N)) ) +*> Let NICB = Number_of_input_col_blocks +*> = CEIL(N/NB) +*> +*> The upper-triangular block reflectors used to define the +*> input matrices Q_in(k), k=(1:NIRB*NICB). The block +*> reflectors are stored in compact form in NIRB block +*> reflector sequences. Each of NIRB block reflector sequences +*> is stored in a larger NB-by-N column block of T and consists +*> of NICB smaller NB-by-NB upper-triangular column blocks. +*> (same format as the output T in CLATSQR). +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. +*> LDT >= max(1,min(NB,N)). +*> \endverbatim +*> +*> \param[out] WORK +*> \verbatim +*> (workspace) COMPLEX array, dimension (MAX(2,LWORK)) +*> On exit, if INFO = 0, WORK(1) returns the optimal LWORK. +*> \endverbatim +*> +*> \param[in] LWORK +*> \verbatim +*> The dimension of the array WORK. LWORK >= (M+NB)*N. +*> If LWORK = -1, then a workspace query is assumed. +*> The routine only calculates the optimal size of the WORK +*> array, returns this value as the first entry of the WORK +*> array, and no error message related to LWORK is issued +*> by XERBLA. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup complexOTHERcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE CUNGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK, + $ INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, LDT, LWORK, M, N, MB, NB +* .. +* .. Array Arguments .. + COMPLEX A( LDA, * ), T( LDT, * ), WORK( * ) +* .. +* +* ===================================================================== +* +* .. Parameters ..
+ COMPLEX CONE, CZERO + PARAMETER ( CONE = ( 1.0E+0, 0.0E+0 ), + $ CZERO = ( 0.0E+0, 0.0E+0 ) ) +* .. +* .. Local Scalars .. + LOGICAL LQUERY + INTEGER IINFO, LDC, LWORKOPT, LC, LW, NBLOCAL, J +* .. +* .. External Subroutines .. + EXTERNAL CCOPY, CLAMTSQR, CLASET, XERBLA +* .. +* .. Intrinsic Functions .. + INTRINSIC CMPLX, MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters +* + LQUERY = LWORK.EQ.-1 + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 .OR. M.LT.N ) THEN + INFO = -2 + ELSE IF( MB.LE.N ) THEN + INFO = -3 + ELSE IF( NB.LT.1 ) THEN + INFO = -4 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -6 + ELSE IF( LDT.LT.MAX( 1, MIN( NB, N ) ) ) THEN + INFO = -8 + ELSE +* +* Test the input LWORK for the dimension of the array WORK. +* This workspace is used to store array C(LDC, N) and WORK(LWORK) +* in the call to CLAMTSQR. See the documentation for CLAMTSQR. +* + IF( LWORK.LT.2 .AND. (.NOT.LQUERY) ) THEN + INFO = -10 + ELSE +* +* Set block size for column blocks +* + NBLOCAL = MIN( NB, N ) +* +* Set the size for the array C(LDC,N) in the CLAMTSQR call and +* the optimal size of the work array WORK(LWORK) in the same +* CLAMTSQR call. +* + LDC = M + LC = LDC*N + LW = N * NBLOCAL +* + LWORKOPT = LC+LW +* + IF( ( LWORK.LT.MAX( 1, LWORKOPT ) ).AND.(.NOT.LQUERY) ) THEN + INFO = -10 + END IF + END IF +* + END IF +* +* Handle error in the input parameters and return workspace query. +* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CUNGTSQR', -INFO ) + RETURN + ELSE IF ( LQUERY ) THEN + WORK( 1 ) = CMPLX( LWORKOPT ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) THEN + WORK( 1 ) = CMPLX( LWORKOPT ) + RETURN + END IF +* +* (1) Form explicitly the tall-skinny M-by-N left submatrix Q1_in +* of M-by-M orthogonal matrix Q_in, which is implicitly stored in +* the subdiagonal part of input array A and in the input array T. +* This is performed by the following operation using the routine +* CLAMTSQR. +* +* Q1_in = Q_in * ( I ), where I is an N-by-N identity matrix, +* ( 0 ) 0 is an (M-N)-by-N zero matrix. +* +* (1a) Form the M-by-N matrix in the array WORK(1:LDC*N) with ones +* on the diagonal and zeros elsewhere. +* + CALL CLASET( 'F', M, N, CZERO, CONE, WORK, LDC ) +* +* (1b) On input, WORK(1:LDC*N) stores ( I ); +* ( 0 ) +* +* On output, WORK(1:LDC*N) stores Q1_in. +* + CALL CLAMTSQR( 'L', 'N', M, N, N, MB, NBLOCAL, A, LDA, T, LDT, + $ WORK, LDC, WORK( LC+1 ), LW, IINFO ) +* +* (2) Copy the result from the part of the work array (1:M,1:N) +* with the leading dimension LDC that starts at WORK(1) into +* the output array A(1:M,1:N) column-by-column. +* + DO J = 1, N + CALL CCOPY( M, WORK( (J-1)*LDC + 1 ), 1, A( 1, J ), 1 ) + END DO +* + WORK( 1 ) = CMPLX( LWORKOPT ) + RETURN +* +* End of CUNGTSQR +* + END \ No newline at end of file diff --git a/lapack-netlib/SRC/cunhr_col.f b/lapack-netlib/SRC/cunhr_col.f new file mode 100644 index 000000000..15c31491e --- /dev/null +++ b/lapack-netlib/SRC/cunhr_col.f @@ -0,0 +1,441 @@ +*> \brief \b CUNHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download CUNHR_COL + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> +* Definition: +* =========== +* +* SUBROUTINE CUNHR_COL( M, N, NB, A, LDA, T, LDT, D, INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, LDT, M, N, NB +* .. +* .. Array Arguments .. +* COMPLEX A( LDA, * ), D( * ), T( LDT, * ) +* ..
+* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> CUNHR_COL takes an M-by-N complex matrix Q_in with orthonormal columns +*> as input, stored in A, and performs Householder Reconstruction (HR), +*> i.e. reconstructs Householder vectors V(i) implicitly representing +*> another M-by-N matrix Q_out, with the property that Q_in = Q_out*S, +*> where S is an N-by-N diagonal matrix with diagonal entries +*> equal to +1 or -1. The Householder vectors (columns V(i) of V) are +*> stored in A on output, and the diagonal entries of S are stored in D. +*> Block reflectors are also returned in T +*> (same output format as CGEQRT). +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. M >= N >= 0. +*> \endverbatim +*> +*> \param[in] NB +*> \verbatim +*> NB is INTEGER +*> The column block size to be used in the reconstruction +*> of Householder column vector blocks in the array A and +*> corresponding block reflectors in the array T. NB >= 1. +*> (Note that if NB > N, then N is used instead of NB +*> as the column block size.) +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is COMPLEX array, dimension (LDA,N) +*> +*> On entry: +*> +*> The array A contains an M-by-N orthonormal matrix Q_in, +*> i.e. the columns of A are orthogonal unit vectors. +*> +*> On exit: +*> +*> The elements below the diagonal of A represent the unit +*> lower-trapezoidal matrix V of Householder column vectors +*> V(i). The unit diagonal entries of V are not stored +*> (same format as the output below the diagonal in A from +*> CGEQRT). The matrix T and the matrix V stored on output +*> in A implicitly define Q_out. +*> +*> The elements above the diagonal contain the factor U +*> of the "modified" LU-decomposition: +*> Q_in - ( S ) = V * U +*> ( 0 ) +*> where 0 is an (M-N)-by-N zero matrix. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] T +*> \verbatim +*> T is COMPLEX array, +*> dimension (LDT, N) +*> +*> Let NOCB = Number_of_output_col_blocks +*> = CEIL(N/NB) +*> +*> On exit, T(1:NB, 1:N) contains NOCB upper-triangular +*> block reflectors used to define Q_out stored in compact +*> form as a sequence of upper-triangular NB-by-NB column +*> blocks (same format as the output T in CGEQRT). +*> The matrix T and the matrix V stored on output in A +*> implicitly define Q_out. NOTE: The lower triangles +*> below the upper-triangular blocks will be filled with +*> zeros. See Further Details. +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. +*> LDT >= max(1,min(NB,N)). +*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is COMPLEX array, dimension min(M,N). +*> The elements can be only plus or minus one. +*> +*> D(i) is constructed as D(i) = -SIGN(Q_in_i(i,i)), where +*> 1 <= i <= min(M,N), and Q_in_i is Q_in after performing +*> i-1 steps of "modified" Gaussian elimination. +*> See Further Details.
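+*> [Editorial example] If the diagonal entries of Q_in_i at steps
+*> i = 1, 2, 3 have signs +, -, +, then D = ( -1, +1, -1 ), so with
+*> S = diag(D) the relation Q_in = Q_out*S means columns 1 and 3 of
+*> Q_out are the negatives of the corresponding columns of Q_in.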
+*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +*> \par Further Details: +* ===================== +*> +*> \verbatim +*> +*> The computed M-by-M unitary factor Q_out is defined implicitly as +*> a product of unitary matrices Q_out(i). Each Q_out(i) is stored in +*> the compact WY-representation format in the corresponding blocks of +*> matrices V (stored in A) and T. +*> +*> The M-by-N unit lower-trapezoidal matrix V stored in the M-by-N +*> matrix A contains the column vectors V(i) in NB-size column +*> blocks VB(j). For example, VB(1) contains the columns +*> V(1), V(2), ... V(NB). NOTE: The unit entries on +*> the diagonal of V are not stored in A. +*> +*> The number of column blocks is +*> +*> NOCB = Number_of_output_col_blocks = CEIL(N/NB) +*> +*> where each block is of order NB except for the last block, which +*> is of order LAST_NB = N - (NOCB-1)*NB. +*> +*> For example, if M=6, N=5 and NB=2, the matrix V is +*> +*> +*> V = ( VB(1), VB(2), VB(3) ) = +*> +*> = ( 1 ) +*> ( v21 1 ) +*> ( v31 v32 1 ) +*> ( v41 v42 v43 1 ) +*> ( v51 v52 v53 v54 1 ) +*> ( v61 v62 v63 v64 v65 ) +*> +*> +*> For each of the column blocks VB(i), an upper-triangular block +*> reflector TB(i) is computed. These blocks are stored as +*> a sequence of upper-triangular column blocks in the NB-by-N +*> matrix T. The size of each TB(i) block is NB-by-NB, except +*> for the last block, whose size is LAST_NB-by-LAST_NB. +*> +*> For example, if M=6, N=5 and NB=2, the matrix T is +*> +*> T = ( TB(1), TB(2), TB(3) ) = +*> +*> = ( t11 t12 t13 t14 t15 ) +*> ( t22 t24 ) +*> +*> +*> The M-by-M factor Q_out is given as a product of NOCB +*> unitary M-by-M matrices Q_out(i). +*> +*> Q_out = Q_out(1) * Q_out(2) * ... * Q_out(NOCB), +*> +*> where each matrix Q_out(i) is given by the WY-representation +*> using corresponding blocks from the matrices V and T: +*> +*> Q_out(i) = I - VB(i) * TB(i) * (VB(i))**H, +*> +*> where I is the identity matrix. Here is the formula with matrix +*> dimensions: +*> +*> Q(i){M-by-M} = I{M-by-M} - +*> VB(i){M-by-INB} * TB(i){INB-by-INB} * (VB(i))**H {INB-by-M}, +*> +*> where INB = NB, except for the last block NOCB +*> for which INB=LAST_NB. +*> +*> ===== +*> NOTE: +*> ===== +*> +*> If Q_in is the result of doing a QR factorization +*> B = Q_in * R_in, then: +*> +*> B = (Q_out*S) * R_in = Q_out * (S * R_in) = Q_out * R_out. +*> +*> So if one wants to interpret Q_out as the result +*> of the QR factorization of B, then the corresponding R_out +*> should be obtained by R_out = S * R_in, i.e. some rows of R_in +*> should be multiplied by -1. +*> +*> For the details of the algorithm, see [1]. +*> +*> [1] "Reconstructing Householder vectors from tall-skinny QR", +*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen, +*> E. Solomonik, J. Parallel Distrib. Comput., +*> vol. 85, pp. 3-31, 2015. +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd.
+* +*> \date November 2019 +* +*> \ingroup complexOTHERcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE CUNHR_COL( M, N, NB, A, LDA, T, LDT, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, LDT, M, N, NB +* .. +* .. Array Arguments .. + COMPLEX A( LDA, * ), D( * ), T( LDT, * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + COMPLEX CONE, CZERO + PARAMETER ( CONE = ( 1.0E+0, 0.0E+0 ), + $ CZERO = ( 0.0E+0, 0.0E+0 ) ) +* .. +* .. Local Scalars .. + INTEGER I, IINFO, J, JB, JBTEMP1, JBTEMP2, JNB, + $ NPLUSONE +* .. +* .. External Subroutines .. + EXTERNAL CCOPY, CLAUNHR_COL_GETRFNP, CSCAL, CTRSM, + $ XERBLA +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 .OR. N.GT.M ) THEN + INFO = -2 + ELSE IF( NB.LT.1 ) THEN + INFO = -3 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -5 + ELSE IF( LDT.LT.MAX( 1, MIN( NB, N ) ) ) THEN + INFO = -7 + END IF +* +* Handle error in the input parameters. +* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CUNHR_COL', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) THEN + RETURN + END IF +* +* On input, the M-by-N matrix A contains the unitary +* M-by-N matrix Q_in. +* +* (1) Compute the unit lower-trapezoidal V (ones on the diagonal +* are not stored) by performing the "modified" LU-decomposition. +* +* Q_in - ( S ) = V * U = ( V1 ) * U, +* ( 0 ) ( V2 ) +* +* where 0 is an (M-N)-by-N zero matrix. +* +* (1-1) Factor V1 and U. + + CALL CLAUNHR_COL_GETRFNP( N, N, A, LDA, D, IINFO ) +* +* (1-2) Solve for V2. +* + IF( M.GT.N ) THEN + CALL CTRSM( 'R', 'U', 'N', 'N', M-N, N, CONE, A, LDA, + $ A( N+1, 1 ), LDA ) + END IF +* +* (2) Reconstruct the block reflector T stored in T(1:NB, 1:N) +* as a sequence of upper-triangular blocks with NB-size column +* blocking. +* +* Loop over the column blocks of size NB of the array A(1:M,1:N) +* and the array T(1:NB,1:N), JB is the column index of a column +* block, JNB is the column block size at each step JB. +* + NPLUSONE = N + 1 + DO JB = 1, N, NB +* +* (2-0) Determine the column block size JNB. +* + JNB = MIN( NPLUSONE-JB, NB ) +* +* (2-1) Copy the upper-triangular part of the current JNB-by-JNB +* diagonal block U(JB) (of the N-by-N matrix U) stored +* in A(JB:JB+JNB-1,JB:JB+JNB-1) into the upper-triangular part +* of the current JNB-by-JNB block T(1:JNB,JB:JB+JNB-1) +* column-by-column, total JNB*(JNB+1)/2 elements. +* + JBTEMP1 = JB - 1 + DO J = JB, JB+JNB-1 + CALL CCOPY( J-JBTEMP1, A( JB, J ), 1, T( 1, J ), 1 ) + END DO +* +* (2-2) Perform on the upper-triangular part of the current +* JNB-by-JNB diagonal block U(JB) (of the N-by-N matrix U) stored +* in T(1:JNB,JB:JB+JNB-1) the following operation in place: +* (-1)*U(JB)*S(JB), i.e. the result will be stored in the upper- +* triangular part of T(1:JNB,JB:JB+JNB-1).
This multiplication +* of the JNB-by-JNB diagonal block U(JB) by the JNB-by-JNB +* diagonal block S(JB) of the N-by-N sign matrix S from the +* right means changing the sign of each J-th column of the block +* U(JB) according to the sign of the diagonal element of the block +* S(JB), i.e. S(J,J) that is stored in the array element D(J). +* + DO J = JB, JB+JNB-1 + IF( D( J ).EQ.CONE ) THEN + CALL CSCAL( J-JBTEMP1, -CONE, T( 1, J ), 1 ) + END IF + END DO +* +* (2-3) Perform the triangular solve for the current block +* matrix X(JB): +* +* X(JB) * (A(JB)**H) = B(JB), where: +* +* A(JB)**H is a JNB-by-JNB unit upper-triangular +* coefficient block, and A(JB)=V1(JB), which +* is a JNB-by-JNB unit lower-triangular block +* stored in A(JB:JB+JNB-1,JB:JB+JNB-1). +* The N-by-N matrix V1 is the upper part +* of the M-by-N lower-trapezoidal matrix V +* stored in A(1:M,1:N); +* +* B(JB) is a JNB-by-JNB upper-triangular right-hand +* side block, B(JB) = (-1)*U(JB)*S(JB), and +* B(JB) is stored in T(1:JNB,JB:JB+JNB-1); +* +* X(JB) is a JNB-by-JNB upper-triangular solution +* block, X(JB) is the upper-triangular block +* reflector T(JB), and X(JB) is stored +* in T(1:JNB,JB:JB+JNB-1). +* +* In other words, we perform the triangular solve for the +* upper-triangular block T(JB): +* +* T(JB) * (V1(JB)**H) = (-1)*U(JB)*S(JB). +* +* Even though the blocks X(JB) and B(JB) are upper- +* triangular, the routine CTRSM will access all JNB**2 +* elements of the square T(1:JNB,JB:JB+JNB-1). Therefore, +* we need to set to zero the elements of the block +* T(1:JNB,JB:JB+JNB-1) below the diagonal before the call +* to CTRSM. +* +* (2-3a) Set the elements to zero. +* + JBTEMP2 = JB - 2 + DO J = JB, JB+JNB-2 + DO I = J-JBTEMP2, NB + T( I, J ) = CZERO + END DO + END DO +* +* (2-3b) Perform the triangular solve. +* + CALL CTRSM( 'R', 'L', 'C', 'U', JNB, JNB, CONE, + $ A( JB, JB ), LDA, T( 1, JB ), LDT ) +* + END DO +* + RETURN +* +* End of CUNHR_COL +* + END \ No newline at end of file diff --git a/lapack-netlib/SRC/dbdsqr.f b/lapack-netlib/SRC/dbdsqr.f index 93db95e7a..7d47fa282 100644 --- a/lapack-netlib/SRC/dbdsqr.f +++ b/lapack-netlib/SRC/dbdsqr.f @@ -166,7 +166,7 @@ *> *> \param[out] WORK *> \verbatim -*> WORK is DOUBLE PRECISION array, dimension (4*N) +*> WORK is DOUBLE PRECISION array, dimension (4*(N-1)) *> \endverbatim *> *> \param[out] INFO diff --git a/lapack-netlib/SRC/dbdsvdx.f b/lapack-netlib/SRC/dbdsvdx.f index 96fdb3d61..10d97a71f 100644 --- a/lapack-netlib/SRC/dbdsvdx.f +++ b/lapack-netlib/SRC/dbdsvdx.f @@ -165,7 +165,7 @@ *> *> \param[out] Z *> \verbatim -*> Z is DOUBLE PRECISION array, dimension (2*N,K) ) +*> Z is DOUBLE PRECISION array, dimension (2*N,K) *> If JOBZ = 'V', then if INFO = 0 the first NS columns of Z *> contain the singular vectors of the matrix B corresponding to *> the selected singular values, with U in rows 1 to N and V diff --git a/lapack-netlib/SRC/dcombssq.f b/lapack-netlib/SRC/dcombssq.f new file mode 100644 index 000000000..7a1ddd1af --- /dev/null +++ b/lapack-netlib/SRC/dcombssq.f @@ -0,0 +1,94 @@ +*> \brief \b DCOMBSSQ adds two scaled sum of squares quantities. +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* +* Definition: +* =========== +* +* SUBROUTINE DCOMBSSQ( V1, V2 ) +* +* .. Array Arguments .. +* DOUBLE PRECISION V1( 2 ), V2( 2 ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> DCOMBSSQ adds two scaled sum of squares quantities, V1 := V1 + V2.
+*> That is, +*> +*> V1_scale**2 * V1_sumsq := V1_scale**2 * V1_sumsq +*> + V2_scale**2 * V2_sumsq +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in,out] V1 +*> \verbatim +*> V1 is DOUBLE PRECISION array, dimension (2). +*> The first scaled sum. +*> V1(1) = V1_scale, V1(2) = V1_sumsq. +*> \endverbatim +*> +*> \param[in] V2 +*> \verbatim +*> V2 is DOUBLE PRECISION array, dimension (2). +*> The second scaled sum. +*> V2(1) = V2_scale, V2(2) = V2_sumsq. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2018 +* +*> \ingroup OTHERauxiliary +* +* ===================================================================== + SUBROUTINE DCOMBSSQ( V1, V2 ) +* +* -- LAPACK auxiliary routine (version 3.7.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2018 +* +* .. Array Arguments .. + DOUBLE PRECISION V1( 2 ), V2( 2 ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + DOUBLE PRECISION ZERO + PARAMETER ( ZERO = 0.0D+0 ) +* .. +* .. Executable Statements .. +* + IF( V1( 1 ).GE.V2( 1 ) ) THEN + IF( V1( 1 ).NE.ZERO ) THEN + V1( 2 ) = V1( 2 ) + ( V2( 1 ) / V1( 1 ) )**2 * V2( 2 ) + ELSE + V1( 2 ) = V1( 2 ) + V2( 2 ) + END IF + ELSE + V1( 2 ) = V2( 2 ) + ( V1( 1 ) / V2( 1 ) )**2 * V1( 2 ) + V1( 1 ) = V2( 1 ) + END IF + RETURN +* +* End of DCOMBSSQ +* + END diff --git a/lapack-netlib/SRC/dgbrfsx.f b/lapack-netlib/SRC/dgbrfsx.f index fb52d643f..76afb2d6a 100644 --- a/lapack-netlib/SRC/dgbrfsx.f +++ b/lapack-netlib/SRC/dgbrfsx.f @@ -308,7 +308,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -344,14 +344,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension (NPARAMS) -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -359,9 +359,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. 
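[Editorial note] DCOMBSSQ, added earlier in this patch, merges two (scale, sumsq) pairs in the representation used by the xLASSQ routines, where a vector norm is recovered as scale*SQRT(sumsq). The following minimal driver, assuming only the reference BLAS/LAPACK routines DLASSQ and DNRM2, is an illustrative sketch of the calling pattern and is not part of the patch:

      PROGRAM TSSQ
*     Editorial example: combine the scaled sums of squares of two
*     halves of a vector with DCOMBSSQ and check against DNRM2.
      INTEGER I
      DOUBLE PRECISION X( 8 ), V1( 2 ), V2( 2 )
      DOUBLE PRECISION DNRM2
      EXTERNAL DLASSQ, DCOMBSSQ, DNRM2
      INTRINSIC DBLE, SQRT
      DO 10 I = 1, 8
         X( I ) = DBLE( I )
   10 CONTINUE
*     Each half starts from the empty sum: scale = 0, sumsq = 1.
      V1( 1 ) = 0.0D+0
      V1( 2 ) = 1.0D+0
      V2( 1 ) = 0.0D+0
      V2( 2 ) = 1.0D+0
      CALL DLASSQ( 4, X( 1 ), 1, V1( 1 ), V1( 2 ) )
      CALL DLASSQ( 4, X( 5 ), 1, V2( 1 ), V2( 2 ) )
*     V1 := V1 + V2 in the scaled (scale, sumsq) representation.
      CALL DCOMBSSQ( V1, V2 )
*     Both printed values should equal SQRT(204) = 14.2828...
      WRITE( *, * ) V1( 1 )*SQRT( V1( 2 ) ), DNRM2( 8, X, 1 )
      END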
diff --git a/lapack-netlib/SRC/dgbsvxx.f b/lapack-netlib/SRC/dgbsvxx.f index 819d20c6d..058b20686 100644 --- a/lapack-netlib/SRC/dgbsvxx.f +++ b/lapack-netlib/SRC/dgbsvxx.f @@ -431,7 +431,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -467,14 +467,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension (NPARAMS) -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -482,9 +482,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. *> (other values are reserved for future use) *> *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual diff --git a/lapack-netlib/SRC/dgebak.f b/lapack-netlib/SRC/dgebak.f index 45a86ee57..10a78aa1a 100644 --- a/lapack-netlib/SRC/dgebak.f +++ b/lapack-netlib/SRC/dgebak.f @@ -47,10 +47,10 @@ *> \verbatim *> JOB is CHARACTER*1 *> Specifies the type of backward transformation required: -*> = 'N', do nothing, return immediately; -*> = 'P', do backward transformation for permutation only; -*> = 'S', do backward transformation for scaling only; -*> = 'B', do backward transformations for both permutation and +*> = 'N': do nothing, return immediately; +*> = 'P': do backward transformation for permutation only; +*> = 'S': do backward transformation for scaling only; +*> = 'B': do backward transformations for both permutation and *> scaling. *> JOB must be the same as the argument JOB supplied to DGEBAL. *> \endverbatim diff --git a/lapack-netlib/SRC/dgeesx.f b/lapack-netlib/SRC/dgeesx.f index 26042a5f9..a08104d3d 100644 --- a/lapack-netlib/SRC/dgeesx.f +++ b/lapack-netlib/SRC/dgeesx.f @@ -583,7 +583,9 @@ IF( N.GT.I+1 ) $ CALL DSWAP( N-I-1, A( I, I+2 ), LDA, $ A( I+1, I+2 ), LDA ) - CALL DSWAP( N, VS( 1, I ), 1, VS( 1, I+1 ), 1 ) + IF( WANTVS ) THEN + CALL DSWAP( N, VS( 1, I ), 1, VS( 1, I+1 ), 1 ) + END IF A( I, I+1 ) = A( I+1, I ) A( I+1, I ) = ZERO END IF diff --git a/lapack-netlib/SRC/dgejsv.f b/lapack-netlib/SRC/dgejsv.f index 25ed248d0..a30cfab87 100644 --- a/lapack-netlib/SRC/dgejsv.f +++ b/lapack-netlib/SRC/dgejsv.f @@ -82,7 +82,7 @@ *> desirable, then this option is advisable. The input matrix A *> is preprocessed with QR factorization with FULL (row and *> column) pivoting. 
-*> = 'G' Computation as with 'F' with an additional estimate of the +*> = 'G': Computation as with 'F' with an additional estimate of the *> condition number of B, where A=D*B. If A has heavily weighted *> rows, then using this condition number gives too pessimistic *> error bound. @@ -133,7 +133,7 @@ *> specified range. If A .NE. 0 is scaled so that the largest singular *> value of c*A is around DSQRT(BIG), BIG=SLAMCH('O'), then JOBR issues *> the licence to kill columns of A whose norm in c*A is less than -*> DSQRT(SFMIN) (for JOBR.EQ.'R'), or less than SMALL=SFMIN/EPSLN, +*> DSQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, *> where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). *> = 'N': Do not kill small columns of c*A. This option assumes that *> BLAS and QR factorizations and triangular solvers are @@ -230,7 +230,7 @@ *> If JOBU = 'F', then U contains on exit the M-by-M matrix of *> the left singular vectors, including an ONB *> of the orthogonal complement of the Range(A). -*> If JOBU = 'W' .AND. (JOBV.EQ.'V' .AND. JOBT.EQ.'T' .AND. M.EQ.N), +*> If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), *> then U is used as workspace if the procedure *> replaces A with A^t. In that case, [V] is computed *> in U as left singular vectors of A^t and then @@ -252,7 +252,7 @@ *> V is DOUBLE PRECISION array, dimension ( LDV, N ) *> If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of *> the right singular vectors; -*> If JOBV = 'W', AND (JOBU.EQ.'U' AND JOBT.EQ.'T' AND M.EQ.N), +*> If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), *> then V is used as workspace if the pprocedure *> replaces A with A^t. In that case, [U] is computed *> in V as right singular vectors of A^t and then @@ -272,13 +272,13 @@ *> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (LWORK) -*> On exit, if N.GT.0 .AND. M.GT.0 (else not referenced), +*> On exit, if N > 0 .AND. M > 0 (else not referenced), *> WORK(1) = SCALE = WORK(2) / WORK(1) is the scaling factor such *> that SCALE*SVA(1:N) are the computed singular values *> of A. (See the description of SVA().) *> WORK(2) = See the description of WORK(1). *> WORK(3) = SCONDA is an estimate for the condition number of -*> column equilibrated A. (If JOBA .EQ. 'E' or 'G') +*> column equilibrated A. (If JOBA = 'E' or 'G') *> SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). *> It is computed using DPOCON. It holds *> N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA @@ -297,7 +297,7 @@ *> triangular factor in the first QR factorization. *> WORK(5) = an estimate of the scaled condition number of the *> triangular factor in the second QR factorization. -*> The following two parameters are computed if JOBT .EQ. 'T'. +*> The following two parameters are computed if JOBT = 'T'. *> They are provided for a developer/implementer who is familiar *> with the details of the method. *> @@ -313,8 +313,8 @@ *> Length of WORK to confirm proper allocation of work space. *> LWORK depends on the job: *> -*> If only SIGMA is needed ( JOBU.EQ.'N', JOBV.EQ.'N' ) and -*> -> .. no scaled condition estimate required (JOBE.EQ.'N'): +*> If only SIGMA is needed (JOBU = 'N', JOBV = 'N') and +*> -> .. no scaled condition estimate required (JOBE = 'N'): *> LWORK >= max(2*M+N,4*N+1,7). This is the minimal requirement. *> ->> For optimal performance (blocked code) the optimal value *> is LWORK >= max(2*M+N,3*N+(N+1)*NB,7). 
Here NB is the optimal @@ -330,7 +330,7 @@ *> LWORK >= max(2*M+N,N+LWORK(DGEQP3),N+LWORK(DGEQRF), *> N+N*N+LWORK(DPOCON),7). *> -*> If SIGMA and the right singular vectors are needed (JOBV.EQ.'V'), +*> If SIGMA and the right singular vectors are needed (JOBV = 'V'), *> -> the minimal requirement is LWORK >= max(2*M+N,4*N+1,7). *> -> For optimal performance, LWORK >= max(2*M+N,3*N+(N+1)*NB,7), *> where NB is the optimal block size for DGEQP3, DGEQRF, DGELQF, @@ -341,19 +341,19 @@ *> If SIGMA and the left singular vectors are needed *> -> the minimal requirement is LWORK >= max(2*M+N,4*N+1,7). *> -> For optimal performance: -*> if JOBU.EQ.'U' :: LWORK >= max(2*M+N,3*N+(N+1)*NB,7), -*> if JOBU.EQ.'F' :: LWORK >= max(2*M+N,3*N+(N+1)*NB,N+M*NB,7), +*> if JOBU = 'U' :: LWORK >= max(2*M+N,3*N+(N+1)*NB,7), +*> if JOBU = 'F' :: LWORK >= max(2*M+N,3*N+(N+1)*NB,N+M*NB,7), *> where NB is the optimal block size for DGEQP3, DGEQRF, DORMQR. *> In general, the optimal length LWORK is computed as *> LWORK >= max(2*M+N,N+LWORK(DGEQP3),N+LWORK(DPOCON), *> 2*N+LWORK(DGEQRF), N+LWORK(DORMQR)). -*> Here LWORK(DORMQR) equals N*NB (for JOBU.EQ.'U') or -*> M*NB (for JOBU.EQ.'F'). +*> Here LWORK(DORMQR) equals N*NB (for JOBU = 'U') or +*> M*NB (for JOBU = 'F'). *> -*> If the full SVD is needed: (JOBU.EQ.'U' or JOBU.EQ.'F') and -*> -> if JOBV.EQ.'V' +*> If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and +*> -> if JOBV = 'V' *> the minimal requirement is LWORK >= max(2*M+N,6*N+2*N*N). -*> -> if JOBV.EQ.'J' the minimal requirement is +*> -> if JOBV = 'J' the minimal requirement is *> LWORK >= max(2*M+N, 4*N+N*N,2*N+N*N+6). *> -> For optimal performance, LWORK should be additionally *> larger than N+M*NB, where NB is the optimal block size @@ -369,7 +369,7 @@ *> of JOBA and JOBR. *> IWORK(2) = the number of the computed nonzero singular values *> IWORK(3) = if nonzero, a warning message: -*> If IWORK(3).EQ.1 then some of the column norms of A +*> If IWORK(3) = 1 then some of the column norms of A *> were denormalized floats. The requested high accuracy *> is not warranted by the data. *> \endverbatim @@ -377,10 +377,10 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> < 0 : if INFO = -i, then the i-th argument had an illegal value. -*> = 0 : successful exit; -*> > 0 : DGEJSV did not converge in the maximal allowed number -*> of sweeps. The computed values may be inaccurate. +*> < 0: if INFO = -i, then the i-th argument had an illegal value. +*> = 0: successful exit; +*> > 0: DGEJSV did not converge in the maximal allowed number +*> of sweeps. The computed values may be inaccurate. *> \endverbatim * * Authors: @@ -953,7 +953,7 @@ IF ( L2ABER ) THEN * Standard absolute error bound suffices. All sigma_i with * sigma_i < N*EPSLN*||A|| are flushed to zero. This is an -* agressive enforcement of lower numerical rank by introducing a +* aggressive enforcement of lower numerical rank by introducing a * backward error of the order of N*EPSLN*||A||. TEMP1 = DSQRT(DBLE(N))*EPSLN DO 3001 p = 2, N @@ -965,7 +965,7 @@ 3001 CONTINUE 3002 CONTINUE ELSE IF ( L2RANK ) THEN -* .. similarly as above, only slightly more gentle (less agressive). +* .. similarly as above, only slightly more gentle (less aggressive). * Sudden drop on the diagonal of R1 is used as the criterion for * close-to-rank-deficient. TEMP1 = DSQRT(SFMIN) @@ -1294,7 +1294,7 @@ CALL DPOCON('Lower',NR,WORK(2*N+1),NR,ONE,TEMP1, $ WORK(2*N+NR*NR+1),IWORK(M+2*N+1),IERR) CONDR1 = ONE / DSQRT(TEMP1) -* .. 
here need a second oppinion on the condition number +* .. here need a second opinion on the condition number * .. then assume worst case scenario * R1 is OK for inverse <=> CONDR1 .LT. DBLE(N) * more conservative <=> CONDR1 .LT. DSQRT(DBLE(N)) @@ -1335,7 +1335,7 @@ ELSE * * .. ill-conditioned case: second QRF with pivoting -* Note that windowed pivoting would be equaly good +* Note that windowed pivoting would be equally good * numerically, and more run-time efficient. So, in * an optimal implementation, the next call to DGEQP3 * should be replaced with eg. CALL SGEQPX (ACM TOMS #782) @@ -1388,7 +1388,7 @@ * IF ( CONDR2 .GE. COND_OK ) THEN * .. save the Householder vectors used for Q3 -* (this overwrittes the copy of R2, as it will not be +* (this overwrites the copy of R2, as it will not be * needed in this branch, but it does not overwritte the * Huseholder vectors of Q2.). CALL DLACPY( 'U', NR, NR, V, LDV, WORK(2*N+1), N ) @@ -1638,7 +1638,7 @@ * * This branch deploys a preconditioned Jacobi SVD with explicitly * accumulated rotations. It is included as optional, mainly for -* experimental purposes. It does perfom well, and can also be used. +* experimental purposes. It does perform well, and can also be used. * In this implementation, this branch will be automatically activated * if the condition number sigma_max(A) / sigma_min(A) is predicted * to be greater than the overflow threshold. This is because the diff --git a/lapack-netlib/SRC/dgelq.f b/lapack-netlib/SRC/dgelq.f index ece645079..7b2f80862 100644 --- a/lapack-netlib/SRC/dgelq.f +++ b/lapack-netlib/SRC/dgelq.f @@ -1,3 +1,4 @@ +*> \brief \b DGELQ * * Definition: * =========== @@ -17,7 +18,17 @@ * ============= *> *> \verbatim -*> DGELQ computes a LQ factorization of an M-by-N matrix A. +*> +*> DGELQ computes an LQ factorization of a real M-by-N matrix A: +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is a N-by-N orthogonal matrix; +*> L is a lower-triangular M-by-M matrix; +*> 0 is a M-by-(N-M) zero matrix, if M < N. +*> *> \endverbatim * * Arguments: @@ -138,7 +149,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> @@ -159,10 +170,10 @@ SUBROUTINE DGELQ( M, N, A, LDA, T, TSIZE, WORK, LWORK, $ INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N, TSIZE, LWORK @@ -176,7 +187,7 @@ * .. * .. Local Scalars .. LOGICAL LQUERY, LMINWS, MINT, MINW - INTEGER MB, NB, MINTSZ, NBLCKS + INTEGER MB, NB, MINTSZ, NBLCKS, LWMIN, LWOPT, LWREQ * .. * .. External Functions .. LOGICAL LSAME @@ -232,20 +243,32 @@ * * Determine if the workspace size satisfies minimal size * + IF( ( N.LE.M ) .OR. ( NB.LE.M ) .OR. ( NB.GE.N ) ) THEN + LWMIN = MAX( 1, N ) + LWOPT = MAX( 1, MB*N ) + ELSE + LWMIN = MAX( 1, M ) + LWOPT = MAX( 1, MB*M ) + END IF LMINWS = .FALSE. - IF( ( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) .OR. LWORK.LT.MB*M ) - $ .AND. ( LWORK.GE.M ) .AND. 
( TSIZE.GE.MINTSZ ) + IF( ( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) .OR. LWORK.LT.LWOPT ) + $ .AND. ( LWORK.GE.LWMIN ) .AND. ( TSIZE.GE.MINTSZ ) $ .AND. ( .NOT.LQUERY ) ) THEN IF( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) ) THEN LMINWS = .TRUE. MB = 1 NB = N END IF - IF( LWORK.LT.MB*M ) THEN + IF( LWORK.LT.LWOPT ) THEN LMINWS = .TRUE. MB = 1 END IF END IF + IF( ( N.LE.M ) .OR. ( NB.LE.M ) .OR. ( NB.GE.N ) ) THEN + LWREQ = MAX( 1, MB*N ) + ELSE + LWREQ = MAX( 1, MB*M ) + END IF * IF( M.LT.0 ) THEN INFO = -1 @@ -256,7 +279,7 @@ ELSE IF( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) $ .AND. ( .NOT.LQUERY ) .AND. ( .NOT.LMINWS ) ) THEN INFO = -6 - ELSE IF( ( LWORK.LT.MAX( 1, M*MB ) ) .AND .( .NOT.LQUERY ) + ELSE IF( ( LWORK.LT.LWREQ ) .AND .( .NOT.LQUERY ) $ .AND. ( .NOT.LMINWS ) ) THEN INFO = -8 END IF @@ -270,9 +293,9 @@ T( 2 ) = MB T( 3 ) = NB IF( MINW ) THEN - WORK( 1 ) = MAX( 1, N ) + WORK( 1 ) = LWMIN ELSE - WORK( 1 ) = MAX( 1, MB*M ) + WORK( 1 ) = LWREQ END IF END IF IF( INFO.NE.0 ) THEN @@ -297,7 +320,7 @@ $ LWORK, INFO ) END IF * - WORK( 1 ) = MAX( 1, MB*M ) + WORK( 1 ) = LWREQ * RETURN * diff --git a/lapack-netlib/SRC/dgelq2.f b/lapack-netlib/SRC/dgelq2.f index 04aa57fc1..a6c835de4 100644 --- a/lapack-netlib/SRC/dgelq2.f +++ b/lapack-netlib/SRC/dgelq2.f @@ -33,8 +33,16 @@ *> *> \verbatim *> -*> DGELQ2 computes an LQ factorization of a real m by n matrix A: -*> A = L * Q. +*> DGELQ2 computes an LQ factorization of a real m-by-n matrix A: +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is a n-by-n orthogonal matrix; +*> L is a lower-triangular m-by-m matrix; +*> 0 is a m-by-(n-m) zero matrix, if m < n. +*> *> \endverbatim * * Arguments: @@ -96,7 +104,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup doubleGEcomputational * @@ -121,10 +129,10 @@ * ===================================================================== SUBROUTINE DGELQ2( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/dgelqf.f b/lapack-netlib/SRC/dgelqf.f index 834c47168..4b11761f6 100644 --- a/lapack-netlib/SRC/dgelqf.f +++ b/lapack-netlib/SRC/dgelqf.f @@ -34,7 +34,15 @@ *> \verbatim *> *> DGELQF computes an LQ factorization of a real M-by-N matrix A: -*> A = L * Q. +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is a N-by-N orthogonal matrix; +*> L is a lower-triangular M-by-M matrix; +*> 0 is a M-by-(N-M) zero matrix, if M < N. +*> *> \endverbatim * * Arguments: @@ -110,7 +118,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup doubleGEcomputational * @@ -135,10 +143,10 @@ * ===================================================================== SUBROUTINE DGELQF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments ..
INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/dgemlq.f b/lapack-netlib/SRC/dgemlq.f index bb6b2868f..dea693c24 100644 --- a/lapack-netlib/SRC/dgemlq.f +++ b/lapack-netlib/SRC/dgemlq.f @@ -1,3 +1,4 @@ +*> \brief \b DGEMLQ * * Definition: * =========== @@ -144,7 +145,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> diff --git a/lapack-netlib/SRC/dgemqr.f b/lapack-netlib/SRC/dgemqr.f index 8509b13d9..0f7a42233 100644 --- a/lapack-netlib/SRC/dgemqr.f +++ b/lapack-netlib/SRC/dgemqr.f @@ -1,3 +1,4 @@ +*> \brief \b DGEMQR * * Definition: * =========== @@ -144,7 +145,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> diff --git a/lapack-netlib/SRC/dgeqr.f b/lapack-netlib/SRC/dgeqr.f index d0a1a18f9..0bff5d1f9 100644 --- a/lapack-netlib/SRC/dgeqr.f +++ b/lapack-netlib/SRC/dgeqr.f @@ -1,3 +1,4 @@ +*> \brief \b DGEQR * * Definition: * =========== @@ -17,7 +18,18 @@ * ============= *> *> \verbatim -*> DGEQR computes a QR factorization of an M-by-N matrix A. +*> +*> DGEQR computes a QR factorization of a real M-by-N matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix; +*> 0 is a (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -138,7 +150,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> @@ -160,10 +172,10 @@ SUBROUTINE DGEQR( M, N, A, LDA, T, TSIZE, WORK, LWORK, $ INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N, TSIZE, LWORK diff --git a/lapack-netlib/SRC/dgeqr2.f b/lapack-netlib/SRC/dgeqr2.f index c1e91e9bd..9ce1feb25 100644 --- a/lapack-netlib/SRC/dgeqr2.f +++ b/lapack-netlib/SRC/dgeqr2.f @@ -33,8 +33,17 @@ *> *> \verbatim *> -*> DGEQR2 computes a QR factorization of a real m by n matrix A: -*> A = Q * R. +*> DGEQR2 computes a QR factorization of a real m-by-n matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a m-by-m orthogonal matrix; +*> R is an upper-triangular n-by-n matrix; +*> 0 is a (m-n)-by-n zero matrix, if m > n. +*> *> \endverbatim * * Arguments: @@ -96,7 +105,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. 
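The DGELQ/DGEQR family above shares a two-stage calling convention: a query call with TSIZE = -1 and LWORK = -1 reports the required sizes of T and WORK (this is what the LWMIN/LWOPT/LWREQ bookkeeping added to dgelq.f computes), and the factorization is performed in a second call. A minimal sketch of that pattern for DGEQR follows; it is illustrative only and not part of the patch, and it assumes a Fortran 90 compiler for the allocatable arrays:

      SUBROUTINE QR_SKETCH( M, N, A, LDA, INFO )
*     Query-then-compute calling pattern for DGEQR.
      INTEGER            M, N, LDA, INFO, TSIZE, LWORK
      DOUBLE PRECISION   A( LDA, * ), TQ( 5 ), WQ( 1 )
      DOUBLE PRECISION, ALLOCATABLE :: T( : ), WORK( : )
*     Workspace query: TQ( 1 ) and WQ( 1 ) return the required sizes.
      CALL DGEQR( M, N, A, LDA, TQ, -1, WQ, -1, INFO )
      TSIZE = INT( TQ( 1 ) )
      LWORK = INT( WQ( 1 ) )
      ALLOCATE( T( TSIZE ), WORK( LWORK ) )
*     Compute the factorization; T( 2 ) and T( 3 ) record the MB/NB
*     blocking, so the same T can later be passed to DGEMQR to apply Q.
      CALL DGEQR( M, N, A, LDA, T, TSIZE, WORK, LWORK, INFO )
      DEALLOCATE( T, WORK )
      END

The same sequence applies to DGELQ with DGEMLQ, which is why the implementation-details note in these files warns that the contents of T are not part of the interface.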
* -*> \date December 2016 +*> \date November 2019 * *> \ingroup doubleGEcomputational * @@ -121,10 +130,10 @@ * ===================================================================== SUBROUTINE DGEQR2( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/dgeqr2p.f b/lapack-netlib/SRC/dgeqr2p.f index 921f79921..9b81ccb33 100644 --- a/lapack-netlib/SRC/dgeqr2p.f +++ b/lapack-netlib/SRC/dgeqr2p.f @@ -33,8 +33,18 @@ *> *> \verbatim *> -*> DGEQR2P computes a QR factorization of a real m by n matrix A: -*> A = Q * R. The diagonal entries of R are nonnegative. +*> DGEQR2P computes a QR factorization of a real m-by-n matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a m-by-m orthogonal matrix; +*> R is an upper-triangular n-by-n matrix with nonnegative diagonal +*> entries; +*> 0 is a (m-n)-by-n zero matrix, if m > n. +*> *> \endverbatim * * Arguments: @@ -97,7 +107,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup doubleGEcomputational * @@ -124,10 +134,10 @@ * ===================================================================== SUBROUTINE DGEQR2P( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/dgeqrf.f b/lapack-netlib/SRC/dgeqrf.f index 83d7d8dd7..98666221f 100644 --- a/lapack-netlib/SRC/dgeqrf.f +++ b/lapack-netlib/SRC/dgeqrf.f @@ -34,7 +34,16 @@ *> \verbatim *> *> DGEQRF computes a QR factorization of a real M-by-N matrix A: -*> A = Q * R. +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix; +*> 0 is a (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -111,7 +120,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup doubleGEcomputational * @@ -136,10 +145,10 @@ * ===================================================================== SUBROUTINE DGEQRF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/dgeqrfp.f b/lapack-netlib/SRC/dgeqrfp.f index d182f98c9..5cf4069ed 100644 --- a/lapack-netlib/SRC/dgeqrfp.f +++ b/lapack-netlib/SRC/dgeqrfp.f @@ -33,8 +33,18 @@ *> *> \verbatim *> -*> DGEQRFP computes a QR factorization of a real M-by-N matrix A: -*> A = Q * R. The diagonal entries of R are nonnegative. 
+*> DGEQRFP computes a QR factorization of a real M-by-N matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix with nonnegative diagonal +*> entries; +*> 0 is a (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -112,7 +122,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup doubleGEcomputational * @@ -139,10 +149,10 @@ * ===================================================================== SUBROUTINE DGEQRFP( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/dgerfsx.f b/lapack-netlib/SRC/dgerfsx.f index aafca8d10..495ea1726 100644 --- a/lapack-netlib/SRC/dgerfsx.f +++ b/lapack-netlib/SRC/dgerfsx.f @@ -283,7 +283,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -319,14 +319,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension (NPARAMS) -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -334,9 +334,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/dgesc2.f b/lapack-netlib/SRC/dgesc2.f index 2f01a762f..72d8a38f0 100644 --- a/lapack-netlib/SRC/dgesc2.f +++ b/lapack-netlib/SRC/dgesc2.f @@ -90,7 +90,7 @@ *> \verbatim *> SCALE is DOUBLE PRECISION *> On exit, SCALE contains the scale factor. SCALE is chosen -*> 0 <= SCALE <= 1 to prevent owerflow in the solution. +*> 0 <= SCALE <= 1 to prevent overflow in the solution. *> \endverbatim * * Authors: @@ -151,7 +151,7 @@ * .. * .. Executable Statements ..
* -* Set constant to control owerflow +* Set constant to control overflow * EPS = DLAMCH( 'P' ) SMLNUM = DLAMCH( 'S' ) / EPS diff --git a/lapack-netlib/SRC/dgesdd.f b/lapack-netlib/SRC/dgesdd.f index 926607f98..0218900d2 100644 --- a/lapack-netlib/SRC/dgesdd.f +++ b/lapack-netlib/SRC/dgesdd.f @@ -322,7 +322,7 @@ * IF( WNTQN ) THEN * dbdsdc needs only 4*N (or 6*N for uplo=L for LAPACK <= 3.6) -* keep 7*N for backwards compatability. +* keep 7*N for backwards compatibility. BDSPAC = 7*N ELSE BDSPAC = 3*N*N + 4*N @@ -448,7 +448,7 @@ * IF( WNTQN ) THEN * dbdsdc needs only 4*N (or 6*N for uplo=L for LAPACK <= 3.6) -* keep 7*N for backwards compatability. +* keep 7*N for backwards compatibility. BDSPAC = 7*M ELSE BDSPAC = 3*M*M + 4*M diff --git a/lapack-netlib/SRC/dgesvdq.f b/lapack-netlib/SRC/dgesvdq.f new file mode 100644 index 000000000..e495d2bf9 --- /dev/null +++ b/lapack-netlib/SRC/dgesvdq.f @@ -0,0 +1,1385 @@ +*> \brief DGESVDQ computes the singular value decomposition (SVD) with a QR-Preconditioned QR SVD Method for GE matrices +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download DGESVDQ + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE DGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, +* S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, +* WORK, LWORK, RWORK, LRWORK, INFO ) +* +* .. Scalar Arguments .. +* IMPLICIT NONE +* CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV +* INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LWORK, LRWORK, +* INFO +* .. +* .. Array Arguments .. +* DOUBLE PRECISION A( LDA, * ), U( LDU, * ), V( LDV, * ), WORK( * ) +* DOUBLE PRECISION S( * ), RWORK( * ) +* INTEGER IWORK( * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> DGESVDQ computes the singular value decomposition (SVD) of a real +*> M-by-N matrix A, where M >= N. The SVD of A is written as +*> [++] [xx] [x0] [xx] +*> A = U * SIGMA * V^*, [++] = [xx] * [ox] * [xx] +*> [++] [xx] +*> where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal +*> matrix, and V is an N-by-N orthogonal matrix. The diagonal elements +*> of SIGMA are the singular values of A. The columns of U and V are the +*> left and the right singular vectors of A, respectively. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] JOBA +*> \verbatim +*> JOBA is CHARACTER*1 +*> Specifies the level of accuracy in the computed SVD +*> = 'A' The requested accuracy corresponds to having the backward +*> error bounded by || delta A ||_F <= f(m,n) * EPS * || A ||_F, +*> where EPS = DLAMCH('Epsilon'). This authorises DGESVDQ to +*> truncate the computed triangular factor in a rank revealing +*> QR factorization whenever the truncated part is below the +*> threshold of the order of EPS * ||A||_F. This is aggressive +*> truncation level. +*> = 'M' Similarly as with 'A', but the truncation is more gentle: it +*> is allowed only when there is a drop on the diagonal of the +*> triangular factor in the QR factorization. This is medium +*> truncation level. +*> = 'H' High accuracy requested. No numerical rank determination based +*> on the rank revealing QR factorization is attempted. +*> = 'E' Same as 'H', and in addition the condition number of column +*> scaled A is estimated and returned in RWORK(1). 
+*> N^(-1/4)*RWORK(1) <= ||pinv(A_scaled)||_2 <= N^(1/4)*RWORK(1) +*> \endverbatim +*> +*> \param[in] JOBP +*> \verbatim +*> JOBP is CHARACTER*1 +*> = 'P' The rows of A are ordered in decreasing order with respect to +*> ||A(i,:)||_\infty. This enhances numerical accuracy at the cost +*> of extra data movement. Recommended for numerical robustness. +*> = 'N' No row pivoting. +*> \endverbatim +*> +*> \param[in] JOBR +*> \verbatim +*> JOBR is CHARACTER*1 +*> = 'T' After the initial pivoted QR factorization, DGESVD is applied to +*> the transposed R**T of the computed triangular factor R. This involves +*> some extra data movement (matrix transpositions). Useful for +*> experiments, research and development. +*> = 'N' The triangular factor R is given as input to DGESVD. This may be +*> preferred as it involves less data movement. +*> \endverbatim +*> +*> \param[in] JOBU +*> \verbatim +*> JOBU is CHARACTER*1 +*> = 'A' All M left singular vectors are computed and returned in the +*> matrix U. See the description of U. +*> = 'S' or 'U' N = min(M,N) left singular vectors are computed and returned +*> in the matrix U. See the description of U. +*> = 'R' Numerical rank NUMRANK is determined and only NUMRANK left singular +*> vectors are computed and returned in the matrix U. +*> = 'F' The N left singular vectors are returned in factored form as the +*> product of the Q factor from the initial QR factorization and the +*> N left singular vectors of (R**T , 0)**T. If row pivoting is used, +*> then the necessary information on the row pivoting is stored in +*> IWORK(N+1:N+M-1). +*> = 'N' The left singular vectors are not computed. +*> \endverbatim +*> +*> \param[in] JOBV +*> \verbatim +*> JOBV is CHARACTER*1 +*> = 'A', 'V' All N right singular vectors are computed and returned in +*> the matrix V. +*> = 'R' Numerical rank NUMRANK is determined and only NUMRANK right singular +*> vectors are computed and returned in the matrix V. This option is +*> allowed only if JOBU = 'R' or JOBU = 'N'; otherwise it is illegal. +*> = 'N' The right singular vectors are not computed. +*> \endverbatim +*> +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the input matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the input matrix A. M >= N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is DOUBLE PRECISION array of dimensions LDA x N +*> On entry, the input matrix A. +*> On exit, if JOBU .NE. 'N' or JOBV .NE. 'N', the lower triangle of A contains +*> the Householder vectors as stored by DGEQP3. If JOBU = 'F', these Householder +*> vectors together with WORK(1:N) can be used to restore the Q factors from +*> the initial pivoted QR factorization of A. See the description of U. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER. +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] S +*> \verbatim +*> S is DOUBLE PRECISION array of dimension N. +*> The singular values of A, ordered so that S(i) >= S(i+1). +*> \endverbatim +*> +*> \param[out] U +*> \verbatim +*> U is DOUBLE PRECISION array, dimension +*> LDU x M if JOBU = 'A'; see the description of LDU. In this case, +*> on exit, U contains the M left singular vectors. +*> LDU x N if JOBU = 'S', 'U', 'R' ; see the description of LDU. In this +*> case, U contains the leading N or the leading NUMRANK left singular vectors. +*> LDU x N if JOBU = 'F' ; see the description of LDU. 
In this case U +*> contains an N x N orthogonal matrix that can be used to form the left +*> singular vectors. +*> If JOBU = 'N', U is not referenced. +*> \endverbatim +*> +*> \param[in] LDU +*> \verbatim +*> LDU is INTEGER. +*> The leading dimension of the array U. +*> If JOBU = 'A', 'S', 'U', 'R', LDU >= max(1,M). +*> If JOBU = 'F', LDU >= max(1,N). +*> Otherwise, LDU >= 1. +*> \endverbatim +*> +*> \param[out] V +*> \verbatim +*> V is DOUBLE PRECISION array, dimension +*> LDV x N if JOBV = 'A', 'V', 'R' or if JOBA = 'E' . +*> If JOBV = 'A', or 'V', V contains the N-by-N orthogonal matrix V**T; +*> If JOBV = 'R', V contains the first NUMRANK rows of V**T (the right +*> singular vectors, stored rowwise, of the NUMRANK largest singular values). +*> If JOBV = 'N' and JOBA = 'E', V is used as a workspace. +*> If JOBV = 'N', and JOBA.NE.'E', V is not referenced. +*> \endverbatim +*> +*> \param[in] LDV +*> \verbatim +*> LDV is INTEGER +*> The leading dimension of the array V. +*> If JOBV = 'A', 'V', 'R', or JOBA = 'E', LDV >= max(1,N). +*> Otherwise, LDV >= 1. +*> \endverbatim +*> +*> \param[out] NUMRANK +*> \verbatim +*> NUMRANK is INTEGER +*> NUMRANK is the numerical rank first determined after the rank +*> revealing QR factorization, following the strategy specified by the +*> value of JOBA. If JOBV = 'R' and JOBU = 'R', only NUMRANK +*> leading singular values and vectors are then requested in the call +*> of DGESVD. The final value of NUMRANK might be further reduced if +*> some singular values are computed as zeros. +*> \endverbatim +*> +*> \param[out] IWORK +*> \verbatim +*> IWORK is INTEGER array, dimension (max(1, LIWORK)). +*> On exit, IWORK(1:N) contains column pivoting permutation of the +*> rank revealing QR factorization. +*> If JOBP = 'P', IWORK(N+1:N+M-1) contains the indices of the sequence +*> of row swaps used in row pivoting. These can be used to restore the +*> left singular vectors in the case JOBU = 'F'. +*> +*> If LIWORK, LWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> IWORK(1) returns the minimal LIWORK. +*> \endverbatim +*> +*> \param[in] LIWORK +*> \verbatim +*> LIWORK is INTEGER +*> The dimension of the array IWORK. +*> LIWORK >= N + M - 1, if JOBP = 'P' and JOBA .NE. 'E'; +*> LIWORK >= N if JOBP = 'N' and JOBA .NE. 'E'; +*> LIWORK >= N + M - 1 + N, if JOBP = 'P' and JOBA = 'E'; +*> LIWORK >= N + N if JOBP = 'N' and JOBA = 'E'. +* +*> If LIWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the WORK, IWORK, and RWORK arrays, and no error +*> message related to LWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] WORK +*> \verbatim +*> WORK is DOUBLE PRECISION array, dimension (max(2, LWORK)), used as a workspace. +*> On exit, if, on entry, LWORK.NE.-1, WORK(1:N) contains parameters +*> needed to recover the Q factor from the QR factorization computed by +*> DGEQP3. +*> +*> If LIWORK, LWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> WORK(1) returns the optimal LWORK, and +*> WORK(2) returns the minimal LWORK. +*> \endverbatim +*> +*> \param[in,out] LWORK +*> \verbatim +*> LWORK is INTEGER +*> The dimension of the array WORK.
It is determined as follows: +*> Let LWQP3 = 3*N+1, LWCON = 3*N, and let +*> LWORQ = { MAX( N, 1 ), if JOBU = 'R', 'S', or 'U' +*> { MAX( M, 1 ), if JOBU = 'A' +*> LWSVD = MAX( 5*N, 1 ) +*> LWLQF = MAX( N/2, 1 ), LWSVD2 = MAX( 5*(N/2), 1 ), LWORLQ = MAX( N, 1 ), +*> LWQRF = MAX( N/2, 1 ), LWORQ2 = MAX( N, 1 ) +*> Then the minimal value of LWORK is: +*> = MAX( N + LWQP3, LWSVD ) if only the singular values are needed; +*> = MAX( N + LWQP3, LWCON, LWSVD ) if only the singular values are needed, +*> and a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD, LWORQ ) if the singular values and the left +*> singular vectors are requested; +*> = N + MAX( LWQP3, LWCON, LWSVD, LWORQ ) if the singular values and the left +*> singular vectors are requested, and also +*> a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD ) if the singular values and the right +*> singular vectors are requested; +*> = N + MAX( LWQP3, LWCON, LWSVD ) if the singular values and the right +*> singular vectors are requested, and also +*> a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD, LWORQ ) if the full SVD is requested with JOBV = 'R'; +*> independent of JOBR; +*> = N + MAX( LWQP3, LWCON, LWSVD, LWORQ ) if the full SVD is requested, +*> JOBV = 'R' and, also a scaled condition +*> estimate requested; independent of JOBR; +*> = MAX( N + MAX( LWQP3, LWSVD, LWORQ ), +*> N + MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, N/2+LWORLQ, LWORQ) ) if the +*> full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='N' +*> = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWORQ ), +*> N + MAX( LWQP3, LWCON, N/2+LWLQF, N/2+LWSVD2, N/2+LWORLQ, LWORQ ) ) +*> if the full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='N', and also a scaled condition number estimate +*> requested. +*> = MAX( N + MAX( LWQP3, LWSVD, LWORQ ), +*> N + MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, N/2+LWORQ2, LWORQ ) ) if the +*> full SVD is requested with JOBV = 'A', 'V', and JOBR ='T' +*> = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWORQ ), +*> N + MAX( LWQP3, LWCON, N/2+LWQRF, N/2+LWSVD2, N/2+LWORQ2, LWORQ ) ) +*> if the full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='T', and also a scaled condition number estimate +*> requested. +*> Finally, LWORK must be at least two: LWORK = MAX( 2, LWORK ). +*> +*> If LWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the WORK, IWORK, and RWORK arrays, and no error +*> message related to LWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] RWORK +*> \verbatim +*> RWORK is DOUBLE PRECISION array, dimension (max(1, LRWORK)). +*> On exit, +*> 1. If JOBA = 'E', RWORK(1) contains an estimate of the condition +*> number of column scaled A. If A = C * D where D is diagonal and C +*> has unit columns in the Euclidean norm, then, assuming full column rank, +*> N^(-1/4) * RWORK(1) <= ||pinv(C)||_2 <= N^(1/4) * RWORK(1). +*> Otherwise, RWORK(1) = -1. +*> 2. RWORK(2) contains the number of singular values computed as +*> exact zeros in DGESVD applied to the upper triangular or trapezoidal +*> R (from the initial QR factorization). In case of early exit (no call to +*> DGESVD, such as in the case of zero matrix) RWORK(2) = -1. +*> +*> If LIWORK, LWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> RWORK(1) returns the minimal LRWORK. +*> \endverbatim +*> +*> \param[in] LRWORK +*> \verbatim +*> LRWORK is INTEGER. +*> The dimension of the array RWORK. +*> If JOBP ='P', then LRWORK >= MAX(2, M).
+*> Otherwise, LRWORK >= 2. +* +*> If LRWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the WORK, IWORK, and RWORK arrays, and no error +*> message related to LWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit. +*> < 0: if INFO = -i, the i-th argument had an illegal value. +*> > 0: if DBDSQR did not converge, INFO specifies how many superdiagonals +*> of an intermediate bidiagonal form B (computed in DGESVD) did not +*> converge to zero. +*> \endverbatim +* +*> \par Further Details: +* ======================== +*> +*> \verbatim +*> +*> 1. The data movement (matrix transpose) is coded using simple nested +*> DO-loops because BLAS and LAPACK do not provide corresponding subroutines. +*> Those DO-loops are easily identified in this source code - by the CONTINUE +*> statements labeled with 11**. In an optimized version of this code, the +*> nested DO loops should be replaced with calls to an optimized subroutine. +*> 2. This code scales A by 1/SQRT(M) if the largest ABS(A(i,j)) could cause +*> column norm overflow. This is the minimal precaution and it is left to the +*> SVD routine (CGESVD) to do its own preemptive scaling if potential over- +*> or underflows are detected. To avoid repeated scanning of the array A, +*> an optimal implementation would do all necessary scaling before calling +*> CGESVD and the scaling in CGESVD can be switched off. +*> 3. Other comments related to code optimization are given in comments in the +*> code, enclosed in [[double brackets]]. +*> \endverbatim +* +*> \par Bugs, examples and comments +* =========================== +* +*> \verbatim +*> Please report all bugs and send interesting examples and/or comments to +*> drmac@math.hr. Thank you. +*> \endverbatim +* +*> \par References +* =============== +* +*> \verbatim +*> [1] Zlatko Drmac, Algorithm 977: A QR-Preconditioned QR SVD Method for +*> Computing the SVD with High Accuracy. ACM Trans. Math. Softw. +*> 44(1): 11:1-11:30 (2017) +*> +*> SIGMA library, xGESVDQ section updated February 2016. +*> Developed and coded by Zlatko Drmac, Department of Mathematics +*> University of Zagreb, Croatia, drmac@math.hr +*> \endverbatim +* +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> Developed and coded by Zlatko Drmac, Department of Mathematics +*> University of Zagreb, Croatia, drmac@math.hr +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2018 +* +*> \ingroup doubleGEsing +* +* ===================================================================== + SUBROUTINE DGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, + $ S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, + $ WORK, LWORK, RWORK, LRWORK, INFO ) +* .. Scalar Arguments .. + IMPLICIT NONE + CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV + INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LWORK, LRWORK, + $ INFO +* .. +* .. Array Arguments .. + DOUBLE PRECISION A( LDA, * ), U( LDU, * ), V( LDV, * ), WORK( * ) + DOUBLE PRECISION S( * ), RWORK( * ) + INTEGER IWORK( * ) +* +* ===================================================================== +* +* .. Parameters .. + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) +* .. Local Scalars ..
+ INTEGER IERR, IWOFF, NR, N1, OPTRATIO, p, q + INTEGER LWCON, LWQP3, LWRK_DGELQF, LWRK_DGESVD, LWRK_DGESVD2, + $ LWRK_DGEQP3, LWRK_DGEQRF, LWRK_DORMLQ, LWRK_DORMQR, + $ LWRK_DORMQR2, LWLQF, LWQRF, LWSVD, LWSVD2, LWORQ, + $ LWORQ2, LWORLQ, MINWRK, MINWRK2, OPTWRK, OPTWRK2, + $ IMINWRK, RMINWRK + LOGICAL ACCLA, ACCLM, ACCLH, ASCALED, CONDA, DNTWU, DNTWV, + $ LQUERY, LSVC0, LSVEC, ROWPRM, RSVEC, RTRANS, WNTUA, + $ WNTUF, WNTUR, WNTUS, WNTVA, WNTVR + DOUBLE PRECISION BIG, EPSLN, RTMP, SCONDA, SFMIN +* .. Local Arrays + DOUBLE PRECISION RDUMMY(1) +* .. +* .. External Subroutines (BLAS, LAPACK) + EXTERNAL DGELQF, DGEQP3, DGEQRF, DGESVD, DLACPY, DLAPMT, + $ DLASCL, DLASET, DLASWP, DSCAL, DPOCON, DORMLQ, + $ DORMQR, XERBLA +* .. +* .. External Functions (BLAS, LAPACK) + LOGICAL LSAME + INTEGER IDAMAX + DOUBLE PRECISION DLANGE, DNRM2, DLAMCH + EXTERNAL DLANGE, LSAME, IDAMAX, DNRM2, DLAMCH +* .. +* .. Intrinsic Functions .. +* + INTRINSIC ABS, MAX, MIN, DBLE, SQRT +* +* Test the input arguments +* + WNTUS = LSAME( JOBU, 'S' ) .OR. LSAME( JOBU, 'U' ) + WNTUR = LSAME( JOBU, 'R' ) + WNTUA = LSAME( JOBU, 'A' ) + WNTUF = LSAME( JOBU, 'F' ) + LSVC0 = WNTUS .OR. WNTUR .OR. WNTUA + LSVEC = LSVC0 .OR. WNTUF + DNTWU = LSAME( JOBU, 'N' ) +* + WNTVR = LSAME( JOBV, 'R' ) + WNTVA = LSAME( JOBV, 'A' ) .OR. LSAME( JOBV, 'V' ) + RSVEC = WNTVR .OR. WNTVA + DNTWV = LSAME( JOBV, 'N' ) +* + ACCLA = LSAME( JOBA, 'A' ) + ACCLM = LSAME( JOBA, 'M' ) + CONDA = LSAME( JOBA, 'E' ) + ACCLH = LSAME( JOBA, 'H' ) .OR. CONDA +* + ROWPRM = LSAME( JOBP, 'P' ) + RTRANS = LSAME( JOBR, 'T' ) +* + IF ( ROWPRM ) THEN + IF ( CONDA ) THEN + IMINWRK = MAX( 1, N + M - 1 + N ) + ELSE + IMINWRK = MAX( 1, N + M - 1 ) + END IF + RMINWRK = MAX( 2, M ) + ELSE + IF ( CONDA ) THEN + IMINWRK = MAX( 1, N + N ) + ELSE + IMINWRK = MAX( 1, N ) + END IF + RMINWRK = 2 + END IF + LQUERY = (LIWORK .EQ. -1 .OR. LWORK .EQ. -1 .OR. LRWORK .EQ. -1) + INFO = 0 + IF ( .NOT. ( ACCLA .OR. ACCLM .OR. ACCLH ) ) THEN + INFO = -1 + ELSE IF ( .NOT.( ROWPRM .OR. LSAME( JOBP, 'N' ) ) ) THEN + INFO = -2 + ELSE IF ( .NOT.( RTRANS .OR. LSAME( JOBR, 'N' ) ) ) THEN + INFO = -3 + ELSE IF ( .NOT.( LSVEC .OR. DNTWU ) ) THEN + INFO = -4 + ELSE IF ( WNTUR .AND. WNTVA ) THEN + INFO = -5 + ELSE IF ( .NOT.( RSVEC .OR. DNTWV )) THEN + INFO = -5 + ELSE IF ( M.LT.0 ) THEN + INFO = -6 + ELSE IF ( ( N.LT.0 ) .OR. ( N.GT.M ) ) THEN + INFO = -7 + ELSE IF ( LDA.LT.MAX( 1, M ) ) THEN + INFO = -9 + ELSE IF ( LDU.LT.1 .OR. ( LSVC0 .AND. LDU.LT.M ) .OR. + $ ( WNTUF .AND. LDU.LT.N ) ) THEN + INFO = -12 + ELSE IF ( LDV.LT.1 .OR. ( RSVEC .AND. LDV.LT.N ) .OR. + $ ( CONDA .AND. LDV.LT.N ) ) THEN + INFO = -14 + ELSE IF ( LIWORK .LT. IMINWRK .AND. .NOT. LQUERY ) THEN + INFO = -17 + END IF +* +* + IF ( INFO .EQ. 0 ) THEN +* .. compute the minimal and the optimal workspace lengths +* [[The expressions for computing the minimal and the optimal +* values of LWORK are written with a lot of redundancy and +* can be simplified. However, this detailed form is easier for +* maintenance and modifications of the code.]] +* +* .. minimal workspace length for DGEQP3 of an M x N matrix + LWQP3 = 3 * N + 1 +* .. minimal workspace length for DORMQR to build left singular vectors + IF ( WNTUS .OR. WNTUR ) THEN + LWORQ = MAX( N , 1 ) + ELSE IF ( WNTUA ) THEN + LWORQ = MAX( M , 1 ) + END IF +* .. minimal workspace length for DPOCON of an N x N matrix + LWCON = 3 * N +* .. 
DGESVD of an N x N matrix + LWSVD = MAX( 5 * N, 1 ) + IF ( LQUERY ) THEN + CALL DGEQP3( M, N, A, LDA, IWORK, RDUMMY, RDUMMY, -1, + $ IERR ) + LWRK_DGEQP3 = INT( RDUMMY(1) ) + IF ( WNTUS .OR. WNTUR ) THEN + CALL DORMQR( 'L', 'N', M, N, N, A, LDA, RDUMMY, U, + $ LDU, RDUMMY, -1, IERR ) + LWRK_DORMQR = INT( RDUMMY(1) ) + ELSE IF ( WNTUA ) THEN + CALL DORMQR( 'L', 'N', M, M, N, A, LDA, RDUMMY, U, + $ LDU, RDUMMY, -1, IERR ) + LWRK_DORMQR = INT( RDUMMY(1) ) + ELSE + LWRK_DORMQR = 0 + END IF + END IF + MINWRK = 2 + OPTWRK = 2 + IF ( .NOT. (LSVEC .OR. RSVEC )) THEN +* .. minimal and optimal sizes of the workspace if +* only the singular values are requested + IF ( CONDA ) THEN + MINWRK = MAX( N+LWQP3, LWCON, LWSVD ) + ELSE + MINWRK = MAX( N+LWQP3, LWSVD ) + END IF + IF ( LQUERY ) THEN + CALL DGESVD( 'N', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_DGESVD = INT( RDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = MAX( N+LWRK_DGEQP3, N+LWCON, LWRK_DGESVD ) + ELSE + OPTWRK = MAX( N+LWRK_DGEQP3, LWRK_DGESVD ) + END IF + END IF + ELSE IF ( LSVEC .AND. (.NOT.RSVEC) ) THEN +* .. minimal and optimal sizes of the workspace if the +* singular values and the left singular vectors are requested + IF ( CONDA ) THEN + MINWRK = N + MAX( LWQP3, LWCON, LWSVD, LWORQ ) + ELSE + MINWRK = N + MAX( LWQP3, LWSVD, LWORQ ) + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL DGESVD( 'N', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + ELSE + CALL DGESVD( 'O', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + END IF + LWRK_DGESVD = INT( RDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = N + MAX( LWRK_DGEQP3, LWCON, LWRK_DGESVD, + $ LWRK_DORMQR ) + ELSE + OPTWRK = N + MAX( LWRK_DGEQP3, LWRK_DGESVD, + $ LWRK_DORMQR ) + END IF + END IF + ELSE IF ( RSVEC .AND. (.NOT.LSVEC) ) THEN +* .. minimal and optimal sizes of the workspace if the +* singular values and the right singular vectors are requested + IF ( CONDA ) THEN + MINWRK = N + MAX( LWQP3, LWCON, LWSVD ) + ELSE + MINWRK = N + MAX( LWQP3, LWSVD ) + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL DGESVD( 'O', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + ELSE + CALL DGESVD( 'N', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + END IF + LWRK_DGESVD = INT( RDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = N + MAX( LWRK_DGEQP3, LWCON, LWRK_DGESVD ) + ELSE + OPTWRK = N + MAX( LWRK_DGEQP3, LWRK_DGESVD ) + END IF + END IF + ELSE +* .. minimal and optimal sizes of the workspace if the +* full SVD is requested + IF ( RTRANS ) THEN + MINWRK = MAX( LWQP3, LWSVD, LWORQ ) + IF ( CONDA ) MINWRK = MAX( MINWRK, LWCON ) + MINWRK = MINWRK + N + IF ( WNTVA ) THEN +* .. minimal workspace length for N x N/2 DGEQRF + LWQRF = MAX( N/2, 1 ) +* .. minimal workspace length for N/2 x N/2 DGESVD + LWSVD2 = MAX( 5 * (N/2), 1 ) + LWORQ2 = MAX( N, 1 ) + MINWRK2 = MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, + $ N/2+LWORQ2, LWORQ ) + IF ( CONDA ) MINWRK2 = MAX( MINWRK2, LWCON ) + MINWRK2 = N + MINWRK2 + MINWRK = MAX( MINWRK, MINWRK2 ) + END IF + ELSE + MINWRK = MAX( LWQP3, LWSVD, LWORQ ) + IF ( CONDA ) MINWRK = MAX( MINWRK, LWCON ) + MINWRK = MINWRK + N + IF ( WNTVA ) THEN +* ..
minimal workspace length for N/2 x N DGELQF + LWLQF = MAX( N/2, 1 ) + LWSVD2 = MAX( 5 * (N/2), 1 ) + LWORLQ = MAX( N , 1 ) + MINWRK2 = MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, + $ N/2+LWORLQ, LWORQ ) + IF ( CONDA ) MINWRK2 = MAX( MINWRK2, LWCON ) + MINWRK2 = N + MINWRK2 + MINWRK = MAX( MINWRK, MINWRK2 ) + END IF + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL DGESVD( 'O', 'A', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_DGESVD = INT( RDUMMY(1) ) + OPTWRK = MAX(LWRK_DGEQP3,LWRK_DGESVD,LWRK_DORMQR) + IF ( CONDA ) OPTWRK = MAX( OPTWRK, LWCON ) + OPTWRK = N + OPTWRK + IF ( WNTVA ) THEN + CALL DGEQRF(N,N/2,U,LDU,RDUMMY,RDUMMY,-1,IERR) + LWRK_DGEQRF = INT( RDUMMY(1) ) + CALL DGESVD( 'S', 'O', N/2,N/2, V,LDV, S, U,LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_DGESVD2 = INT( RDUMMY(1) ) + CALL DORMQR( 'R', 'C', N, N, N/2, U, LDU, RDUMMY, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_DORMQR2 = INT( RDUMMY(1) ) + OPTWRK2 = MAX( LWRK_DGEQP3, N/2+LWRK_DGEQRF, + $ N/2+LWRK_DGESVD2, N/2+LWRK_DORMQR2 ) + IF ( CONDA ) OPTWRK2 = MAX( OPTWRK2, LWCON ) + OPTWRK2 = N + OPTWRK2 + OPTWRK = MAX( OPTWRK, OPTWRK2 ) + END IF + ELSE + CALL DGESVD( 'S', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_DGESVD = INT( RDUMMY(1) ) + OPTWRK = MAX(LWRK_DGEQP3,LWRK_DGESVD,LWRK_DORMQR) + IF ( CONDA ) OPTWRK = MAX( OPTWRK, LWCON ) + OPTWRK = N + OPTWRK + IF ( WNTVA ) THEN + CALL DGELQF(N/2,N,U,LDU,RDUMMY,RDUMMY,-1,IERR) + LWRK_DGELQF = INT( RDUMMY(1) ) + CALL DGESVD( 'S','O', N/2,N/2, V, LDV, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_DGESVD2 = INT( RDUMMY(1) ) + CALL DORMLQ( 'R', 'N', N, N, N/2, U, LDU, RDUMMY, + $ V, LDV, RDUMMY,-1,IERR ) + LWRK_DORMLQ = INT( RDUMMY(1) ) + OPTWRK2 = MAX( LWRK_DGEQP3, N/2+LWRK_DGELQF, + $ N/2+LWRK_DGESVD2, N/2+LWRK_DORMLQ ) + IF ( CONDA ) OPTWRK2 = MAX( OPTWRK2, LWCON ) + OPTWRK2 = N + OPTWRK2 + OPTWRK = MAX( OPTWRK, OPTWRK2 ) + END IF + END IF + END IF + END IF +* + MINWRK = MAX( 2, MINWRK ) + OPTWRK = MAX( 2, OPTWRK ) + IF ( LWORK .LT. MINWRK .AND. (.NOT.LQUERY) ) INFO = -19 +* + END IF +* + IF (INFO .EQ. 0 .AND. LRWORK .LT. RMINWRK .AND. .NOT. LQUERY) THEN + INFO = -21 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGESVDQ', -INFO ) + RETURN + ELSE IF ( LQUERY ) THEN +* +* Return optimal workspace +* + IWORK(1) = IMINWRK + WORK(1) = OPTWRK + WORK(2) = MINWRK + RWORK(1) = RMINWRK + RETURN + END IF +* +* Quick return if the matrix is void. +* + IF( ( M.EQ.0 ) .OR. ( N.EQ.0 ) ) THEN +* .. all output is void. + RETURN + END IF +* + BIG = DLAMCH('O') + ASCALED = .FALSE. + IWOFF = 1 + IF ( ROWPRM ) THEN + IWOFF = M +* .. reordering the rows in decreasing sequence in the +* ell-infinity norm - this enhances numerical robustness in +* the case of differently scaled rows. + DO 1904 p = 1, M +* RWORK(p) = ABS( A(p,ICAMAX(N,A(p,1),LDA)) ) +* [[DLANGE will return NaN if an entry of the p-th row is Nan]] + RWORK(p) = DLANGE( 'M', 1, N, A(p,1), LDA, RDUMMY ) +* .. check for NaN's and Inf's + IF ( ( RWORK(p) .NE. RWORK(p) ) .OR. + $ ( (RWORK(p)*ZERO) .NE. ZERO ) ) THEN + INFO = -8 + CALL XERBLA( 'DGESVDQ', -INFO ) + RETURN + END IF + 1904 CONTINUE + DO 1952 p = 1, M - 1 + q = IDAMAX( M-p+1, RWORK(p), 1 ) + p - 1 + IWORK(N+p) = q + IF ( p .NE. q ) THEN + RTMP = RWORK(p) + RWORK(p) = RWORK(q) + RWORK(q) = RTMP + END IF + 1952 CONTINUE +* + IF ( RWORK(1) .EQ. ZERO ) THEN +* Quick return: A is the M x N zero matrix. 
+ NUMRANK = 0 + CALL DLASET( 'G', N, 1, ZERO, ZERO, S, N ) + IF ( WNTUS ) CALL DLASET('G', M, N, ZERO, ONE, U, LDU) + IF ( WNTUA ) CALL DLASET('G', M, M, ZERO, ONE, U, LDU) + IF ( WNTVA ) CALL DLASET('G', N, N, ZERO, ONE, V, LDV) + IF ( WNTUF ) THEN + CALL DLASET( 'G', N, 1, ZERO, ZERO, WORK, N ) + CALL DLASET( 'G', M, N, ZERO, ONE, U, LDU ) + END IF + DO 5001 p = 1, N + IWORK(p) = p + 5001 CONTINUE + IF ( ROWPRM ) THEN + DO 5002 p = N + 1, N + M - 1 + IWORK(p) = p - N + 5002 CONTINUE + END IF + IF ( CONDA ) RWORK(1) = -1 + RWORK(2) = -1 + RETURN + END IF +* + IF ( RWORK(1) .GT. BIG / SQRT(DBLE(M)) ) THEN +* .. to prevent overflow in the QR factorization, scale the +* matrix by 1/sqrt(M) if too large entry detected + CALL DLASCL('G',0,0,SQRT(DBLE(M)),ONE, M,N, A,LDA, IERR) + ASCALED = .TRUE. + END IF + CALL DLASWP( N, A, LDA, 1, M-1, IWORK(N+1), 1 ) + END IF +* +* .. At this stage, preemptive scaling is done only to avoid column +* norms overflows during the QR factorization. The SVD procedure should +* have its own scaling to save the singular values from overflows and +* underflows. That depends on the SVD procedure. +* + IF ( .NOT.ROWPRM ) THEN + RTMP = DLANGE( 'M', M, N, A, LDA, RDUMMY ) + IF ( ( RTMP .NE. RTMP ) .OR. + $ ( (RTMP*ZERO) .NE. ZERO ) ) THEN + INFO = -8 + CALL XERBLA( 'DGESVDQ', -INFO ) + RETURN + END IF + IF ( RTMP .GT. BIG / SQRT(DBLE(M)) ) THEN +* .. to prevent overflow in the QR factorization, scale the +* matrix by 1/sqrt(M) if too large entry detected + CALL DLASCL('G',0,0, SQRT(DBLE(M)),ONE, M,N, A,LDA, IERR) + ASCALED = .TRUE. + END IF + END IF +* +* .. QR factorization with column pivoting +* +* A * P = Q * [ R ] +* [ 0 ] +* + DO 1963 p = 1, N +* .. all columns are free columns + IWORK(p) = 0 + 1963 CONTINUE + CALL DGEQP3( M, N, A, LDA, IWORK, WORK, WORK(N+1), LWORK-N, + $ IERR ) +* +* If the user requested accuracy level allows truncation in the +* computed upper triangular factor, the matrix R is examined and, +* if possible, replaced with its leading upper trapezoidal part. +* + EPSLN = DLAMCH('E') + SFMIN = DLAMCH('S') +* SMALL = SFMIN / EPSLN + NR = N +* + IF ( ACCLA ) THEN +* +* Standard absolute error bound suffices. All sigma_i with +* sigma_i < N*EPS*||A||_F are flushed to zero. This is an +* aggressive enforcement of lower numerical rank by introducing a +* backward error of the order of N*EPS*||A||_F. + NR = 1 + RTMP = SQRT(DBLE(N))*EPSLN + DO 3001 p = 2, N + IF ( ABS(A(p,p)) .LT. (RTMP*ABS(A(1,1))) ) GO TO 3002 + NR = NR + 1 + 3001 CONTINUE + 3002 CONTINUE +* + ELSEIF ( ACCLM ) THEN +* .. similarly as above, only slightly more gentle (less aggressive). +* Sudden drop on the diagonal of R is used as the criterion for being +* close-to-rank-deficient. The threshold is set to EPSLN=DLAMCH('E'). +* [[This can be made more flexible by replacing this hard-coded value +* with a user specified threshold.]] Also, the values that underflow +* will be truncated. + NR = 1 + DO 3401 p = 2, N + IF ( ( ABS(A(p,p)) .LT. (EPSLN*ABS(A(p-1,p-1))) ) .OR. + $ ( ABS(A(p,p)) .LT. SFMIN ) ) GO TO 3402 + NR = NR + 1 + 3401 CONTINUE + 3402 CONTINUE +* + ELSE +* .. RRQR not authorized to determine numerical rank except in the +* obvious case of zero pivots. +* .. inspect R for exact zeros on the diagonal; +* R(i,i)=0 => R(i:N,i:N)=0. + NR = 1 + DO 3501 p = 2, N + IF ( ABS(A(p,p)) .EQ. ZERO ) GO TO 3502 + NR = NR + 1 + 3501 CONTINUE + 3502 CONTINUE +* + IF ( CONDA ) THEN +* Estimate the scaled condition number of A. 
Use the fact that it is +* the same as the scaled condition number of R. +* .. V is used as workspace + CALL DLACPY( 'U', N, N, A, LDA, V, LDV ) +* Only the leading NR x NR submatrix of the triangular factor +* is considered. Only if NR=N will this give a reliable error +* bound. However, even for NR < N, this can be used on an +* expert level and obtain useful information in the sense of +* perturbation theory. + DO 3053 p = 1, NR + RTMP = DNRM2( p, V(1,p), 1 ) + CALL DSCAL( p, ONE/RTMP, V(1,p), 1 ) + 3053 CONTINUE + IF ( .NOT. ( LSVEC .OR. RSVEC ) ) THEN + CALL DPOCON( 'U', NR, V, LDV, ONE, RTMP, + $ WORK, IWORK(N+IWOFF), IERR ) + ELSE + CALL DPOCON( 'U', NR, V, LDV, ONE, RTMP, + $ WORK(N+1), IWORK(N+IWOFF), IERR ) + END IF + SCONDA = ONE / SQRT(RTMP) +* For NR=N, SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1), +* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA +* See the reference [1] for more details. + END IF +* + ENDIF +* + IF ( WNTUR ) THEN + N1 = NR + ELSE IF ( WNTUS .OR. WNTUF) THEN + N1 = N + ELSE IF ( WNTUA ) THEN + N1 = M + END IF +* + IF ( .NOT. ( RSVEC .OR. LSVEC ) ) THEN +*....................................................................... +* .. only the singular values are requested +*....................................................................... + IF ( RTRANS ) THEN +* +* .. compute the singular values of R**T = [A](1:NR,1:N)**T +* .. set the lower triangle of [A] to [A](1:NR,1:N)**T and +* the upper triangle of [A] to zero. + DO 1146 p = 1, MIN( N, NR ) + DO 1147 q = p + 1, N + A(q,p) = A(p,q) + IF ( q .LE. NR ) A(p,q) = ZERO + 1147 CONTINUE + 1146 CONTINUE +* + CALL DGESVD( 'N', 'N', N, NR, A, LDA, S, U, LDU, + $ V, LDV, WORK, LWORK, INFO ) +* + ELSE +* +* .. compute the singular values of R = [A](1:NR,1:N) +* + IF ( NR .GT. 1 ) + $ CALL DLASET( 'L', NR-1,NR-1, ZERO,ZERO, A(2,1), LDA ) + CALL DGESVD( 'N', 'N', NR, N, A, LDA, S, U, LDU, + $ V, LDV, WORK, LWORK, INFO ) +* + END IF +* + ELSE IF ( LSVEC .AND. ( .NOT. RSVEC) ) THEN +*....................................................................... +* .. the singular values and the left singular vectors requested +*....................................................................... + IF ( RTRANS ) THEN +* .. apply DGESVD to R**T +* .. copy R**T into [U] and overwrite [U] with the right singular +* vectors of R + DO 1192 p = 1, NR + DO 1193 q = p, N + U(q,p) = A(p,q) + 1193 CONTINUE + 1192 CONTINUE + IF ( NR .GT. 1 ) + $ CALL DLASET( 'U', NR-1,NR-1, ZERO,ZERO, U(1,2), LDU ) +* .. the left singular vectors not computed, the NR right singular +* vectors overwrite [U](1:NR,1:NR) as transposed. These +* will be pre-multiplied by Q to build the left singular vectors of A. + CALL DGESVD( 'N', 'O', N, NR, U, LDU, S, U, LDU, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* + DO 1119 p = 1, NR + DO 1120 q = p + 1, NR + RTMP = U(q,p) + U(q,p) = U(p,q) + U(p,q) = RTMP + 1120 CONTINUE + 1119 CONTINUE +* + ELSE +* .. apply DGESVD to R +* .. copy R into [U] and overwrite [U] with the left singular vectors + CALL DLACPY( 'U', NR, N, A, LDA, U, LDU ) + IF ( NR .GT. 1 ) + $ CALL DLASET( 'L', NR-1, NR-1, ZERO, ZERO, U(2,1), LDU ) +* .. the right singular vectors not computed, the NR left singular +* vectors overwrite [U](1:NR,1:NR) + CALL DGESVD( 'O', 'N', NR, N, U, LDU, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) +* .. now [U](1:NR,1:NR) contains the NR left singular vectors of +* R. These will be pre-multiplied by Q to build the left singular +* vectors of A. + END IF +* +* ..
assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. ( .NOT.WNTUF ) ) THEN + CALL DLASET('A', M-NR, NR, ZERO, ZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL DLASET( 'A',NR,N1-NR,ZERO,ZERO,U(1,NR+1), LDU ) + CALL DLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* +* The Q matrix from the first QRF is built into the left singular +* vectors matrix U. +* + IF ( .NOT.WNTUF ) + $ CALL DORMQR( 'L', 'N', M, N1, N, A, LDA, WORK, U, + $ LDU, WORK(N+1), LWORK-N, IERR ) + IF ( ROWPRM .AND. .NOT.WNTUF ) + $ CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(N+1), -1 ) +* + ELSE IF ( RSVEC .AND. ( .NOT. LSVEC ) ) THEN +*....................................................................... +* .. the singular values and the right singular vectors requested +*....................................................................... + IF ( RTRANS ) THEN +* .. apply DGESVD to R**T +* .. copy R**T into V and overwrite V with the left singular vectors + DO 1165 p = 1, NR + DO 1166 q = p, N + V(q,p) = (A(p,q)) + 1166 CONTINUE + 1165 CONTINUE + IF ( NR .GT. 1 ) + $ CALL DLASET( 'U', NR-1,NR-1, ZERO,ZERO, V(1,2), LDV ) +* .. the left singular vectors of R**T overwrite V, the right singular +* vectors not computed + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN + CALL DGESVD( 'O', 'N', N, NR, V, LDV, S, U, LDU, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* + DO 1121 p = 1, NR + DO 1122 q = p + 1, NR + RTMP = V(q,p) + V(q,p) = V(p,q) + V(p,q) = RTMP + 1122 CONTINUE + 1121 CONTINUE +* + IF ( NR .LT. N ) THEN + DO 1103 p = 1, NR + DO 1104 q = NR + 1, N + V(p,q) = V(q,p) + 1104 CONTINUE + 1103 CONTINUE + END IF + CALL DLAPMT( .FALSE., NR, N, V, LDV, IWORK ) + ELSE +* .. need all N right singular vectors and NR < N +* [!] This is a simple implementation that augments [V](1:N,1:NR) +* by padding a zero block. In the case NR << N, a more efficient +* way is to first use the QR factorization. For more details +* how to implement this, see the " FULL SVD " branch. + CALL DLASET('G', N, N-NR, ZERO, ZERO, V(1,NR+1), LDV) + CALL DGESVD( 'O', 'N', N, N, V, LDV, S, U, LDU, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* + DO 1123 p = 1, N + DO 1124 q = p + 1, N + RTMP = V(q,p) + V(q,p) = V(p,q) + V(p,q) = RTMP + 1124 CONTINUE + 1123 CONTINUE + CALL DLAPMT( .FALSE., N, N, V, LDV, IWORK ) + END IF +* + ELSE +* .. apply DGESVD to R +* .. copy R into V and overwrite V with the right singular vectors + CALL DLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL DLASET( 'L', NR-1, NR-1, ZERO, ZERO, V(2,1), LDV ) +* .. the right singular vectors overwrite V, the NR left singular +* vectors stored in U(1:NR,1:NR) + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN + CALL DGESVD( 'N', 'O', NR, N, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) + CALL DLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* .. now [V](1:NR,1:N) contains V(1:N,1:NR)**T + ELSE +* .. need all N right singular vectors and NR < N +* [!] This is a simple implementation that augments [V](1:NR,1:N) +* by padding a zero block. In the case NR << N, a more efficient +* way is to first use the LQ factorization. For more details +* how to implement this, see the " FULL SVD " branch. + CALL DLASET('G', N-NR, N, ZERO,ZERO, V(NR+1,1), LDV) + CALL DGESVD( 'N', 'O', N, N, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) + CALL DLAPMT( .FALSE., N, N, V, LDV, IWORK ) + END IF +* .. now [V] contains the transposed matrix of the right singular +* vectors of A.
+ END IF +* + ELSE +*....................................................................... +* .. FULL SVD requested +*....................................................................... + IF ( RTRANS ) THEN +* +* .. apply DGESVD to R**T [[this option is left for R&D&T]] +* + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN +* .. copy R**T into [V] and overwrite [V] with the left singular +* vectors of R**T + DO 1168 p = 1, NR + DO 1169 q = p, N + V(q,p) = A(p,q) + 1169 CONTINUE + 1168 CONTINUE + IF ( NR .GT. 1 ) + $ CALL DLASET( 'U', NR-1,NR-1, ZERO,ZERO, V(1,2), LDV ) +* +* .. the left singular vectors of R**T overwrite [V], the NR right +* singular vectors of R**T stored in [U](1:NR,1:NR) as transposed + CALL DGESVD( 'O', 'A', N, NR, V, LDV, S, V, LDV, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* .. assemble V + DO 1115 p = 1, NR + DO 1116 q = p + 1, NR + RTMP = V(q,p) + V(q,p) = V(p,q) + V(p,q) = RTMP + 1116 CONTINUE + 1115 CONTINUE + IF ( NR .LT. N ) THEN + DO 1101 p = 1, NR + DO 1102 q = NR+1, N + V(p,q) = V(q,p) + 1102 CONTINUE + 1101 CONTINUE + END IF + CALL DLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* + DO 1117 p = 1, NR + DO 1118 q = p + 1, NR + RTMP = U(q,p) + U(q,p) = U(p,q) + U(p,q) = RTMP + 1118 CONTINUE + 1117 CONTINUE +* + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL DLASET('A', M-NR,NR, ZERO,ZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL DLASET('A',NR,N1-NR,ZERO,ZERO,U(1,NR+1),LDU) + CALL DLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* + ELSE +* .. need all N right singular vectors and NR < N +* .. copy R**T into [V] and overwrite [V] with the left singular +* vectors of R**T +* [[The optimal ratio N/NR for using QRF instead of padding +* with zeros. Here hard coded to 2; it must be at least +* two due to work space constraints.]] +* OPTRATIO = ILAENV(6, 'DGESVD', 'S' // 'O', NR,N,0,0) +* OPTRATIO = MAX( OPTRATIO, 2 ) + OPTRATIO = 2 + IF ( OPTRATIO*NR .GT. N ) THEN + DO 1198 p = 1, NR + DO 1199 q = p, N + V(q,p) = A(p,q) + 1199 CONTINUE + 1198 CONTINUE + IF ( NR .GT. 1 ) + $ CALL DLASET('U',NR-1,NR-1, ZERO,ZERO, V(1,2),LDV) +* + CALL DLASET('A',N,N-NR,ZERO,ZERO,V(1,NR+1),LDV) + CALL DGESVD( 'O', 'A', N, N, V, LDV, S, V, LDV, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* + DO 1113 p = 1, N + DO 1114 q = p + 1, N + RTMP = V(q,p) + V(q,p) = V(p,q) + V(p,q) = RTMP + 1114 CONTINUE + 1113 CONTINUE + CALL DLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x N1), i.e. (M x N) or (M x M). +* + DO 1111 p = 1, N + DO 1112 q = p + 1, N + RTMP = U(q,p) + U(q,p) = U(p,q) + U(p,q) = RTMP + 1112 CONTINUE + 1111 CONTINUE +* + IF ( ( N .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL DLASET('A',M-N,N,ZERO,ZERO,U(N+1,1),LDU) + IF ( N .LT. N1 ) THEN + CALL DLASET('A',N,N1-N,ZERO,ZERO,U(1,N+1),LDU) + CALL DLASET('A',M-N,N1-N,ZERO,ONE, + $ U(N+1,N+1), LDU ) + END IF + END IF + ELSE +* .. copy R**T into [U] and overwrite [U] with the right +* singular vectors of R + DO 1196 p = 1, NR + DO 1197 q = p, N + U(q,NR+p) = A(p,q) + 1197 CONTINUE + 1196 CONTINUE + IF ( NR .GT. 
1 ) + $ CALL DLASET('U',NR-1,NR-1,ZERO,ZERO,U(1,NR+2),LDU) + CALL DGEQRF( N, NR, U(1,NR+1), LDU, WORK(N+1), + $ WORK(N+NR+1), LWORK-N-NR, IERR ) + DO 1143 p = 1, NR + DO 1144 q = 1, N + V(q,p) = U(p,NR+q) + 1144 CONTINUE + 1143 CONTINUE + CALL DLASET('U',NR-1,NR-1,ZERO,ZERO,V(1,2),LDV) + CALL DGESVD( 'S', 'O', NR, NR, V, LDV, S, U, LDU, + $ V,LDV, WORK(N+NR+1),LWORK-N-NR, INFO ) + CALL DLASET('A',N-NR,NR,ZERO,ZERO,V(NR+1,1),LDV) + CALL DLASET('A',NR,N-NR,ZERO,ZERO,V(1,NR+1),LDV) + CALL DLASET('A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV) + CALL DORMQR('R','C', N, N, NR, U(1,NR+1), LDU, + $ WORK(N+1),V,LDV,WORK(N+NR+1),LWORK-N-NR,IERR) + CALL DLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL DLASET('A',M-NR,NR,ZERO,ZERO,U(NR+1,1),LDU) + IF ( NR .LT. N1 ) THEN + CALL DLASET('A',NR,N1-NR,ZERO,ZERO,U(1,NR+1),LDU) + CALL DLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1),LDU) + END IF + END IF + END IF + END IF +* + ELSE +* +* .. apply DGESVD to R [[this is the recommended option]] +* + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN +* .. copy R into [V] and overwrite V with the right singular vectors + CALL DLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL DLASET( 'L', NR-1,NR-1, ZERO,ZERO, V(2,1), LDV ) +* .. the right singular vectors of R overwrite [V], the NR left +* singular vectors of R stored in [U](1:NR,1:NR) + CALL DGESVD( 'S', 'O', NR, N, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) + CALL DLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* .. now [V](1:NR,1:N) contains V(1:N,1:NR)**T +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL DLASET('A', M-NR,NR, ZERO,ZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL DLASET('A',NR,N1-NR,ZERO,ZERO,U(1,NR+1),LDU) + CALL DLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* + ELSE +* .. need all N right singular vectors and NR < N +* .. the requested number of the left singular vectors +* is then N1 (N or M) +* [[The optimal ratio N/NR for using LQ instead of padding +* with zeros. Here hard coded to 2; it must be at least +* two due to work space constraints.]] +* OPTRATIO = ILAENV(6, 'DGESVD', 'S' // 'O', NR,N,0,0) +* OPTRATIO = MAX( OPTRATIO, 2 ) + OPTRATIO = 2 + IF ( OPTRATIO * NR .GT. N ) THEN + CALL DLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL DLASET('L', NR-1,NR-1, ZERO,ZERO, V(2,1),LDV) +* .. the right singular vectors of R overwrite [V], the NR left +* singular vectors of R stored in [U](1:NR,1:NR) + CALL DLASET('A', N-NR,N, ZERO,ZERO, V(NR+1,1),LDV) + CALL DGESVD( 'S', 'O', N, N, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) + CALL DLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. now [V] contains the transposed matrix of the right +* singular vectors of A. The leading N left singular vectors +* are in [U](1:N,1:N) +* .. assemble the left singular vector matrix U of dimensions +* (M x N1), i.e. (M x N) or (M x M). + IF ( ( N .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL DLASET('A',M-N,N,ZERO,ZERO,U(N+1,1),LDU) + IF ( N .LT. N1 ) THEN + CALL DLASET('A',N,N1-N,ZERO,ZERO,U(1,N+1),LDU) + CALL DLASET( 'A',M-N,N1-N,ZERO,ONE, + $ U(N+1,N+1), LDU ) + END IF + END IF + ELSE + CALL DLACPY( 'U', NR, N, A, LDA, U(NR+1,1), LDU ) + IF ( NR .GT. 
1 ) + $ CALL DLASET('L',NR-1,NR-1,ZERO,ZERO,U(NR+2,1),LDU) + CALL DGELQF( NR, N, U(NR+1,1), LDU, WORK(N+1), + $ WORK(N+NR+1), LWORK-N-NR, IERR ) + CALL DLACPY('L',NR,NR,U(NR+1,1),LDU,V,LDV) + IF ( NR .GT. 1 ) + $ CALL DLASET('U',NR-1,NR-1,ZERO,ZERO,V(1,2),LDV) + CALL DGESVD( 'S', 'O', NR, NR, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+NR+1), LWORK-N-NR, INFO ) + CALL DLASET('A',N-NR,NR,ZERO,ZERO,V(NR+1,1),LDV) + CALL DLASET('A',NR,N-NR,ZERO,ZERO,V(1,NR+1),LDV) + CALL DLASET('A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV) + CALL DORMLQ('R','N',N,N,NR,U(NR+1,1),LDU,WORK(N+1), + $ V, LDV, WORK(N+NR+1),LWORK-N-NR,IERR) + CALL DLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL DLASET('A',M-NR,NR,ZERO,ZERO,U(NR+1,1),LDU) + IF ( NR .LT. N1 ) THEN + CALL DLASET('A',NR,N1-NR,ZERO,ZERO,U(1,NR+1),LDU) + CALL DLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF + END IF + END IF +* .. end of the "R**T or R" branch + END IF +* +* The Q matrix from the first QRF is built into the left singular +* vectors matrix U. +* + IF ( .NOT. WNTUF ) + $ CALL DORMQR( 'L', 'N', M, N1, N, A, LDA, WORK, U, + $ LDU, WORK(N+1), LWORK-N, IERR ) + IF ( ROWPRM .AND. .NOT.WNTUF ) + $ CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(N+1), -1 ) +* +* ... end of the "full SVD" branch + END IF +* +* Check whether some singular values are returned as zeros, e.g. +* due to underflow, and update the numerical rank. + p = NR + DO 4001 q = p, 1, -1 + IF ( S(q) .GT. ZERO ) GO TO 4002 + NR = NR - 1 + 4001 CONTINUE + 4002 CONTINUE +* +* .. if numerical rank deficiency is detected, the truncated +* singular values are set to zero. + IF ( NR .LT. N ) CALL DLASET( 'G', N-NR,1, ZERO,ZERO, S(NR+1), N ) +* .. undo scaling; this may cause overflow in the largest singular +* values. + IF ( ASCALED ) + $ CALL DLASCL( 'G',0,0, ONE,SQRT(DBLE(M)), NR,1, S, N, IERR ) + IF ( CONDA ) RWORK(1) = SCONDA + RWORK(2) = p - NR +* .. p-NR is the number of singular values that are computed as +* exact zeros in DGESVD() applied to the (possibly truncated) +* full row rank triangular (trapezoidal) factor of A. + NUMRANK = NR +* + RETURN +* +* End of DGESVDQ +* + END diff --git a/lapack-netlib/SRC/dgesvj.f b/lapack-netlib/SRC/dgesvj.f index 2cbc5ce0e..cf7aac982 100644 --- a/lapack-netlib/SRC/dgesvj.f +++ b/lapack-netlib/SRC/dgesvj.f @@ -90,13 +90,13 @@ *> JOBV is CHARACTER*1 *> Specifies whether to compute the right singular vectors, that *> is, the matrix V: -*> = 'V' : the matrix V is computed and returned in the array V -*> = 'A' : the Jacobi rotations are applied to the MV-by-N +*> = 'V': the matrix V is computed and returned in the array V +*> = 'A': the Jacobi rotations are applied to the MV-by-N *> array V. In other words, the right singular vector *> matrix V is not computed explicitly, instead it is *> applied to an MV-by-N matrix initially stored in the *> first MV rows of V. -*> = 'N' : the matrix V is not computed and the array V is not +*> = 'N': the matrix V is not computed and the array V is not *> referenced *> \endverbatim *> @@ -118,8 +118,8 @@ *> A is DOUBLE PRECISION array, dimension (LDA,N) *> On entry, the M-by-N matrix A. *> On exit : -*> If JOBU .EQ. 'U' .OR. JOBU .EQ. 'C' : -*> If INFO .EQ. 0 : +*> If JOBU = 'U' .OR. JOBU = 'C' : +*> If INFO = 0 : *> RANKA orthonormal columns of U are returned in the *> leading RANKA columns of the array A. 
Here RANKA <= N *> is the number of computed singular values of A that are @@ -129,9 +129,9 @@ *> in the array WORK as RANKA=NINT(WORK(2)). Also see the *> descriptions of SVA and WORK. The computed columns of U *> are mutually numerically orthogonal up to approximately -*> TOL=DSQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU.EQ.'C'), +*> TOL=DSQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU = 'C'), *> see the description of JOBU. -*> If INFO .GT. 0 : +*> If INFO > 0 : *> the procedure DGESVJ did not converge in the given number *> of iterations (sweeps). In that case, the computed *> columns of U may not be orthogonal up to TOL. The output @@ -140,8 +140,8 @@ *> input matrix A in the sense that the residual *> ||A-SCALE*U*SIGMA*V^T||_2 / ||A||_2 is small. *> -*> If JOBU .EQ. 'N' : -*> If INFO .EQ. 0 : +*> If JOBU = 'N' : +*> If INFO = 0 : *> Note that the left singular vectors are 'for free' in the *> one-sided Jacobi SVD algorithm. However, if only the *> singular values are needed, the level of numerical @@ -150,7 +150,7 @@ *> numerically orthogonal up to approximately M*EPS. Thus, *> on exit, A contains the columns of U scaled with the *> corresponding singular values. -*> If INFO .GT. 0 : +*> If INFO > 0 : *> the procedure DGESVJ did not converge in the given number *> of iterations (sweeps). *> \endverbatim @@ -165,9 +165,9 @@ *> \verbatim *> SVA is DOUBLE PRECISION array, dimension (N) *> On exit : -*> If INFO .EQ. 0 : +*> If INFO = 0 : *> depending on the value SCALE = WORK(1), we have: -*> If SCALE .EQ. ONE : +*> If SCALE = ONE : *> SVA(1:N) contains the computed singular values of A. *> During the computation SVA contains the Euclidean column *> norms of the iterated matrices in the array A. @@ -175,7 +175,7 @@ *> The singular values of A are SCALE*SVA(1:N), and this *> factored representation is due to the fact that some of the *> singular values of A might underflow or overflow. -*> If INFO .GT. 0 : +*> If INFO > 0 : *> the procedure DGESVJ did not converge in the given number of *> iterations (sweeps) and SCALE*SVA(1:N) may not be accurate. *> \endverbatim @@ -183,7 +183,7 @@ *> \param[in] MV *> \verbatim *> MV is INTEGER -*> If JOBV .EQ. 'A', then the product of Jacobi rotations in DGESVJ +*> If JOBV = 'A', then the product of Jacobi rotations in DGESVJ *> is applied to the first MV rows of V. See the description of JOBV. *> \endverbatim *> @@ -201,16 +201,16 @@ *> \param[in] LDV *> \verbatim *> LDV is INTEGER -*> The leading dimension of the array V, LDV .GE. 1. -*> If JOBV .EQ. 'V', then LDV .GE. max(1,N). -*> If JOBV .EQ. 'A', then LDV .GE. max(1,MV) . +*> The leading dimension of the array V, LDV >= 1. +*> If JOBV = 'V', then LDV >= max(1,N). +*> If JOBV = 'A', then LDV >= max(1,MV) . *> \endverbatim *> *> \param[in,out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (LWORK) *> On entry : -*> If JOBU .EQ. 'C' : +*> If JOBU = 'C' : *> WORK(1) = CTOL, where CTOL defines the threshold for convergence. *> The process stops if all columns of A are mutually *> orthogonal up to CTOL*EPS, EPS=DLAMCH('E'). @@ -230,7 +230,7 @@ *> WORK(5) = max_{i.NE.j} |COS(A(:,i),A(:,j))| in the last sweep. *> This is useful information in cases when DGESVJ did *> not converge, as it can be used to estimate whether -*> the output is stil useful and for post festum analysis. +*> the output is still useful and for post festum analysis. *> WORK(6) = the largest absolute value over all sines of the *> Jacobi rotation angles in the last sweep. It can be *> useful for a post festum analysis. 
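As context for the DGESVJ tolerance and diagnostics above: the TOL threshold and the WORK(5) value are both cosines of angles between pivot columns of A. Below is a minimal free-form sketch of that quantity with made-up data; the default threshold TOL = DSQRT(M)*EPS quoted above is approximated here with the EPSILON intrinsic standing in for DLAMCH('E'):

      program column_cosine_demo
         implicit none
         integer, parameter :: m = 4
         double precision :: ap(m), aq(m), tol, cosang
         ap = (/ 1.0d0, 2.0d0, 0.0d0, -1.0d0 /)   ! hypothetical pivot column A(:,p)
         aq = (/ 0.5d0, 1.0d0, 1.0d0, 0.0d0 /)    ! hypothetical pivot column A(:,q)
         tol = sqrt( dble( m ) ) * epsilon( 1.0d0 )
         ! DABS(COS(angle(A(:,p),A(:,q)))), the quantity compared against TOL
         cosang = abs( dot_product( ap, aq ) ) / ( norm2( ap ) * norm2( aq ) )
         if ( cosang .gt. tol ) print *, 'pair not yet orthogonal, |cos| =', cosang
      end program column_cosine_demo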
@@ -245,9 +245,9 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit. -*> < 0 : if INFO = -i, then the i-th argument had an illegal value -*> > 0 : DGESVJ did not converge in the maximal allowed number (30) +*> = 0: successful exit. +*> < 0: if INFO = -i, then the i-th argument had an illegal value +*> > 0: DGESVJ did not converge in the maximal allowed number (30) *> of sweeps. The output may still be useful. See the *> description of WORK. *> \endverbatim diff --git a/lapack-netlib/SRC/dgesvxx.f b/lapack-netlib/SRC/dgesvxx.f index afcd05d8e..21b56f61c 100644 --- a/lapack-netlib/SRC/dgesvxx.f +++ b/lapack-netlib/SRC/dgesvxx.f @@ -411,7 +411,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -447,14 +447,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension (NPARAMS) -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -462,9 +462,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. *> (other values are reserved for future use) *> *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual diff --git a/lapack-netlib/SRC/dgetc2.f b/lapack-netlib/SRC/dgetc2.f index 0896a7013..5bf5b890f 100644 --- a/lapack-netlib/SRC/dgetc2.f +++ b/lapack-netlib/SRC/dgetc2.f @@ -85,7 +85,7 @@ *> \verbatim *> INFO is INTEGER *> = 0: successful exit -*> > 0: if INFO = k, U(k, k) is likely to produce owerflow if +*> > 0: if INFO = k, U(k, k) is likely to produce overflow if *> we try to solve for x in Ax = b. So U is perturbed to *> avoid the overflow. 
*> \endverbatim
diff --git a/lapack-netlib/SRC/dgetsls.f b/lapack-netlib/SRC/dgetsls.f
index 3b44a40ab..c2ba5e2b8 100644
--- a/lapack-netlib/SRC/dgetsls.f
+++ b/lapack-netlib/SRC/dgetsls.f
@@ -1,3 +1,5 @@
+*> \brief \b DGETSLS
+*
* Definition:
* ===========
*
@@ -256,7 +258,7 @@
TSZM = INT( TQ( 1 ) )
LWM = INT( WORKQ( 1 ) )
CALL DGEMLQ( 'L', TRANS, N, NRHS, M, A, LDA, TQ,
- $ TSZO, B, LDB, WORKQ, -1, INFO2 )
+ $ TSZM, B, LDB, WORKQ, -1, INFO2 )
LWM = MAX( LWM, INT( WORKQ( 1 ) ) )
WSIZEO = TSZO + LWO
WSIZEM = TSZM + LWM
diff --git a/lapack-netlib/SRC/dggesx.f b/lapack-netlib/SRC/dggesx.f
index 47022fbdf..0e57d636e 100644
--- a/lapack-netlib/SRC/dggesx.f
+++ b/lapack-netlib/SRC/dggesx.f
@@ -131,10 +131,10 @@
*> \verbatim
*> SENSE is CHARACTER*1
*> Determines which reciprocal condition numbers are computed.
-*> = 'N' : None are computed;
-*> = 'E' : Computed for average of selected eigenvalues only;
-*> = 'V' : Computed for selected deflating subspaces only;
-*> = 'B' : Computed for both.
+*> = 'N': None are computed;
+*> = 'E': Computed for average of selected eigenvalues only;
+*> = 'V': Computed for selected deflating subspaces only;
+*> = 'B': Computed for both.
*> If SENSE = 'E', 'V', or 'B', SORT must equal 'S'.
*> \endverbatim
*>
diff --git a/lapack-netlib/SRC/dgsvj0.f b/lapack-netlib/SRC/dgsvj0.f
index 4fd38d37e..318e369fd 100644
--- a/lapack-netlib/SRC/dgsvj0.f
+++ b/lapack-netlib/SRC/dgsvj0.f
@@ -117,7 +117,7 @@
*> \param[in] MV
*> \verbatim
*> MV is INTEGER
-*> If JOBV .EQ. 'A', then MV rows of V are post-multipled by a
+*> If JOBV = 'A', then MV rows of V are post-multiplied by a
*> sequence of Jacobi rotations.
*> If JOBV = 'N', then MV is not referenced.
*> \endverbatim
@@ -125,9 +125,9 @@
*> \param[in,out] V
*> \verbatim
*> V is DOUBLE PRECISION array, dimension (LDV,N)
-*> If JOBV .EQ. 'V' then N rows of V are post-multipled by a
+*> If JOBV = 'V', then N rows of V are post-multiplied by a
*> sequence of Jacobi rotations.
-*> If JOBV .EQ. 'A' then MV rows of V are post-multipled by a
+*> If JOBV = 'A', then MV rows of V are post-multiplied by a
*> sequence of Jacobi rotations.
*> If JOBV = 'N', then V is not referenced.
*> \endverbatim
@@ -136,8 +136,8 @@
*> \verbatim
*> LDV is INTEGER
*> The leading dimension of the array V, LDV >= 1.
-*> If JOBV = 'V', LDV .GE. N.
-*> If JOBV = 'A', LDV .GE. MV.
+*> If JOBV = 'V', LDV >= N.
+*> If JOBV = 'A', LDV >= MV.
*> \endverbatim
*>
*> \param[in] EPS
@@ -157,7 +157,7 @@
*> TOL is DOUBLE PRECISION
*> TOL is the threshold for Jacobi rotations. For a pair
*> A(:,p), A(:,q) of pivot columns, the Jacobi rotation is
-*> applied only if DABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL.
+*> applied only if DABS(COS(angle(A(:,p),A(:,q)))) > TOL.
*> \endverbatim
*>
*> \param[in] NSWEEP
@@ -175,14 +175,14 @@
*> \param[in] LWORK
*> \verbatim
*> LWORK is INTEGER
-*> LWORK is the dimension of WORK. LWORK .GE. M.
+*> LWORK is the dimension of WORK. LWORK >= M.
*> \endverbatim
*>
*> \param[out] INFO
*> \verbatim
*> INFO is INTEGER
-*> = 0 : successful exit.
-*> < 0 : if INFO = -i, then the i-th argument had an illegal value
+*> = 0: successful exit.
+*> < 0: if INFO = -i, then the i-th argument had an illegal value
*> \endverbatim
*
* Authors:
@@ -1045,7 +1045,7 @@
1993 CONTINUE
* end i=1:NSWEEP loop
-* #:) Reaching this point means that the procedure has comleted the given
+* #:) Reaching this point means that the procedure has completed the given
* number of iterations.
INFO = NSWEEP - 1
GO TO 1995
diff --git a/lapack-netlib/SRC/dgsvj1.f b/lapack-netlib/SRC/dgsvj1.f
index 376682c7f..35a93619f 100644
--- a/lapack-netlib/SRC/dgsvj1.f
+++ b/lapack-netlib/SRC/dgsvj1.f
@@ -61,7 +61,7 @@
*> In terms of the columns of A, the first N1 columns are rotated 'against'
*> the remaining N-N1 columns, trying to increase the angle between the
*> corresponding subspaces. The off-diagonal block is N1-by(N-N1) and it is
-*> tiled using quadratic tiles of side KBL. Here, KBL is a tunning parmeter.
+*> tiled using quadratic tiles of side KBL. Here, KBL is a tuning parameter.
*> The number of sweeps is given in NSWEEP and the orthogonality threshold
*> is given in TOL.
*> \endverbatim
@@ -147,27 +147,27 @@
*> \param[in] MV
*> \verbatim
*> MV is INTEGER
-*> If JOBV .EQ. 'A', then MV rows of V are post-multipled by a
-*> sequence of Jacobi rotations.
-*> If JOBV = 'N', then MV is not referenced.
+*> If JOBV = 'A', then MV rows of V are post-multiplied by a
+*> sequence of Jacobi rotations.
+*> If JOBV = 'N', then MV is not referenced.
*> \endverbatim
*>
*> \param[in,out] V
*> \verbatim
*> V is DOUBLE PRECISION array, dimension (LDV,N)
-*> If JOBV .EQ. 'V' then N rows of V are post-multipled by a
-*> sequence of Jacobi rotations.
-*> If JOBV .EQ. 'A' then MV rows of V are post-multipled by a
-*> sequence of Jacobi rotations.
-*> If JOBV = 'N', then V is not referenced.
+*> If JOBV = 'V', then N rows of V are post-multiplied by a
+*> sequence of Jacobi rotations.
+*> If JOBV = 'A', then MV rows of V are post-multiplied by a
+*> sequence of Jacobi rotations.
+*> If JOBV = 'N', then V is not referenced.
*> \endverbatim
*>
*> \param[in] LDV
*> \verbatim
*> LDV is INTEGER
*> The leading dimension of the array V, LDV >= 1.
-*> If JOBV = 'V', LDV .GE. N.
-*> If JOBV = 'A', LDV .GE. MV.
+*> If JOBV = 'V', LDV >= N.
+*> If JOBV = 'A', LDV >= MV.
*> \endverbatim
*>
*> \param[in] EPS
@@ -187,7 +187,7 @@
*> TOL is DOUBLE PRECISION
*> TOL is the threshold for Jacobi rotations. For a pair
*> A(:,p), A(:,q) of pivot columns, the Jacobi rotation is
-*> applied only if DABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL.
+*> applied only if DABS(COS(angle(A(:,p),A(:,q)))) > TOL.
*> \endverbatim
*>
*> \param[in] NSWEEP
@@ -205,14 +205,14 @@
*> \param[in] LWORK
*> \verbatim
*> LWORK is INTEGER
-*> LWORK is the dimension of WORK. LWORK .GE. M.
+*> LWORK is the dimension of WORK. LWORK >= M.
*> \endverbatim
*>
*> \param[out] INFO
*> \verbatim
*> INFO is INTEGER
-*> = 0 : successful exit.
-*> < 0 : if INFO = -i, then the i-th argument had an illegal value
+*> = 0: successful exit.
+*> < 0: if INFO = -i, then the i-th argument had an illegal value
*> \endverbatim
*
* Authors:
diff --git a/lapack-netlib/SRC/dhseqr.f b/lapack-netlib/SRC/dhseqr.f
index 4444b955f..b4fc3af90 100644
--- a/lapack-netlib/SRC/dhseqr.f
+++ b/lapack-netlib/SRC/dhseqr.f
@@ -70,7 +70,7 @@
*> \param[in] N
*> \verbatim
*> N is INTEGER
-*> The order of the matrix H. N .GE. 0.
+*> The order of the matrix H. N >= 0.
*> \endverbatim
*>
*> \param[in] ILO
@@ -87,7 +87,7 @@
*> set by a previous call to DGEBAL, and then passed to ZGEHRD
*> when the matrix output by DGEBAL is reduced to Hessenberg
*> form. Otherwise ILO and IHI should be set to 1 and N
-*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N.
+*> respectively. If N > 0, then 1 <= ILO <= IHI <= N.
*> If N = 0, then ILO = 1 and IHI = 0.
*> \endverbatim
*>
@@ -100,20 +100,20 @@
*> (the Schur form); 2-by-2 diagonal blocks (corresponding to
*> complex conjugate pairs of eigenvalues) are returned in
*> standard form, with H(i,i) = H(i+1,i+1) and
-*> H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and JOB = 'E', the
+*> H(i+1,i)*H(i,i+1) < 0. If INFO = 0 and JOB = 'E', the
*> contents of H are unspecified on exit. (The output value of
-*> H when INFO.GT.0 is given under the description of INFO
+*> H when INFO > 0 is given under the description of INFO
*> below.)
*>
*> Unlike earlier versions of DHSEQR, this subroutine may
-*> explicitly H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1
+*> explicitly set H(i,j) = 0 for i > j and j = 1, 2, ... ILO-1
*> or j = IHI+1, IHI+2, ... N.
*> \endverbatim
*>
*> \param[in] LDH
*> \verbatim
*> LDH is INTEGER
-*> The leading dimension of the array H. LDH .GE. max(1,N).
+*> The leading dimension of the array H. LDH >= max(1,N).
*> \endverbatim
*>
*> \param[out] WR
@@ -128,8 +128,8 @@
*> The real and imaginary parts, respectively, of the computed
*> eigenvalues. If two eigenvalues are computed as a complex
*> conjugate pair, they are stored in consecutive elements of
-*> WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and
-*> WI(i+1) .LT. 0. If JOB = 'S', the eigenvalues are stored in
+*> WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and
+*> WI(i+1) < 0. If JOB = 'S', the eigenvalues are stored in
*> the same order as on the diagonal of the Schur form returned
*> in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2
*> diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and
@@ -148,7 +148,7 @@
*> if INFO = 0, Z contains Q*Z.
*> Normally Q is the orthogonal matrix generated by DORGHR
*> after the call to DGEHRD which formed the Hessenberg matrix
-*> H. (The output value of Z when INFO.GT.0 is given under
+*> H. (The output value of Z when INFO > 0 is given under
*> the description of INFO below.)
*> \endverbatim
*>
@@ -156,7 +156,7 @@
*> \verbatim
*> LDZ is INTEGER
*> The leading dimension of the array Z. if COMPZ = 'I' or
-*> COMPZ = 'V', then LDZ.GE.MAX(1,N). Otherwize, LDZ.GE.1.
+*> COMPZ = 'V', then LDZ >= MAX(1,N). Otherwise, LDZ >= 1.
*> \endverbatim
*>
*> \param[out] WORK
@@ -169,7 +169,7 @@
*> \param[in] LWORK
*> \verbatim
*> LWORK is INTEGER
-*> The dimension of the array WORK. LWORK .GE. max(1,N)
+*> The dimension of the array WORK. LWORK >= max(1,N)
*> is sufficient and delivers very good and sometimes
*> optimal performance. However, LWORK as large as 11*N
*> may be required for optimal performance. A workspace
@@ -187,21 +187,21 @@
*> \param[out] INFO
*> \verbatim
*> INFO is INTEGER
-*> = 0: successful exit
-*> .LT. 0: if INFO = -i, the i-th argument had an illegal
*> value
-*> .GT. 0: if INFO = i, DHSEQR failed to compute all of
+*> = 0: successful exit
+*> < 0: if INFO = -i, the i-th argument had an illegal
*> value
+*> > 0: if INFO = i, DHSEQR failed to compute all of
*> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR
*> and WI contain those eigenvalues which have been
*> successfully computed. (Failures are rare.)
*>
-*> If INFO .GT. 0 and JOB = 'E', then on exit, the
+*> If INFO > 0 and JOB = 'E', then on exit, the
*> remaining unconverged eigenvalues are the eigen-
*> values of the upper Hessenberg matrix rows and
*> columns ILO through INFO of the final, output
*> value of H.
*>
-*> If INFO .GT.
0 and JOB = 'S', then on exit +*> If INFO > 0 and JOB = 'S', then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -209,19 +209,19 @@ *> value of H is upper Hessenberg and quasi-triangular *> in rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and COMPZ = 'V', then on exit +*> If INFO > 0 and COMPZ = 'V', then on exit *> *> (final value of Z) = (initial value of Z)*U *> *> where U is the orthogonal matrix in (*) (regard- *> less of the value of JOB.) *> -*> If INFO .GT. 0 and COMPZ = 'I', then on exit +*> If INFO > 0 and COMPZ = 'I', then on exit *> (final value of Z) = U *> where U is the orthogonal matrix in (*) (regard- *> less of the value of JOB.) *> -*> If INFO .GT. 0 and COMPZ = 'N', then Z is not +*> If INFO > 0 and COMPZ = 'N', then Z is not *> accessed. *> \endverbatim * @@ -261,8 +261,8 @@ *> This depends on ILO, IHI and NS. NS is the *> number of simultaneous shifts returned *> by ILAENV(ISPEC=15). (See ISPEC=15 below.) -*> The default for (IHI-ILO+1).LE.500 is NS. -*> The default for (IHI-ILO+1).GT.500 is 3*NS/2. +*> The default for (IHI-ILO+1) <= 500 is NS. +*> The default for (IHI-ILO+1) > 500 is 3*NS/2. *> *> ISPEC=14: Nibble crossover point. (See IPARMQ for *> details.) Default: 14% of deflation window @@ -341,8 +341,8 @@ PARAMETER ( NTINY = 11 ) * * ==== NL allocates some local workspace to help small matrices -* . through a rare DLAHQR failure. NL .GT. NTINY = 11 is -* . required and NL .LE. NMIN = ILAENV(ISPEC=12,...) is recom- +* . through a rare DLAHQR failure. NL > NTINY = 11 is +* . required and NL <= NMIN = ILAENV(ISPEC=12,...) is recom- * . mended. (The default value of NMIN is 75.) Using NL = 49 * . allows up to six simultaneous shifts and a 16-by-16 * . deflation window. ==== diff --git a/lapack-netlib/SRC/dla_gbrcond.f b/lapack-netlib/SRC/dla_gbrcond.f index e9713c9ca..c9eebcbea 100644 --- a/lapack-netlib/SRC/dla_gbrcond.f +++ b/lapack-netlib/SRC/dla_gbrcond.f @@ -141,13 +141,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (5*N). *> Workspace. *> \endverbatim *> -*> \param[in] IWORK +*> \param[out] IWORK *> \verbatim *> IWORK is INTEGER array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/dla_gbrfsx_extended.f b/lapack-netlib/SRC/dla_gbrfsx_extended.f index 12b2a32a4..282a63f1c 100644 --- a/lapack-netlib/SRC/dla_gbrfsx_extended.f +++ b/lapack-netlib/SRC/dla_gbrfsx_extended.f @@ -66,19 +66,19 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] TRANS_TYPE *> \verbatim *> TRANS_TYPE is INTEGER *> Specifies the transposition operation on A. -*> The value is defined by ILATRANS(T) where T is a CHARACTER and -*> T = 'N': No transpose +*> The value is defined by ILATRANS(T) where T is a CHARACTER and T +*> = 'N': No transpose *> = 'T': Transpose *> = 'C': Conjugate transpose *> \endverbatim @@ -270,7 +270,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 
3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/dla_gercond.f b/lapack-netlib/SRC/dla_gercond.f index aa93ca5a4..6f7d70a6a 100644 --- a/lapack-netlib/SRC/dla_gercond.f +++ b/lapack-netlib/SRC/dla_gercond.f @@ -123,13 +123,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (3*N). *> Workspace. *> \endverbatim *> -*> \param[in] IWORK +*> \param[out] IWORK *> \verbatim *> IWORK is INTEGER array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/dla_gerfsx_extended.f b/lapack-netlib/SRC/dla_gerfsx_extended.f index 082f810f0..4cb9ef4f9 100644 --- a/lapack-netlib/SRC/dla_gerfsx_extended.f +++ b/lapack-netlib/SRC/dla_gerfsx_extended.f @@ -64,19 +64,19 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] TRANS_TYPE *> \verbatim *> TRANS_TYPE is INTEGER *> Specifies the transposition operation on A. -*> The value is defined by ILATRANS(T) where T is a CHARACTER and -*> T = 'N': No transpose +*> The value is defined by ILATRANS(T) where T is a CHARACTER and T +*> = 'N': No transpose *> = 'T': Transpose *> = 'C': Conjugate transpose *> \endverbatim @@ -256,7 +256,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERRS_C is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERRS_C is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERRS_C(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/dla_porcond.f b/lapack-netlib/SRC/dla_porcond.f index 498e707e3..b2f9c4b1e 100644 --- a/lapack-netlib/SRC/dla_porcond.f +++ b/lapack-netlib/SRC/dla_porcond.f @@ -113,13 +113,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (3*N). *> Workspace. *> \endverbatim *> -*> \param[in] IWORK +*> \param[out] IWORK *> \verbatim *> IWORK is INTEGER array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/dla_porfsx_extended.f b/lapack-netlib/SRC/dla_porfsx_extended.f index 8c0d6bebd..ece9b00ed 100644 --- a/lapack-netlib/SRC/dla_porfsx_extended.f +++ b/lapack-netlib/SRC/dla_porfsx_extended.f @@ -65,11 +65,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -246,7 +246,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. 
If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/dla_porpvgrw.f b/lapack-netlib/SRC/dla_porpvgrw.f index 4fe1a1922..8a6f9e1a7 100644 --- a/lapack-netlib/SRC/dla_porpvgrw.f +++ b/lapack-netlib/SRC/dla_porpvgrw.f @@ -85,7 +85,7 @@ *> The leading dimension of the array AF. LDAF >= max(1,N). *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (2*N) *> \endverbatim diff --git a/lapack-netlib/SRC/dla_syrcond.f b/lapack-netlib/SRC/dla_syrcond.f index 91d557145..23ed82588 100644 --- a/lapack-netlib/SRC/dla_syrcond.f +++ b/lapack-netlib/SRC/dla_syrcond.f @@ -119,13 +119,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (3*N). *> Workspace. *> \endverbatim *> -*> \param[in] IWORK +*> \param[out] IWORK *> \verbatim *> IWORK is INTEGER array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/dla_syrfsx_extended.f b/lapack-netlib/SRC/dla_syrfsx_extended.f index f54d15194..b390600c0 100644 --- a/lapack-netlib/SRC/dla_syrfsx_extended.f +++ b/lapack-netlib/SRC/dla_syrfsx_extended.f @@ -67,11 +67,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -255,7 +255,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/dla_syrpvgrw.f b/lapack-netlib/SRC/dla_syrpvgrw.f index c2e5cb018..5ba03093b 100644 --- a/lapack-netlib/SRC/dla_syrpvgrw.f +++ b/lapack-netlib/SRC/dla_syrpvgrw.f @@ -101,7 +101,7 @@ *> as determined by DSYTRF. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (2*N) *> \endverbatim diff --git a/lapack-netlib/SRC/dla_wwaddw.f b/lapack-netlib/SRC/dla_wwaddw.f index 99a86c553..4f50540d6 100644 --- a/lapack-netlib/SRC/dla_wwaddw.f +++ b/lapack-netlib/SRC/dla_wwaddw.f @@ -36,7 +36,7 @@ *> DLA_WWADDW adds a vector W into a doubled-single vector (X, Y). *> *> This works for all extant IBM's hex and binary floating point -*> arithmetics, but not for decimal. +*> arithmetic, but not for decimal. *> \endverbatim * * Arguments: diff --git a/lapack-netlib/SRC/dlaed4.f b/lapack-netlib/SRC/dlaed4.f index e7dc839df..033438d73 100644 --- a/lapack-netlib/SRC/dlaed4.f +++ b/lapack-netlib/SRC/dlaed4.f @@ -82,7 +82,7 @@ *> \param[out] DELTA *> \verbatim *> DELTA is DOUBLE PRECISION array, dimension (N) -*> If N .GT. 
2, DELTA contains (D(j) - lambda_I) in its j-th +*> If N > 2, DELTA contains (D(j) - lambda_I) in its j-th *> component. If N = 1, then DELTA(1) = 1. If N = 2, see DLAED5 *> for detail. The vector DELTA contains the information necessary *> to construct the eigenvectors by DLAED3 and DLAED9. diff --git a/lapack-netlib/SRC/dlaed8.f b/lapack-netlib/SRC/dlaed8.f index c053347b1..f64679dc0 100644 --- a/lapack-netlib/SRC/dlaed8.f +++ b/lapack-netlib/SRC/dlaed8.f @@ -353,7 +353,7 @@ Z( I ) = W( INDX( I ) ) 40 CONTINUE * -* Calculate the allowable deflation tolerence +* Calculate the allowable deflation tolerance * IMAX = IDAMAX( N, Z, 1 ) JMAX = IDAMAX( N, D, 1 ) diff --git a/lapack-netlib/SRC/dlagtf.f b/lapack-netlib/SRC/dlagtf.f index 4b257c64f..b92c84f39 100644 --- a/lapack-netlib/SRC/dlagtf.f +++ b/lapack-netlib/SRC/dlagtf.f @@ -125,7 +125,7 @@ *> then IN(k) = 1, otherwise IN(k) = 0. The element IN(n) *> returns the smallest positive integer j such that *> -*> abs( u(j,j) ).le. norm( (T - lambda*I)(j) )*TOL, +*> abs( u(j,j) ) <= norm( (T - lambda*I)(j) )*TOL, *> *> where norm( A(j) ) denotes the sum of the absolute values of *> the jth row of the matrix A. If no such j exists then IN(n) @@ -137,8 +137,8 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit -*> .lt. 0: if INFO = -k, the kth argument had an illegal value +*> = 0: successful exit +*> < 0: if INFO = -k, the kth argument had an illegal value *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/dlagts.f b/lapack-netlib/SRC/dlagts.f index 926075827..cbd35ae14 100644 --- a/lapack-netlib/SRC/dlagts.f +++ b/lapack-netlib/SRC/dlagts.f @@ -122,12 +122,12 @@ *> \param[in,out] TOL *> \verbatim *> TOL is DOUBLE PRECISION -*> On entry, with JOB .lt. 0, TOL should be the minimum +*> On entry, with JOB < 0, TOL should be the minimum *> perturbation to be made to very small diagonal elements of U. *> TOL should normally be chosen as about eps*norm(U), where eps *> is the relative machine precision, but if TOL is supplied as *> non-positive, then it is reset to eps*max( abs( u(i,j) ) ). -*> If JOB .gt. 0 then TOL is not referenced. +*> If JOB > 0 then TOL is not referenced. *> *> On exit, TOL is changed as described above, only if TOL is *> non-positive on entry. Otherwise TOL is unchanged. @@ -136,14 +136,14 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit -*> .lt. 0: if INFO = -i, the i-th argument had an illegal value -*> .gt. 0: overflow would occur when computing the INFO(th) -*> element of the solution vector x. This can only occur -*> when JOB is supplied as positive and either means -*> that a diagonal element of U is very small, or that -*> the elements of the right-hand side vector y are very -*> large. +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> > 0: overflow would occur when computing the INFO(th) +*> element of the solution vector x. This can only occur +*> when JOB is supplied as positive and either means +*> that a diagonal element of U is very small, or that +*> the elements of the right-hand side vector y are very +*> large. *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/dlahqr.f b/lapack-netlib/SRC/dlahqr.f index f7365d21e..e863829ec 100644 --- a/lapack-netlib/SRC/dlahqr.f +++ b/lapack-netlib/SRC/dlahqr.f @@ -150,26 +150,26 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 
0: If INFO = i, DLAHQR failed to compute all the +*> = 0: successful exit +*> > 0: If INFO = i, DLAHQR failed to compute all the *> eigenvalues ILO to IHI in a total of 30 iterations *> per eigenvalue; elements i+1:ihi of WR and WI *> contain those eigenvalues which have been *> successfully computed. *> -*> If INFO .GT. 0 and WANTT is .FALSE., then on exit, +*> If INFO > 0 and WANTT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the *> eigenvalues of the upper Hessenberg matrix rows -*> and columns ILO thorugh INFO of the final, output +*> and columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> (*) (initial value of H)*U = U*(final value of H) -*> where U is an orthognal matrix. The final +*> where U is an orthogonal matrix. The final *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> (final value of Z) = (initial value of Z)*U *> where U is the orthogonal matrix in (*) *> (regardless of the value of WANTT.) diff --git a/lapack-netlib/SRC/dlaln2.f b/lapack-netlib/SRC/dlaln2.f index a094b737b..0c94ea308 100644 --- a/lapack-netlib/SRC/dlaln2.f +++ b/lapack-netlib/SRC/dlaln2.f @@ -49,7 +49,7 @@ *> the first column of each being the real part and the second *> being the imaginary part. *> -*> "s" is a scaling factor (.LE. 1), computed by DLALN2, which is +*> "s" is a scaling factor (<= 1), computed by DLALN2, which is *> so chosen that X can be computed without overflow. X is further *> scaled if necessary to assure that norm(ca A - w D)*norm(X) is less *> than overflow. diff --git a/lapack-netlib/SRC/dlamswlq.f b/lapack-netlib/SRC/dlamswlq.f index 19e32f888..306c3d3de 100644 --- a/lapack-netlib/SRC/dlamswlq.f +++ b/lapack-netlib/SRC/dlamswlq.f @@ -1,3 +1,4 @@ +*> \brief \b DLAMSWLQ * * Definition: * =========== diff --git a/lapack-netlib/SRC/dlamtsqr.f b/lapack-netlib/SRC/dlamtsqr.f index 6af89d28e..41a067780 100644 --- a/lapack-netlib/SRC/dlamtsqr.f +++ b/lapack-netlib/SRC/dlamtsqr.f @@ -1,3 +1,4 @@ +*> \brief \b DLAMTSQR * * Definition: * =========== diff --git a/lapack-netlib/SRC/dlangb.f b/lapack-netlib/SRC/dlangb.f index 078573b87..0c4f938f7 100644 --- a/lapack-netlib/SRC/dlangb.f +++ b/lapack-netlib/SRC/dlangb.f @@ -129,6 +129,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER KL, KU, LDAB, N @@ -139,22 +140,24 @@ * * ===================================================================== * -* * .. Parameters .. DOUBLE PRECISION ONE, ZERO PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) * .. * .. Local Scalars .. INTEGER I, J, K, L - DOUBLE PRECISION SCALE, SUM, VALUE, TEMP + DOUBLE PRECISION SUM, VALUE, TEMP * .. -* .. External Subroutines .. - EXTERNAL DLASSQ +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. +* .. External Subroutines .. + EXTERNAL DLASSQ, DCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT * .. @@ -206,15 +209,22 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
* - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N L = MAX( 1, J-KU ) K = KU + 1 - J + L - CALL DLASSQ( MIN( N, J+KL )-L+1, AB( K, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( MIN( N, J+KL )-L+1, AB( K, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANGB = VALUE diff --git a/lapack-netlib/SRC/dlange.f b/lapack-netlib/SRC/dlange.f index 9dbf45e81..6b32fbefd 100644 --- a/lapack-netlib/SRC/dlange.f +++ b/lapack-netlib/SRC/dlange.f @@ -119,6 +119,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER LDA, M, N @@ -135,10 +136,13 @@ * .. * .. Local Scalars .. INTEGER I, J - DOUBLE PRECISION SCALE, SUM, VALUE, TEMP + DOUBLE PRECISION SUM, VALUE, TEMP +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Subroutines .. - EXTERNAL DLASSQ + EXTERNAL DLASSQ, DCOMBSSQ * .. * .. External Functions .. LOGICAL LSAME, DISNAN @@ -194,13 +198,19 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N - CALL DLASSQ( M, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( M, A( 1, J ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANGE = VALUE diff --git a/lapack-netlib/SRC/dlanhs.f b/lapack-netlib/SRC/dlanhs.f index 691dbc21e..a859d2216 100644 --- a/lapack-netlib/SRC/dlanhs.f +++ b/lapack-netlib/SRC/dlanhs.f @@ -113,6 +113,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER LDA, N @@ -129,15 +130,18 @@ * .. * .. Local Scalars .. INTEGER I, J - DOUBLE PRECISION SCALE, SUM, VALUE + DOUBLE PRECISION SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL DLASSQ +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. +* .. External Subroutines .. + EXTERNAL DLASSQ, DCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT * .. @@ -188,13 +192,20 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N - CALL DLASSQ( MIN( N, J+1 ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( MIN( N, J+1 ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANHS = VALUE diff --git a/lapack-netlib/SRC/dlansb.f b/lapack-netlib/SRC/dlansb.f index 4ccf5f27e..a82dc41b1 100644 --- a/lapack-netlib/SRC/dlansb.f +++ b/lapack-netlib/SRC/dlansb.f @@ -134,6 +134,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER K, LDAB, N @@ -150,15 +151,18 @@ * .. * .. Local Scalars .. 
INTEGER I, J, L - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL DLASSQ +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. +* .. External Subroutines .. + EXTERNAL DLASSQ, DCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT * .. @@ -225,29 +229,47 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( K.GT.0 ) THEN IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL DLASSQ( MIN( J-1, K ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE L = K + 1 ELSE DO 120 J = 1, N - 1 - CALL DLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE L = 1 END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) ELSE L = 1 END IF - CALL DLASSQ( N, AB( L, 1 ), LDAB, SCALE, SUM ) - VALUE = SCALE*SQRT( SUM ) +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( N, AB( L, 1 ), LDAB, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANSB = VALUE diff --git a/lapack-netlib/SRC/dlansp.f b/lapack-netlib/SRC/dlansp.f index a1829db75..b6ad1ffcf 100644 --- a/lapack-netlib/SRC/dlansp.f +++ b/lapack-netlib/SRC/dlansp.f @@ -119,6 +119,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER N @@ -135,15 +136,18 @@ * .. * .. Local Scalars .. INTEGER I, J, K - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL DLASSQ +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. +* .. External Subroutines .. + EXTERNAL DLASSQ, DCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT * .. @@ -217,31 +221,48 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
+* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE K = 2 IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL DLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( J-1, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + J 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL DLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( N-J, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 120 CONTINUE END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* K = 1 + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE DO 130 I = 1, N IF( AP( K ).NE.ZERO ) THEN ABSA = ABS( AP( K ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF IF( LSAME( UPLO, 'U' ) ) THEN @@ -250,7 +271,8 @@ K = K + N - I + 1 END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + CALL DCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANSP = VALUE diff --git a/lapack-netlib/SRC/dlansy.f b/lapack-netlib/SRC/dlansy.f index 2372fce0a..87d514c11 100644 --- a/lapack-netlib/SRC/dlansy.f +++ b/lapack-netlib/SRC/dlansy.f @@ -127,6 +127,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER LDA, N @@ -143,15 +144,18 @@ * .. * .. Local Scalars .. INTEGER I, J - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL DLASSQ +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. +* .. External Subroutines .. + EXTERNAL DLASSQ, DCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT * .. @@ -216,21 +220,39 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL DLASSQ( J-1, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( J-1, A( 1, J ), 1, COLSSQ(1), COLSSQ(2) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL DLASSQ( N-J, A( J+1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( N-J, A( J+1, J ), 1, COLSSQ(1), COLSSQ(2) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE END IF - SUM = 2*SUM - CALL DLASSQ( N, A, LDA+1, SCALE, SUM ) - VALUE = SCALE*SQRT( SUM ) + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( N, A, LDA+1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANSY = VALUE diff --git a/lapack-netlib/SRC/dlantb.f b/lapack-netlib/SRC/dlantb.f index 3d2bfe7e4..0d46f6cc8 100644 --- a/lapack-netlib/SRC/dlantb.f +++ b/lapack-netlib/SRC/dlantb.f @@ -145,6 +145,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER K, LDAB, N @@ -162,15 +163,18 @@ * .. 
Local Scalars .. LOGICAL UDIAG INTEGER I, J, L - DOUBLE PRECISION SCALE, SUM, VALUE + DOUBLE PRECISION SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL DLASSQ +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. +* .. External Subroutines .. + EXTERNAL DLASSQ, DCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT * .. @@ -311,46 +315,61 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N IF( K.GT.0 ) THEN DO 280 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL DLASSQ( MIN( J-1, K ), - $ AB( MAX( K+2-J, 1 ), J ), 1, SCALE, - $ SUM ) + $ AB( MAX( K+2-J, 1 ), J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 280 CONTINUE END IF ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 290 J = 1, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL DLASSQ( MIN( J, K+1 ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 290 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N IF( K.GT.0 ) THEN DO 300 J = 1, N - 1 - CALL DLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 300 CONTINUE END IF ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 310 J = 1, N - CALL DLASSQ( MIN( N-J+1, K+1 ), AB( 1, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( MIN( N-J+1, K+1 ), AB( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 310 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANTB = VALUE diff --git a/lapack-netlib/SRC/dlantp.f b/lapack-netlib/SRC/dlantp.f index f84a9e9d7..a7b89dec7 100644 --- a/lapack-netlib/SRC/dlantp.f +++ b/lapack-netlib/SRC/dlantp.f @@ -129,6 +129,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER N @@ -146,15 +147,18 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J, K - DOUBLE PRECISION SCALE, SUM, VALUE + DOUBLE PRECISION SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL DLASSQ +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. +* .. External Subroutines .. + EXTERNAL DLASSQ, DCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT * .. @@ -306,45 +310,64 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
* IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N K = 2 DO 280 J = 2, N - CALL DLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( J-1, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + J 280 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE K = 1 DO 290 J = 1, N - CALL DLASSQ( J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( J, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + J 290 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N K = 2 DO 300 J = 1, N - 1 - CALL DLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( N-J, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 300 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE K = 1 DO 310 J = 1, N - CALL DLASSQ( N-J+1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( N-J+1, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 310 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANTP = VALUE diff --git a/lapack-netlib/SRC/dlantr.f b/lapack-netlib/SRC/dlantr.f index 8585b2f68..adc7da4c4 100644 --- a/lapack-netlib/SRC/dlantr.f +++ b/lapack-netlib/SRC/dlantr.f @@ -146,6 +146,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER LDA, M, N @@ -163,15 +164,18 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J - DOUBLE PRECISION SCALE, SUM, VALUE + DOUBLE PRECISION SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL DLASSQ +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. +* .. External Subroutines .. + EXTERNAL DLASSQ, DCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT * .. @@ -281,7 +285,7 @@ END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - DO 210 I = 1, N + DO 210 I = 1, MIN( M, N ) WORK( I ) = ONE 210 CONTINUE DO 220 I = N + 1, M @@ -311,38 +315,56 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
* IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = MIN( M, N ) + SSQ( 1 ) = ONE + SSQ( 2 ) = MIN( M, N ) DO 290 J = 2, N - CALL DLASSQ( MIN( M, J-1 ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( MIN( M, J-1 ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 290 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 300 J = 1, N - CALL DLASSQ( MIN( M, J ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( MIN( M, J ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 300 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = MIN( M, N ) + SSQ( 1 ) = ONE + SSQ( 2 ) = MIN( M, N ) DO 310 J = 1, N - CALL DLASSQ( M-J, A( MIN( M, J+1 ), J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( M-J, A( MIN( M, J+1 ), J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 310 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 320 J = 1, N - CALL DLASSQ( M-J+1, A( J, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL DLASSQ( M-J+1, A( J, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 320 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * DLANTR = VALUE diff --git a/lapack-netlib/SRC/dlanv2.f b/lapack-netlib/SRC/dlanv2.f index 91fa14ff2..61b016f16 100644 --- a/lapack-netlib/SRC/dlanv2.f +++ b/lapack-netlib/SRC/dlanv2.f @@ -140,13 +140,16 @@ * * .. Parameters .. DOUBLE PRECISION ZERO, HALF, ONE - PARAMETER ( ZERO = 0.0D+0, HALF = 0.5D+0, ONE = 1.0D+0 ) + PARAMETER ( ZERO = 0.0D+0, HALF = 0.5D+0, ONE = 1.0D+0, + $ TWO = 2.0D0 ) DOUBLE PRECISION MULTPL PARAMETER ( MULTPL = 4.0D+0 ) * .. * .. Local Scalars .. DOUBLE PRECISION AA, BB, BCMAX, BCMIS, CC, CS1, DD, EPS, P, SAB, - $ SAC, SCALE, SIGMA, SN1, TAU, TEMP, Z + $ SAC, SCALE, SIGMA, SN1, TAU, TEMP, Z, SAFMIN, + $ SAFMN2, SAFMX2 + INTEGER COUNT * .. * .. External Functions .. DOUBLE PRECISION DLAMCH, DLAPY2 @@ -157,11 +160,14 @@ * .. * .. Executable Statements .. * + SAFMIN = DLAMCH( 'S' ) EPS = DLAMCH( 'P' ) + SAFMN2 = DLAMCH( 'B' )**INT( LOG( SAFMIN / EPS ) / + $ LOG( DLAMCH( 'B' ) ) / TWO ) + SAFMX2 = ONE / SAFMN2 IF( C.EQ.ZERO ) THEN CS = ONE SN = ZERO - GO TO 10 * ELSE IF( B.EQ.ZERO ) THEN * @@ -174,12 +180,12 @@ A = TEMP B = -C C = ZERO - GO TO 10 +* ELSE IF( ( A-D ).EQ.ZERO .AND. SIGN( ONE, B ).NE.SIGN( ONE, C ) ) $ THEN CS = ONE SN = ZERO - GO TO 10 +* ELSE * TEMP = A - D @@ -207,12 +213,30 @@ SN = C / TAU B = B - C C = ZERO +* ELSE * * Complex eigenvalues, or real (almost) equal eigenvalues. * Make diagonal elements equal. * + COUNT = 0 SIGMA = B + C + 10 CONTINUE + COUNT = COUNT + 1 + SCALE = MAX( ABS(TEMP), ABS(SIGMA) ) + IF( SCALE.GE.SAFMX2 ) THEN + SIGMA = SIGMA * SAFMN2 + TEMP = TEMP * SAFMN2 + IF (COUNT .LE. 20) + $ GOTO 10 + END IF + IF( SCALE.LE.SAFMN2 ) THEN + SIGMA = SIGMA * SAFMX2 + TEMP = TEMP * SAFMX2 + IF (COUNT .LE. 20) + $ GOTO 10 + END IF + P = HALF*TEMP TAU = DLAPY2( SIGMA, TEMP ) CS = SQRT( HALF*( ONE+ABS( SIGMA ) / TAU ) ) SN = -( P / ( TAU*CS ) )*SIGN( ONE, SIGMA ) @@ -268,8 +292,6 @@ END IF * END IF -* - 10 CONTINUE * * Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). 
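The dlangb/dlange/dlanhs/dlansb/dlansp/dlansy/dlantb/dlantp/dlantr hunks above all apply the same pattern: the single running (SCALE, SUM) pair fed to DLASSQ is replaced by a per-column pair COLSSQ that is folded into a running pair SSQ by the new helper DCOMBSSQ, where a pair V represents the value V(1)**2 * V(2). Below is a minimal free-form sketch of that combination rule, written as a self-contained stand-in for illustration, not the vendored DCOMBSSQ itself:

      program combssq_demo
         implicit none
         double precision :: ssq(2), colssq(2)
         ssq    = (/ 1.0d+200, 2.0d0 /)   ! running (scale, sum-of-squares) pair
         colssq = (/ 3.0d0, 5.0d0 /)      ! pair for the column just processed
         call combine( ssq, colssq )
         ! prints ~1.414d+200; the naive sum of squares, 2.0d+400, would overflow
         print *, 'norm so far:', ssq(1) * sqrt( ssq(2) )
      contains
         ! fold v2 into v1, where each pair represents v(1)**2 * v(2)
         subroutine combine( v1, v2 )
            double precision, intent(inout) :: v1(2)
            double precision, intent(in) :: v2(2)
            if ( v1(1) .ge. v2(1) ) then
               if ( v1(1) .ne. 0.0d0 ) &
                  v1(2) = v1(2) + ( v2(1) / v1(1) )**2 * v2(2)
            else
               v1(2) = v2(2) + ( v1(1) / v2(1) )**2 * v1(2)
               v1(1) = v2(1)
            end if
         end subroutine combine
      end program combssq_demo

Keeping the scale separate from the sum of squares lets a Frobenius norm near the overflow or underflow threshold be accumulated safely; combining per column instead of carrying one long running pair is the accuracy improvement these hunks are after.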
*
diff --git a/lapack-netlib/SRC/dlaorhr_col_getrfnp.f b/lapack-netlib/SRC/dlaorhr_col_getrfnp.f
new file mode 100644
index 000000000..6a7c629e8
--- /dev/null
+++ b/lapack-netlib/SRC/dlaorhr_col_getrfnp.f
@@ -0,0 +1,248 @@
+*> \brief \b DLAORHR_COL_GETRFNP
+*
+*  =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+*            http://www.netlib.org/lapack/explore-html/
+*
+*> \htmlonly
+*> Download DLAORHR_COL_GETRFNP + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
+*> [TXT]
+*> \endhtmlonly
+*
+*  Definition:
+*  ===========
+*
+*       SUBROUTINE DLAORHR_COL_GETRFNP( M, N, A, LDA, D, INFO )
+*
+*       .. Scalar Arguments ..
+*       INTEGER        INFO, LDA, M, N
+*       ..
+*       .. Array Arguments ..
+*       DOUBLE PRECISION   A( LDA, * ), D( * )
+*       ..
+*
+*
+*> \par Purpose:
+*  =============
+*>
+*> \verbatim
+*>
+*> DLAORHR_COL_GETRFNP computes the modified LU factorization without
+*> pivoting of a real general M-by-N matrix A. The factorization has
+*> the form:
+*>
+*>     A - S = L * U,
+*>
+*> where:
+*>    S is a m-by-n diagonal sign matrix with the diagonal D, so that
+*>    D(i) = S(i,i), 1 <= i <= min(M,N). The diagonal D is constructed
+*>    as D(i)=-SIGN(A(i,i)), where A(i,i) is the value after performing
+*>    i-1 steps of Gaussian elimination. This means that the diagonal
+*>    element at each step of "modified" Gaussian elimination is
+*>    at least one in absolute value (so that division-by-zero is
+*>    not possible during the division by the diagonal element);
+*>
+*>    L is a M-by-N lower triangular matrix with unit diagonal elements
+*>    (lower trapezoidal if M > N);
+*>
+*>    and U is a M-by-N upper triangular matrix
+*>    (upper trapezoidal if M < N).
+*>
+*> This routine is an auxiliary routine used in the Householder
+*> reconstruction routine DORHR_COL. In DORHR_COL, this routine is
+*> applied to an M-by-N matrix A with orthonormal columns, where each
+*> element is bounded by one in absolute value. With the choice of
+*> the matrix S above, one can show that the diagonal element at each
+*> step of Gaussian elimination is the largest (in absolute value) in
+*> the column on or below the diagonal, so that no pivoting is required
+*> for numerical stability [1].
+*>
+*> For more details on the Householder reconstruction algorithm,
+*> including the modified LU factorization, see [1].
+*>
+*> This is the blocked right-looking version of the algorithm,
+*> calling Level 3 BLAS to update the submatrix. To factorize a block,
+*> this routine calls the recursive routine DLAORHR_COL_GETRFNP2.
+*>
+*> [1] "Reconstructing Householder vectors from tall-skinny QR",
+*>     G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen,
+*>     E. Solomonik, J. Parallel Distrib. Comput.,
+*>     vol. 85, pp. 3-31, 2015.
+*> \endverbatim
+*
+*  Arguments:
+*  ==========
+*
+*> \param[in] M
+*> \verbatim
+*>          M is INTEGER
+*>          The number of rows of the matrix A.  M >= 0.
+*> \endverbatim
+*>
+*> \param[in] N
+*> \verbatim
+*>          N is INTEGER
+*>          The number of columns of the matrix A.  N >= 0.
+*> \endverbatim
+*>
+*> \param[in,out] A
+*> \verbatim
+*>          A is DOUBLE PRECISION array, dimension (LDA,N)
+*>          On entry, the M-by-N matrix to be factored.
+*>          On exit, the factors L and U from the factorization
+*>          A-S=L*U; the unit diagonal elements of L are not stored.
+*> \endverbatim
+*>
+*> \param[in] LDA
+*> \verbatim
+*>          LDA is INTEGER
+*>          The leading dimension of the array A.  LDA >= max(1,M).
+*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is DOUBLE PRECISION array, dimension min(M,N) +*> The diagonal elements of the diagonal M-by-N sign matrix S, +*> D(i) = S(i,i), where 1 <= i <= min(M,N). The elements can +*> be only plus or minus one. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup doubleGEcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE DLAORHR_COL_GETRFNP( M, N, A, LDA, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. + DOUBLE PRECISION A( LDA, * ), D( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + DOUBLE PRECISION ONE + PARAMETER ( ONE = 1.0D+0 ) +* .. +* .. Local Scalars .. + INTEGER IINFO, J, JB, NB +* .. +* .. External Subroutines .. + EXTERNAL DGEMM, DLAORHR_COL_GETRFNP2, DTRSM, XERBLA +* .. +* .. External Functions .. + INTEGER ILAENV + EXTERNAL ILAENV +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters. +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN + INFO = -2 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -4 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DLAORHR_COL_GETRFNP', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) + $ RETURN +* +* Determine the block size for this environment. +* + + NB = ILAENV( 1, 'DLAORHR_COL_GETRFNP', ' ', M, N, -1, -1 ) + + IF( NB.LE.1 .OR. NB.GE.MIN( M, N ) ) THEN +* +* Use unblocked code. +* + CALL DLAORHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) + ELSE +* +* Use blocked code. +* + DO J = 1, MIN( M, N ), NB + JB = MIN( MIN( M, N )-J+1, NB ) +* +* Factor diagonal and subdiagonal blocks. +* + CALL DLAORHR_COL_GETRFNP2( M-J+1, JB, A( J, J ), LDA, + $ D( J ), IINFO ) +* + IF( J+JB.LE.N ) THEN +* +* Compute block row of U. +* + CALL DTRSM( 'Left', 'Lower', 'No transpose', 'Unit', JB, + $ N-J-JB+1, ONE, A( J, J ), LDA, A( J, J+JB ), + $ LDA ) + IF( J+JB.LE.M ) THEN +* +* Update trailing submatrix. 
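+*              The DGEMM below forms the Schur complement of the
+*              factored panel: the trailing block A( J+JB:M, J+JB:N )
+*              is updated by subtracting the product of the panel
+*              factor L21 = A( J+JB:M, J:J+JB-1 ) and the block row
+*              U12 = A( J:J+JB-1, J+JB:N ) computed by DTRSM above.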
+*
+                  CALL DGEMM( 'No transpose', 'No transpose', M-J-JB+1,
+     $                        N-J-JB+1, JB, -ONE, A( J+JB, J ), LDA,
+     $                        A( J, J+JB ), LDA, ONE, A( J+JB, J+JB ),
+     $                        LDA )
+               END IF
+            END IF
+         END DO
+      END IF
+      RETURN
+*
+*     End of DLAORHR_COL_GETRFNP
+*
+      END
diff --git a/lapack-netlib/SRC/dlaorhr_col_getrfnp2.f b/lapack-netlib/SRC/dlaorhr_col_getrfnp2.f
new file mode 100644
index 000000000..f7781f2e5
--- /dev/null
+++ b/lapack-netlib/SRC/dlaorhr_col_getrfnp2.f
@@ -0,0 +1,305 @@
+*> \brief \b DLAORHR_COL_GETRFNP2
+*
+*  =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+*            http://www.netlib.org/lapack/explore-html/
+*
+*> \htmlonly
+*> Download DLAORHR_COL_GETRFNP2 + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
+*> [TXT]
+*> \endhtmlonly
+*
+*  Definition:
+*  ===========
+*
+*       RECURSIVE SUBROUTINE DLAORHR_COL_GETRFNP2( M, N, A, LDA, D, INFO )
+*
+*       .. Scalar Arguments ..
+*       INTEGER        INFO, LDA, M, N
+*       ..
+*       .. Array Arguments ..
+*       DOUBLE PRECISION   A( LDA, * ), D( * )
+*       ..
+*
+*
+*> \par Purpose:
+*  =============
+*>
+*> \verbatim
+*>
+*> DLAORHR_COL_GETRFNP2 computes the modified LU factorization without
+*> pivoting of a real general M-by-N matrix A. The factorization has
+*> the form:
+*>
+*>     A - S = L * U,
+*>
+*> where:
+*>    S is a m-by-n diagonal sign matrix with the diagonal D, so that
+*>    D(i) = S(i,i), 1 <= i <= min(M,N). The diagonal D is constructed
+*>    as D(i)=-SIGN(A(i,i)), where A(i,i) is the value after performing
+*>    i-1 steps of Gaussian elimination. This means that the diagonal
+*>    element at each step of "modified" Gaussian elimination is at
+*>    least one in absolute value (so that division-by-zero is not
+*>    possible during the division by the diagonal element);
+*>
+*>    L is a M-by-N lower triangular matrix with unit diagonal elements
+*>    (lower trapezoidal if M > N);
+*>
+*>    and U is a M-by-N upper triangular matrix
+*>    (upper trapezoidal if M < N).
+*>
+*> This routine is an auxiliary routine used in the Householder
+*> reconstruction routine DORHR_COL. In DORHR_COL, this routine is
+*> applied to an M-by-N matrix A with orthonormal columns, where each
+*> element is bounded by one in absolute value. With the choice of
+*> the matrix S above, one can show that the diagonal element at each
+*> step of Gaussian elimination is the largest (in absolute value) in
+*> the column on or below the diagonal, so that no pivoting is required
+*> for numerical stability [1].
+*>
+*> For more details on the Householder reconstruction algorithm,
+*> including the modified LU factorization, see [1].
+*>
+*> This is the recursive version of the LU factorization algorithm.
+*> Denote A - S by B. The algorithm divides the matrix B into four
+*> submatrices:
+*>
+*>        [  B11 | B12  ]  where B11 is n1 by n1,
+*>    B = [ -----|----- ]        B21 is (m-n1) by n1,
+*>        [  B21 | B22  ]        B12 is n1 by n2,
+*>                               B22 is (m-n1) by n2,
+*>                               with n1 = min(m,n)/2, n2 = n-n1.
+*>
+*>
+*> The subroutine calls itself to factor B11, solves for B21,
+*> solves for B12, updates B22, then calls itself to factor B22.
+*>
+*> For more details on the recursive LU algorithm, see [2].
+*>
+*> DLAORHR_COL_GETRFNP2 is called to factorize a block by the blocked
+*> routine DLAORHR_COL_GETRFNP, which uses blocked code calling
+*> Level 3 BLAS to update the submatrix. However, DLAORHR_COL_GETRFNP2
+*> is self-sufficient and can be used without DLAORHR_COL_GETRFNP.
+*>
+*> [1] "Reconstructing Householder vectors from tall-skinny QR",
+*>     G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen,
+*>     E. Solomonik, J.
Parallel Distrib. Comput., +*> vol. 85, pp. 3-31, 2015. +*> +*> [2] "Recursion leads to automatic variable blocking for dense linear +*> algebra algorithms", F. Gustavson, IBM J. of Res. and Dev., +*> vol. 41, no. 6, pp. 737-755, 1997. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is DOUBLE PRECISION array, dimension (LDA,N) +*> On entry, the M-by-N matrix to be factored. +*> On exit, the factors L and U from the factorization +*> A-S=L*U; the unit diagonal elements of L are not stored. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is DOUBLE PRECISION array, dimension min(M,N) +*> The diagonal elements of the diagonal M-by-N sign matrix S, +*> D(i) = S(i,i), where 1 <= i <= min(M,N). The elements can +*> be only plus or minus one. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup doubleGEcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + RECURSIVE SUBROUTINE DLAORHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. + DOUBLE PRECISION A( LDA, * ), D( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + DOUBLE PRECISION ONE + PARAMETER ( ONE = 1.0D+0 ) +* .. +* .. Local Scalars .. + DOUBLE PRECISION SFMIN + INTEGER I, IINFO, N1, N2 +* .. +* .. External Functions .. + DOUBLE PRECISION DLAMCH + EXTERNAL DLAMCH +* .. +* .. External Subroutines .. + EXTERNAL DGEMM, DSCAL, DTRSM, XERBLA +* .. +* .. Intrinsic Functions .. + INTRINSIC ABS, DSIGN, MAX, MIN +* .. +* .. Executable Statements .. 
+* +* Test the input parameters +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN + INFO = -2 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -4 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DLAORHR_COL_GETRFNP2', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) + $ RETURN + + IF ( M.EQ.1 ) THEN +* +* One row case, (also recursion termination case), +* use unblocked code +* +* Transfer the sign +* + D( 1 ) = -DSIGN( ONE, A( 1, 1 ) ) +* +* Construct the row of U +* + A( 1, 1 ) = A( 1, 1 ) - D( 1 ) +* + ELSE IF( N.EQ.1 ) THEN +* +* One column case, (also recursion termination case), +* use unblocked code +* +* Transfer the sign +* + D( 1 ) = -DSIGN( ONE, A( 1, 1 ) ) +* +* Construct the row of U +* + A( 1, 1 ) = A( 1, 1 ) - D( 1 ) +* +* Scale the elements 2:M of the column +* +* Determine machine safe minimum +* + SFMIN = DLAMCH('S') +* +* Construct the subdiagonal elements of L +* + IF( ABS( A( 1, 1 ) ) .GE. SFMIN ) THEN + CALL DSCAL( M-1, ONE / A( 1, 1 ), A( 2, 1 ), 1 ) + ELSE + DO I = 2, M + A( I, 1 ) = A( I, 1 ) / A( 1, 1 ) + END DO + END IF +* + ELSE +* +* Divide the matrix B into four submatrices +* + N1 = MIN( M, N ) / 2 + N2 = N-N1 + +* +* Factor B11, recursive call +* + CALL DLAORHR_COL_GETRFNP2( N1, N1, A, LDA, D, IINFO ) +* +* Solve for B21 +* + CALL DTRSM( 'R', 'U', 'N', 'N', M-N1, N1, ONE, A, LDA, + $ A( N1+1, 1 ), LDA ) +* +* Solve for B12 +* + CALL DTRSM( 'L', 'L', 'N', 'U', N1, N2, ONE, A, LDA, + $ A( 1, N1+1 ), LDA ) +* +* Update B22, i.e. compute the Schur complement +* B22 := B22 - B21*B12 +* + CALL DGEMM( 'N', 'N', M-N1, N2, N1, -ONE, A( N1+1, 1 ), LDA, + $ A( 1, N1+1 ), LDA, ONE, A( N1+1, N1+1 ), LDA ) +* +* Factor B22, recursive call +* + CALL DLAORHR_COL_GETRFNP2( M-N1, N2, A( N1+1, N1+1 ), LDA, + $ D( N1+1 ), IINFO ) +* + END IF + RETURN +* +* End of DLAORHR_COL_GETRFNP2 +* + END diff --git a/lapack-netlib/SRC/dlaqps.f b/lapack-netlib/SRC/dlaqps.f index 395d8e0b1..0009de951 100644 --- a/lapack-netlib/SRC/dlaqps.f +++ b/lapack-netlib/SRC/dlaqps.f @@ -127,7 +127,7 @@ *> \param[in,out] AUXV *> \verbatim *> AUXV is DOUBLE PRECISION array, dimension (NB) -*> Auxiliar vector. +*> Auxiliary vector. *> \endverbatim *> *> \param[in,out] F diff --git a/lapack-netlib/SRC/dlaqr0.f b/lapack-netlib/SRC/dlaqr0.f index 247d4ef30..f362c096c 100644 --- a/lapack-netlib/SRC/dlaqr0.f +++ b/lapack-netlib/SRC/dlaqr0.f @@ -67,7 +67,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -79,12 +79,12 @@ *> \verbatim *> IHI is INTEGER *> It is assumed that H is already upper triangular in rows -*> and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, +*> and columns 1:ILO-1 and IHI+1:N and, if ILO > 1, *> H(ILO,ILO-1) is zero. ILO and IHI are normally set by a *> previous call to DGEBAL, and then passed to DGEHRD when the *> matrix output by DGEBAL is reduced to Hessenberg form. *> Otherwise, ILO and IHI should be set to 1 and N, -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -97,19 +97,19 @@ *> decomposition (the Schur form); 2-by-2 diagonal blocks *> (corresponding to complex conjugate pairs of eigenvalues) *> are returned in standard form, with H(i,i) = H(i+1,i+1) -*> and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is +*> and H(i+1,i)*H(i,i+1) < 0. 
If INFO = 0 and WANTT is *> .FALSE., then the contents of H are unspecified on exit. -*> (The output value of H when INFO.GT.0 is given under the +*> (The output value of H when INFO > 0 is given under the *> description of INFO below.) *> -*> This subroutine may explicitly set H(i,j) = 0 for i.GT.j and +*> This subroutine may explicitly set H(i,j) = 0 for i > j and *> j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). *> \endverbatim *> *> \param[out] WR @@ -125,7 +125,7 @@ *> and WI(ILO:IHI). If two eigenvalues are computed as a *> complex conjugate pair, they are stored in consecutive *> elements of WR and WI, say the i-th and (i+1)th, with -*> WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then +*> WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., then *> the eigenvalues are stored in the same order as on the *> diagonal of the Schur form returned in H, with *> WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal @@ -143,7 +143,7 @@ *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be *> applied if WANTZ is .TRUE.. -*> 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. +*> 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -153,7 +153,7 @@ *> If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is *> replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the *> orthogonal Schur factor of H(ILO:IHI,ILO:IHI). -*> (The output value of Z when INFO.GT.0 is given under +*> (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -161,7 +161,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if WANTZ is .TRUE. -*> then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. +*> then LDZ >= MAX(1,IHIZ). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -174,7 +174,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient, but LWORK typically as large as 6*N may *> be required for optimal performance. A workspace query *> to determine the optimal workspace size is recommended. @@ -190,19 +190,19 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: if INFO = i, DLAQR0 failed to compute all of +*> = 0: successful exit +*> > 0: if INFO = i, DLAQR0 failed to compute all of *> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR *> and WI contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 0 and WANT is .FALSE., then on exit, +*> If INFO > 0 and WANT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -210,7 +210,7 @@ *> value of H is upper Hessenberg and quasi-triangular *> in rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 
0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> *> (final value of Z(ILO:IHI,ILOZ:IHIZ) *> = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U @@ -218,7 +218,7 @@ *> where U is the orthogonal matrix in (*) (regard- *> less of the value of WANTT.) *> -*> If INFO .GT. 0 and WANTZ is .FALSE., then Z is not +*> If INFO > 0 and WANTZ is .FALSE., then Z is not *> accessed. *> \endverbatim * @@ -678,7 +678,7 @@ END IF END IF * -* ==== Use up to NS of the the smallest magnatiude +* ==== Use up to NS of the the smallest magnitude * . shifts. If there aren't NS shifts available, * . then use them all, possibly dropping one to * . make the number of shifts even. ==== diff --git a/lapack-netlib/SRC/dlaqr1.f b/lapack-netlib/SRC/dlaqr1.f index 795b072ab..4ccf997e7 100644 --- a/lapack-netlib/SRC/dlaqr1.f +++ b/lapack-netlib/SRC/dlaqr1.f @@ -69,7 +69,7 @@ *> \verbatim *> LDH is INTEGER *> The leading dimension of H as declared in -*> the calling procedure. LDH.GE.N +*> the calling procedure. LDH >= N *> \endverbatim *> *> \param[in] SR1 diff --git a/lapack-netlib/SRC/dlaqr2.f b/lapack-netlib/SRC/dlaqr2.f index 431b3f123..01fdf3046 100644 --- a/lapack-netlib/SRC/dlaqr2.f +++ b/lapack-netlib/SRC/dlaqr2.f @@ -103,7 +103,7 @@ *> \param[in] NW *> \verbatim *> NW is INTEGER -*> Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). +*> Deflation window size. 1 <= NW <= (KBOT-KTOP+1). *> \endverbatim *> *> \param[in,out] H @@ -121,7 +121,7 @@ *> \verbatim *> LDH is INTEGER *> Leading dimension of H just as declared in the calling -*> subroutine. N .LE. LDH +*> subroutine. N <= LDH *> \endverbatim *> *> \param[in] ILOZ @@ -133,7 +133,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -149,7 +149,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of Z just as declared in the -*> calling subroutine. 1 .LE. LDZ. +*> calling subroutine. 1 <= LDZ. *> \endverbatim *> *> \param[out] NS @@ -194,13 +194,13 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of V just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[in] NH *> \verbatim *> NH is INTEGER -*> The number of columns of T. NH.GE.NW. +*> The number of columns of T. NH >= NW. *> \endverbatim *> *> \param[out] T @@ -212,14 +212,14 @@ *> \verbatim *> LDT is INTEGER *> The leading dimension of T just as declared in the -*> calling subroutine. NW .LE. LDT +*> calling subroutine. NW <= LDT *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> The number of rows of work array WV available for -*> workspace. NV.GE.NW. +*> workspace. NV >= NW. *> \endverbatim *> *> \param[out] WV @@ -231,7 +231,7 @@ *> \verbatim *> LDWV is INTEGER *> The leading dimension of W just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/dlaqr3.f b/lapack-netlib/SRC/dlaqr3.f index aa23617c3..1dbf55c9e 100644 --- a/lapack-netlib/SRC/dlaqr3.f +++ b/lapack-netlib/SRC/dlaqr3.f @@ -100,7 +100,7 @@ *> \param[in] NW *> \verbatim *> NW is INTEGER -*> Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). +*> Deflation window size. 1 <= NW <= (KBOT-KTOP+1). 
*> \endverbatim *> *> \param[in,out] H @@ -118,7 +118,7 @@ *> \verbatim *> LDH is INTEGER *> Leading dimension of H just as declared in the calling -*> subroutine. N .LE. LDH +*> subroutine. N <= LDH *> \endverbatim *> *> \param[in] ILOZ @@ -130,7 +130,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -146,7 +146,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of Z just as declared in the -*> calling subroutine. 1 .LE. LDZ. +*> calling subroutine. 1 <= LDZ. *> \endverbatim *> *> \param[out] NS @@ -191,13 +191,13 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of V just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[in] NH *> \verbatim *> NH is INTEGER -*> The number of columns of T. NH.GE.NW. +*> The number of columns of T. NH >= NW. *> \endverbatim *> *> \param[out] T @@ -209,14 +209,14 @@ *> \verbatim *> LDT is INTEGER *> The leading dimension of T just as declared in the -*> calling subroutine. NW .LE. LDT +*> calling subroutine. NW <= LDT *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> The number of rows of work array WV available for -*> workspace. NV.GE.NW. +*> workspace. NV >= NW. *> \endverbatim *> *> \param[out] WV @@ -228,7 +228,7 @@ *> \verbatim *> LDWV is INTEGER *> The leading dimension of W just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/dlaqr4.f b/lapack-netlib/SRC/dlaqr4.f index 89b9b7f20..454bf9608 100644 --- a/lapack-netlib/SRC/dlaqr4.f +++ b/lapack-netlib/SRC/dlaqr4.f @@ -74,7 +74,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -86,12 +86,12 @@ *> \verbatim *> IHI is INTEGER *> It is assumed that H is already upper triangular in rows -*> and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, +*> and columns 1:ILO-1 and IHI+1:N and, if ILO > 1, *> H(ILO,ILO-1) is zero. ILO and IHI are normally set by a *> previous call to DGEBAL, and then passed to DGEHRD when the *> matrix output by DGEBAL is reduced to Hessenberg form. *> Otherwise, ILO and IHI should be set to 1 and N, -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -104,19 +104,19 @@ *> decomposition (the Schur form); 2-by-2 diagonal blocks *> (corresponding to complex conjugate pairs of eigenvalues) *> are returned in standard form, with H(i,i) = H(i+1,i+1) -*> and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is +*> and H(i+1,i)*H(i,i+1) < 0. If INFO = 0 and WANTT is *> .FALSE., then the contents of H are unspecified on exit. -*> (The output value of H when INFO.GT.0 is given under the +*> (The output value of H when INFO > 0 is given under the *> description of INFO below.) *> -*> This subroutine may explicitly set H(i,j) = 0 for i.GT.j and +*> This subroutine may explicitly set H(i,j) = 0 for i > j and *> j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). 
*> \endverbatim *> *> \param[out] WR @@ -132,7 +132,7 @@ *> and WI(ILO:IHI). If two eigenvalues are computed as a *> complex conjugate pair, they are stored in consecutive *> elements of WR and WI, say the i-th and (i+1)th, with -*> WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then +*> WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., then *> the eigenvalues are stored in the same order as on the *> diagonal of the Schur form returned in H, with *> WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal @@ -150,7 +150,7 @@ *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be *> applied if WANTZ is .TRUE.. -*> 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. +*> 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -160,7 +160,7 @@ *> If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is *> replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the *> orthogonal Schur factor of H(ILO:IHI,ILO:IHI). -*> (The output value of Z when INFO.GT.0 is given under +*> (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -168,7 +168,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if WANTZ is .TRUE. -*> then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. +*> then LDZ >= MAX(1,IHIZ). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -181,7 +181,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient, but LWORK typically as large as 6*N may *> be required for optimal performance. A workspace query *> to determine the optimal workspace size is recommended. @@ -197,19 +197,19 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: if INFO = i, DLAQR4 failed to compute all of +*> = 0: successful exit +*> > 0: if INFO = i, DLAQR4 failed to compute all of *> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR *> and WI contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 0 and WANT is .FALSE., then on exit, +*> If INFO > 0 and WANT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -217,7 +217,7 @@ *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> *> (final value of Z(ILO:IHI,ILOZ:IHIZ) *> = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U @@ -225,7 +225,7 @@ *> where U is the orthogonal matrix in (*) (regard- *> less of the value of WANTT.) *> -*> If INFO .GT. 0 and WANTZ is .FALSE., then Z is not +*> If INFO > 0 and WANTZ is .FALSE., then Z is not *> accessed. *> \endverbatim * @@ -677,7 +677,7 @@ END IF END IF * -* ==== Use up to NS of the the smallest magnatiude +* ==== Use up to NS of the the smallest magnitude * . shifts. If there aren't NS shifts available, * . then use them all, possibly dropping one to * . make the number of shifts even. 
==== diff --git a/lapack-netlib/SRC/dlaqr5.f b/lapack-netlib/SRC/dlaqr5.f index 5cc4eda1a..f58db9c89 100644 --- a/lapack-netlib/SRC/dlaqr5.f +++ b/lapack-netlib/SRC/dlaqr5.f @@ -133,7 +133,7 @@ *> \verbatim *> LDH is INTEGER *> LDH is the leading dimension of H just as declared in the -*> calling procedure. LDH.GE.MAX(1,N). +*> calling procedure. LDH >= MAX(1,N). *> \endverbatim *> *> \param[in] ILOZ @@ -145,7 +145,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N *> \endverbatim *> *> \param[in,out] Z @@ -161,7 +161,7 @@ *> \verbatim *> LDZ is INTEGER *> LDA is the leading dimension of Z just as declared in -*> the calling procedure. LDZ.GE.N. +*> the calling procedure. LDZ >= N. *> \endverbatim *> *> \param[out] V @@ -173,7 +173,7 @@ *> \verbatim *> LDV is INTEGER *> LDV is the leading dimension of V as declared in the -*> calling procedure. LDV.GE.3. +*> calling procedure. LDV >= 3. *> \endverbatim *> *> \param[out] U @@ -185,33 +185,14 @@ *> \verbatim *> LDU is INTEGER *> LDU is the leading dimension of U just as declared in the -*> in the calling subroutine. LDU.GE.3*NSHFTS-3. -*> \endverbatim -*> -*> \param[in] NH -*> \verbatim -*> NH is INTEGER -*> NH is the number of columns in array WH available for -*> workspace. NH.GE.1. -*> \endverbatim -*> -*> \param[out] WH -*> \verbatim -*> WH is DOUBLE PRECISION array, dimension (LDWH,NH) -*> \endverbatim -*> -*> \param[in] LDWH -*> \verbatim -*> LDWH is INTEGER -*> Leading dimension of WH just as declared in the -*> calling procedure. LDWH.GE.3*NSHFTS-3. +*> in the calling subroutine. LDU >= 3*NSHFTS-3. *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> NV is the number of rows in WV agailable for workspace. -*> NV.GE.1. +*> NV >= 1. *> \endverbatim *> *> \param[out] WV @@ -223,9 +204,28 @@ *> \verbatim *> LDWV is INTEGER *> LDWV is the leading dimension of WV as declared in the -*> in the calling subroutine. LDWV.GE.NV. +*> in the calling subroutine. LDWV >= NV. *> \endverbatim * +*> \param[in] NH +*> \verbatim +*> NH is INTEGER +*> NH is the number of columns in array WH available for +*> workspace. NH >= 1. +*> \endverbatim +*> +*> \param[out] WH +*> \verbatim +*> WH is DOUBLE PRECISION array, dimension (LDWH,NH) +*> \endverbatim +*> +*> \param[in] LDWH +*> \verbatim +*> LDWH is INTEGER +*> Leading dimension of WH just as declared in the +*> calling procedure. LDWH >= 3*NSHFTS-3. +*> \endverbatim +*> * Authors: * ======== * diff --git a/lapack-netlib/SRC/dlarfb.f b/lapack-netlib/SRC/dlarfb.f index 5b2cc2ba8..e63641213 100644 --- a/lapack-netlib/SRC/dlarfb.f +++ b/lapack-netlib/SRC/dlarfb.f @@ -92,6 +92,8 @@ *> K is INTEGER *> The order of the matrix T (= the number of elementary *> reflectors whose product defines the block reflector). +*> If SIDE = 'L', M >= K >= 0; +*> if SIDE = 'R', N >= K >= 0. *> \endverbatim *> *> \param[in] V diff --git a/lapack-netlib/SRC/dlarfx.f b/lapack-netlib/SRC/dlarfx.f index 260d367d4..a9e4496f9 100644 --- a/lapack-netlib/SRC/dlarfx.f +++ b/lapack-netlib/SRC/dlarfx.f @@ -94,7 +94,7 @@ *> \param[in] LDC *> \verbatim *> LDC is INTEGER -*> The leading dimension of the array C. LDA >= (1,M). +*> The leading dimension of the array C. LDC >= (1,M). 
*> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/dlarfy.f b/lapack-netlib/SRC/dlarfy.f index a0b0ebb31..3000b38bc 100644 --- a/lapack-netlib/SRC/dlarfy.f +++ b/lapack-netlib/SRC/dlarfy.f @@ -103,7 +103,7 @@ * *> \date December 2016 * -*> \ingroup double_eig +*> \ingroup doubleOTHERauxiliary * * ===================================================================== SUBROUTINE DLARFY( UPLO, N, V, INCV, TAU, C, LDC, WORK ) diff --git a/lapack-netlib/SRC/dlarrb.f b/lapack-netlib/SRC/dlarrb.f index 2b6389e25..ddf3888b9 100644 --- a/lapack-netlib/SRC/dlarrb.f +++ b/lapack-netlib/SRC/dlarrb.f @@ -91,7 +91,7 @@ *> RTOL2 is DOUBLE PRECISION *> Tolerance for the convergence of the bisection intervals. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) +*> RIGHT-LEFT < MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) *> where GAP is the (estimated) distance to the nearest *> eigenvalue. *> \endverbatim @@ -117,7 +117,7 @@ *> WGAP is DOUBLE PRECISION array, dimension (N-1) *> On input, the (estimated) gaps between consecutive *> eigenvalues of L D L^T, i.e., WGAP(I-OFFSET) is the gap between -*> eigenvalues I and I+1. Note that if IFIRST.EQ.ILAST +*> eigenvalues I and I+1. Note that if IFIRST = ILAST *> then WGAP(IFIRST-OFFSET) must be set to ZERO. *> On output, these gaps are refined. *> \endverbatim diff --git a/lapack-netlib/SRC/dlarre.f b/lapack-netlib/SRC/dlarre.f index 0613efbc3..ce55442e2 100644 --- a/lapack-netlib/SRC/dlarre.f +++ b/lapack-netlib/SRC/dlarre.f @@ -150,7 +150,7 @@ *> RTOL2 is DOUBLE PRECISION *> Parameters for bisection. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) +*> RIGHT-LEFT < MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) *> \endverbatim *> *> \param[in] SPLTOL diff --git a/lapack-netlib/SRC/dlarrj.f b/lapack-netlib/SRC/dlarrj.f index 097ba9f77..a4bfb210c 100644 --- a/lapack-netlib/SRC/dlarrj.f +++ b/lapack-netlib/SRC/dlarrj.f @@ -85,7 +85,7 @@ *> RTOL is DOUBLE PRECISION *> Tolerance for the convergence of the bisection intervals. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.RTOL*MAX(|LEFT|,|RIGHT|). +*> RIGHT-LEFT < RTOL*MAX(|LEFT|,|RIGHT|). *> \endverbatim *> *> \param[in] OFFSET diff --git a/lapack-netlib/SRC/dlarrv.f b/lapack-netlib/SRC/dlarrv.f index cace17c0e..4a59a2bbf 100644 --- a/lapack-netlib/SRC/dlarrv.f +++ b/lapack-netlib/SRC/dlarrv.f @@ -149,7 +149,7 @@ *> RTOL2 is DOUBLE PRECISION *> Parameters for bisection. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) +*> RIGHT-LEFT < MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) *> \endverbatim *> *> \param[in,out] W diff --git a/lapack-netlib/SRC/dlartg.f b/lapack-netlib/SRC/dlartg.f index 1c7c46f63..dc49986a0 100644 --- a/lapack-netlib/SRC/dlartg.f +++ b/lapack-netlib/SRC/dlartg.f @@ -163,7 +163,7 @@ F1 = F1*SAFMN2 G1 = G1*SAFMN2 SCALE = MAX( ABS( F1 ), ABS( G1 ) ) - IF( SCALE.GE.SAFMX2 ) + IF( SCALE.GE.SAFMX2 .AND. COUNT .LT. 20) $ GO TO 10 R = SQRT( F1**2+G1**2 ) CS = F1 / R diff --git a/lapack-netlib/SRC/dlartgp.f b/lapack-netlib/SRC/dlartgp.f index 0cb0d2d13..334e416e8 100644 --- a/lapack-netlib/SRC/dlartgp.f +++ b/lapack-netlib/SRC/dlartgp.f @@ -161,7 +161,7 @@ F1 = F1*SAFMN2 G1 = G1*SAFMN2 SCALE = MAX( ABS( F1 ), ABS( G1 ) ) - IF( SCALE.GE.SAFMX2 ) + IF( SCALE.GE.SAFMX2 .AND. COUNT .LT. 
20 )
     $      GO TO 10
         R = SQRT( F1**2+G1**2 )
         CS = F1 / R
diff --git a/lapack-netlib/SRC/dlasd7.f b/lapack-netlib/SRC/dlasd7.f
index e0ddedeb5..66f665cf8 100644
--- a/lapack-netlib/SRC/dlasd7.f
+++ b/lapack-netlib/SRC/dlasd7.f
@@ -400,7 +400,7 @@
          VL( I ) = VLW( IDXI )
    50 CONTINUE
*
-*     Calculate the allowable deflation tolerence
+*     Calculate the allowable deflation tolerance
*
      EPS = DLAMCH( 'Epsilon' )
      TOL = MAX( ABS( ALPHA ), ABS( BETA ) )
diff --git a/lapack-netlib/SRC/dlasr.f b/lapack-netlib/SRC/dlasr.f
index 6059c6293..f707970e4 100644
--- a/lapack-netlib/SRC/dlasr.f
+++ b/lapack-netlib/SRC/dlasr.f
@@ -175,7 +175,7 @@
*> \verbatim
*>          A is DOUBLE PRECISION array, dimension (LDA,N)
*>          The M-by-N matrix A.  On exit, A is overwritten by P*A if
-*>          SIDE = 'R' or by A*P**T if SIDE = 'L'.
+*>          SIDE = 'L' or by A*P**T if SIDE = 'R'.
*> \endverbatim
*>
*> \param[in] LDA
diff --git a/lapack-netlib/SRC/dlassq.f b/lapack-netlib/SRC/dlassq.f
index 885395e3c..5922360f9 100644
--- a/lapack-netlib/SRC/dlassq.f
+++ b/lapack-netlib/SRC/dlassq.f
@@ -60,7 +60,7 @@
*>
*> \param[in] X
*> \verbatim
-*>          X is DOUBLE PRECISION array, dimension (N)
+*>          X is DOUBLE PRECISION array, dimension (1+(N-1)*INCX)
*>          The vector for which a scaled sum of squares is computed.
*>             x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n.
*> \endverbatim
diff --git a/lapack-netlib/SRC/dlaswlq.f b/lapack-netlib/SRC/dlaswlq.f
index 6e4ca20fd..619a1f1a2 100644
--- a/lapack-netlib/SRC/dlaswlq.f
+++ b/lapack-netlib/SRC/dlaswlq.f
@@ -1,3 +1,4 @@
+*> \brief \b DLASWLQ
*
*  Definition:
*  ===========
@@ -18,9 +19,20 @@
*>
*> \verbatim
*>
-*> DLASWLQ computes a blocked Short-Wide LQ factorization of a
-*> M-by-N matrix A, where N >= M:
-*> A = L * Q
+*> DLASWLQ computes a blocked Tall-Skinny LQ factorization of
+*> a real M-by-N matrix A for M <= N:
+*>
+*>    A = ( L 0 ) * Q,
+*>
+*> where:
+*>
+*> Q is an N-by-N orthogonal matrix, stored on exit in an implicit
+*> form in the elements above the diagonal of the array A and in
+*> the elements of the array T;
+*> L is a lower-triangular M-by-M matrix stored on exit in
+*> the elements on and below the diagonal of the array A.
+*> 0 is a M-by-(N-M) zero matrix, if M < N, and is not stored.
+*>
*> \endverbatim
*
* Arguments:
@@ -150,7 +162,7 @@
      SUBROUTINE DLASWLQ( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK,
     $                    INFO)
*
-*  -- LAPACK computational routine (version 3.7.1) --
+*  -- LAPACK computational routine (version 3.9.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.
-- * June 2017 diff --git a/lapack-netlib/SRC/dlasyf_aa.f b/lapack-netlib/SRC/dlasyf_aa.f index 6b75e46e0..793537e04 100644 --- a/lapack-netlib/SRC/dlasyf_aa.f +++ b/lapack-netlib/SRC/dlasyf_aa.f @@ -284,8 +284,9 @@ * * Swap A(I1, I2+1:M) with A(I2, I2+1:M) * - CALL DSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, - $ A( J1+I2-1, I2+1 ), LDA ) + IF( I2.LT.M ) + $ CALL DSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, + $ A( J1+I2-1, I2+1 ), LDA ) * * Swap A(I1, I1) with A(I2,I2) * @@ -325,13 +326,15 @@ * Compute L(J+2, J+1) = WORK( 3:M ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:M, J) = L(J+2:M, J+1) * - IF( A( K, J+1 ).NE.ZERO ) THEN - ALPHA = ONE / A( K, J+1 ) - CALL DCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) - CALL DSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) - ELSE - CALL DLASET( 'Full', 1, M-J-1, ZERO, ZERO, - $ A( K, J+2 ), LDA) + IF( J.LT.(M-1) ) THEN + IF( A( K, J+1 ).NE.ZERO ) THEN + ALPHA = ONE / A( K, J+1 ) + CALL DCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) + CALL DSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) + ELSE + CALL DLASET( 'Full', 1, M-J-1, ZERO, ZERO, + $ A( K, J+2 ), LDA) + END IF END IF END IF J = J + 1 @@ -432,8 +435,9 @@ * * Swap A(I2+1:M, I1) with A(I2+1:M, I2) * - CALL DSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, - $ A( I2+1, J1+I2-1 ), 1 ) + IF( I2.LT.M ) + $ CALL DSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, + $ A( I2+1, J1+I2-1 ), 1 ) * * Swap A(I1, I1) with A(I2, I2) * @@ -473,13 +477,15 @@ * Compute L(J+2, J+1) = WORK( 3:M ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:M, J) = L(J+2:M, J+1) * - IF( A( J+1, K ).NE.ZERO ) THEN - ALPHA = ONE / A( J+1, K ) - CALL DCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) - CALL DSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) - ELSE - CALL DLASET( 'Full', M-J-1, 1, ZERO, ZERO, - $ A( J+2, K ), LDA ) + IF( J.LT.(M-1) ) THEN + IF( A( J+1, K ).NE.ZERO ) THEN + ALPHA = ONE / A( J+1, K ) + CALL DCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) + CALL DSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) + ELSE + CALL DLASET( 'Full', M-J-1, 1, ZERO, ZERO, + $ A( J+2, K ), LDA ) + END IF END IF END IF J = J + 1 diff --git a/lapack-netlib/SRC/dlasyf_rk.f b/lapack-netlib/SRC/dlasyf_rk.f index 209b4c89d..d581eeedc 100644 --- a/lapack-netlib/SRC/dlasyf_rk.f +++ b/lapack-netlib/SRC/dlasyf_rk.f @@ -321,7 +321,7 @@ * of A and working backwards, and compute the matrix W = U12*D * for use in updating A11 * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = ZERO @@ -649,7 +649,7 @@ * of A and working forwards, and compute the matrix W = L21*D * for use in updating A22 * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = ZERO * diff --git a/lapack-netlib/SRC/dlasyf_rook.f b/lapack-netlib/SRC/dlasyf_rook.f index 49ee7a6c9..557032104 100644 --- a/lapack-netlib/SRC/dlasyf_rook.f +++ b/lapack-netlib/SRC/dlasyf_rook.f @@ -21,7 +21,7 @@ * SUBROUTINE DLASYF_ROOK( UPLO, N, NB, KB, A, LDA, IPIV, W, LDW, INFO ) * * .. Scalar Arguments .. -* CHARADLATER UPLO +* CHARACTER UPLO * INTEGER INFO, KB, LDA, LDW, N, NB * .. * .. Array Arguments .. diff --git a/lapack-netlib/SRC/dlatdf.f b/lapack-netlib/SRC/dlatdf.f index fd05059b3..8001e0830 100644 --- a/lapack-netlib/SRC/dlatdf.f +++ b/lapack-netlib/SRC/dlatdf.f @@ -85,7 +85,7 @@ *> RHS is DOUBLE PRECISION array, dimension (N) *> On entry, RHS contains contributions from other subsystems. 
*>          On exit, RHS contains the solution of the subsystem with
-*>          entries acoording to the value of IJOB (see above).
+*>          entries according to the value of IJOB (see above).
*> \endverbatim
*>
*> \param[in,out] RDSUM
@@ -260,7 +260,7 @@
*
*     Solve for U-part, look-ahead for RHS(N) = +-1. This is not done
*     in BSOLVE and will hopefully give us a better estimate because
-*     any ill-conditioning of the original matrix is transfered to U
+*     any ill-conditioning of the original matrix is transferred to U
*     and not to L. U(N, N) is an approximation to sigma_min(LU).
*
      CALL DCOPY( N-1, RHS, 1, XP, 1 )
diff --git a/lapack-netlib/SRC/dlatsqr.f b/lapack-netlib/SRC/dlatsqr.f
index 1ce7c4de0..598d2938e 100644
--- a/lapack-netlib/SRC/dlatsqr.f
+++ b/lapack-netlib/SRC/dlatsqr.f
@@ -1,3 +1,4 @@
+*> \brief \b DLATSQR
*
*  Definition:
*  ===========
@@ -19,8 +20,22 @@
*> \verbatim
*>
-*> DLATSQR computes a blocked Tall-Skinny QR factorization of
-*> an M-by-N matrix A, where M >= N:
-*> A = Q * R .
+*> DLATSQR computes a blocked Tall-Skinny QR factorization of
+*> a real M-by-N matrix A for M >= N:
+*>
+*>    A = Q * ( R ),
+*>            ( 0 )
+*>
+*> where:
+*>
+*> Q is a M-by-M orthogonal matrix, stored on exit in an implicit
+*> form in the elements below the diagonal of the array A and in
+*> the elements of the array T;
+*>
+*> R is an upper-triangular N-by-N matrix, stored on exit in
+*> the elements on and above the diagonal of the array A.
+*>
+*> 0 is a (M-N)-by-N zero matrix, and is not stored.
+*>
*> \endverbatim
*
* Arguments:
@@ -149,10 +164,10 @@
      SUBROUTINE DLATSQR( M, N, MB, NB, A, LDA, T, LDT, WORK,
     $                    LWORK, INFO)
*
-*  -- LAPACK computational routine (version 3.7.0) --
+*  -- LAPACK computational routine (version 3.9.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. --
-*     December 2016
+*     November 2019
*
*     .. Scalar Arguments ..
      INTEGER           INFO, LDA, M, N, MB, NB, LDT, LWORK
diff --git a/lapack-netlib/SRC/dorgtsqr.f b/lapack-netlib/SRC/dorgtsqr.f
new file mode 100644
index 000000000..85b05b6b5
--- /dev/null
+++ b/lapack-netlib/SRC/dorgtsqr.f
@@ -0,0 +1,306 @@
+*> \brief \b DORGTSQR
+*
+*  =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+*            http://www.netlib.org/lapack/explore-html/
+*
+*> \htmlonly
+*> Download DORGTSQR + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
+*> [TXT]
+*>
+*  Definition:
+*  ===========
+*
+*       SUBROUTINE DORGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK,
+*      $                     INFO )
+*
+*       .. Scalar Arguments ..
+*       INTEGER           INFO, LDA, LDT, LWORK, M, N, MB, NB
+*       ..
+*       .. Array Arguments ..
+*       DOUBLE PRECISION  A( LDA, * ), T( LDT, * ), WORK( * )
+*       ..
+*
+*> \par Purpose:
+*  =============
+*>
+*> \verbatim
+*>
+*> DORGTSQR generates an M-by-N real matrix Q_out with orthonormal columns,
+*> which are the first N columns of a product of real orthogonal
+*> matrices of order M which are returned by DLATSQR
+*>
+*>      Q_out = first_N_columns_of( Q(1)_in * Q(2)_in * ... * Q(k)_in ).
+*>
+*> See the documentation for DLATSQR.
+*> \endverbatim
+*
+*  Arguments:
+*  ==========
+*
+*> \param[in] M
+*> \verbatim
+*>          M is INTEGER
+*>          The number of rows of the matrix A.  M >= 0.
+*> \endverbatim
+*>
+*> \param[in] N
+*> \verbatim
+*>          N is INTEGER
+*>          The number of columns of the matrix A.  M >= N >= 0.
+*> \endverbatim
+*>
+*> \param[in] MB
+*> \verbatim
+*>          MB is INTEGER
+*>          The row block size used by DLATSQR to return
+*>          arrays A and T. MB > N.
+*>          (Note that if MB > M, then M is used instead of MB
+*>          as the row block size).
+*> \endverbatim +*> +*> \param[in] NB +*> \verbatim +*> NB is INTEGER +*> The column block size used by DLATSQR to return +*> arrays A and T. NB >= 1. +*> (Note that if NB > N, then N is used instead of NB +*> as the column block size). +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is DOUBLE PRECISION array, dimension (LDA,N) +*> +*> On entry: +*> +*> The elements on and above the diagonal are not accessed. +*> The elements below the diagonal represent the unit +*> lower-trapezoidal blocked matrix V computed by DLATSQR +*> that defines the input matrices Q_in(k) (ones on the +*> diagonal are not stored) (same format as the output A +*> below the diagonal in DLATSQR). +*> +*> On exit: +*> +*> The array A contains an M-by-N orthonormal matrix Q_out, +*> i.e the columns of A are orthogonal unit vectors. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[in] T +*> \verbatim +*> T is DOUBLE PRECISION array, +*> dimension (LDT, N * NIRB) +*> where NIRB = Number_of_input_row_blocks +*> = MAX( 1, CEIL((M-N)/(MB-N)) ) +*> Let NICB = Number_of_input_col_blocks +*> = CEIL(N/NB) +*> +*> The upper-triangular block reflectors used to define the +*> input matrices Q_in(k), k=(1:NIRB*NICB). The block +*> reflectors are stored in compact form in NIRB block +*> reflector sequences. Each of NIRB block reflector sequences +*> is stored in a larger NB-by-N column block of T and consists +*> of NICB smaller NB-by-NB upper-triangular column blocks. +*> (same format as the output T in DLATSQR). +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. +*> LDT >= max(1,min(NB1,N)). +*> \endverbatim +*> +*> \param[out] WORK +*> \verbatim +*> (workspace) DOUBLE PRECISION array, dimension (MAX(2,LWORK)) +*> On exit, if INFO = 0, WORK(1) returns the optimal LWORK. +*> \endverbatim +*> +*> \param[in] LWORK +*> \verbatim +*> The dimension of the array WORK. LWORK >= (M+NB)*N. +*> If LWORK = -1, then a workspace query is assumed. +*> The routine only calculates the optimal size of the WORK +*> array, returns this value as the first entry of the WORK +*> array, and no error message related to LWORK is issued +*> by XERBLA. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup doubleOTHERcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE DORGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK, + $ INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, LDT, LWORK, M, N, MB, NB +* .. +* .. Array Arguments .. + DOUBLE PRECISION A( LDA, * ), T( LDT, * ), WORK( * ) +* .. 
+* +* ===================================================================== +* +* .. Parameters .. + DOUBLE PRECISION ONE, ZERO + PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) +* .. +* .. Local Scalars .. + LOGICAL LQUERY + INTEGER IINFO, LDC, LWORKOPT, LC, LW, NBLOCAL, J +* .. +* .. External Subroutines .. + EXTERNAL DCOPY, DLAMTSQR, DLASET, XERBLA +* .. +* .. Intrinsic Functions .. + INTRINSIC DBLE, MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters +* + LQUERY = LWORK.EQ.-1 + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 .OR. M.LT.N ) THEN + INFO = -2 + ELSE IF( MB.LE.N ) THEN + INFO = -3 + ELSE IF( NB.LT.1 ) THEN + INFO = -4 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -6 + ELSE IF( LDT.LT.MAX( 1, MIN( NB, N ) ) ) THEN + INFO = -8 + ELSE +* +* Test the input LWORK for the dimension of the array WORK. +* This workspace is used to store array C(LDC, N) and WORK(LWORK) +* in the call to DLAMTSQR. See the documentation for DLAMTSQR. +* + IF( LWORK.LT.2 .AND. (.NOT.LQUERY) ) THEN + INFO = -10 + ELSE +* +* Set block size for column blocks +* + NBLOCAL = MIN( NB, N ) +* +* LWORK = -1, then set the size for the array C(LDC,N) +* in DLAMTSQR call and set the optimal size of the work array +* WORK(LWORK) in DLAMTSQR call. +* + LDC = M + LC = LDC*N + LW = N * NBLOCAL +* + LWORKOPT = LC+LW +* + IF( ( LWORK.LT.MAX( 1, LWORKOPT ) ).AND.(.NOT.LQUERY) ) THEN + INFO = -10 + END IF + END IF +* + END IF +* +* Handle error in the input parameters and return workspace query. +* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DORGTSQR', -INFO ) + RETURN + ELSE IF ( LQUERY ) THEN + WORK( 1 ) = DBLE( LWORKOPT ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) THEN + WORK( 1 ) = DBLE( LWORKOPT ) + RETURN + END IF +* +* (1) Form explicitly the tall-skinny M-by-N left submatrix Q1_in +* of M-by-M orthogonal matrix Q_in, which is implicitly stored in +* the subdiagonal part of input array A and in the input array T. +* Perform by the following operation using the routine DLAMTSQR. +* +* Q1_in = Q_in * ( I ), where I is a N-by-N identity matrix, +* ( 0 ) 0 is a (M-N)-by-N zero matrix. +* +* (1a) Form M-by-N matrix in the array WORK(1:LDC*N) with ones +* on the diagonal and zeros elsewhere. +* + CALL DLASET( 'F', M, N, ZERO, ONE, WORK, LDC ) +* +* (1b) On input, WORK(1:LDC*N) stores ( I ); +* ( 0 ) +* +* On output, WORK(1:LDC*N) stores Q1_in. +* + CALL DLAMTSQR( 'L', 'N', M, N, N, MB, NBLOCAL, A, LDA, T, LDT, + $ WORK, LDC, WORK( LC+1 ), LW, IINFO ) +* +* (2) Copy the result from the part of the work array (1:M,1:N) +* with the leading dimension LDC that starts at WORK(1) into +* the output array A(1:M,1:N) column-by-column. +* + DO J = 1, N + CALL DCOPY( M, WORK( (J-1)*LDC + 1 ), 1, A( 1, J ), 1 ) + END DO +* + WORK( 1 ) = DBLE( LWORKOPT ) + RETURN +* +* End of DORGTSQR +* + END \ No newline at end of file diff --git a/lapack-netlib/SRC/dorhr_col.f b/lapack-netlib/SRC/dorhr_col.f new file mode 100644 index 000000000..b5a65973d --- /dev/null +++ b/lapack-netlib/SRC/dorhr_col.f @@ -0,0 +1,440 @@ +*> \brief \b DORHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download DORHR_COL + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> +* Definition: +* =========== +* +* SUBROUTINE DORHR_COL( M, N, NB, A, LDA, T, LDT, D, INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, LDT, M, N, NB +* .. +* .. Array Arguments .. 
+*       DOUBLE PRECISION   A( LDA, * ), D( * ), T( LDT, * )
+*       ..
+*
+*> \par Purpose:
+*  =============
+*>
+*> \verbatim
+*>
+*> DORHR_COL takes an M-by-N real matrix Q_in with orthonormal columns
+*> as input, stored in A, and performs Householder Reconstruction (HR),
+*> i.e. reconstructs Householder vectors V(i) implicitly representing
+*> another M-by-N matrix Q_out, with the property that Q_in = Q_out*S,
+*> where S is an N-by-N diagonal matrix with diagonal entries
+*> equal to +1 or -1. The Householder vectors (columns V(i) of V) are
+*> stored in A on output, and the diagonal entries of S are stored in D.
+*> Block reflectors are also returned in T
+*> (same output format as DGEQRT).
+*> \endverbatim
+*
+*  Arguments:
+*  ==========
+*
+*> \param[in] M
+*> \verbatim
+*>          M is INTEGER
+*>          The number of rows of the matrix A.  M >= 0.
+*> \endverbatim
+*>
+*> \param[in] N
+*> \verbatim
+*>          N is INTEGER
+*>          The number of columns of the matrix A.  M >= N >= 0.
+*> \endverbatim
+*>
+*> \param[in] NB
+*> \verbatim
+*>          NB is INTEGER
+*>          The column block size to be used in the reconstruction
+*>          of Householder column vector blocks in the array A and
+*>          corresponding block reflectors in the array T. NB >= 1.
+*>          (Note that if NB > N, then N is used instead of NB
+*>          as the column block size.)
+*> \endverbatim
+*>
+*> \param[in,out] A
+*> \verbatim
+*>          A is DOUBLE PRECISION array, dimension (LDA,N)
+*>
+*>          On entry:
+*>
+*>             The array A contains an M-by-N orthonormal matrix Q_in,
+*>             i.e the columns of A are orthogonal unit vectors.
+*>
+*>          On exit:
+*>
+*>             The elements below the diagonal of A represent the unit
+*>             lower-trapezoidal matrix V of Householder column vectors
+*>             V(i). The unit diagonal entries of V are not stored
+*>             (same format as the output below the diagonal in A from
+*>             DGEQRT). The matrix T and the matrix V stored on output
+*>             in A implicitly define Q_out.
+*>
+*>             The elements above the diagonal contain the factor U
+*>             of the "modified" LU-decomposition:
+*>                Q_in - ( S ) = V * U
+*>                       ( 0 )
+*>             where 0 is a (M-N)-by-N zero matrix.
+*> \endverbatim
+*>
+*> \param[in] LDA
+*> \verbatim
+*>          LDA is INTEGER
+*>          The leading dimension of the array A.  LDA >= max(1,M).
+*> \endverbatim
+*>
+*> \param[out] T
+*> \verbatim
+*>          T is DOUBLE PRECISION array,
+*>          dimension (LDT, N)
+*>
+*>          Let NOCB = Number_of_output_col_blocks
+*>                   = CEIL(N/NB)
+*>
+*>          On exit, T(1:NB, 1:N) contains NOCB upper-triangular
+*>          block reflectors used to define Q_out stored in compact
+*>          form as a sequence of upper-triangular NB-by-NB column
+*>          blocks (same format as the output T in DGEQRT).
+*>          The matrix T and the matrix V stored on output in A
+*>          implicitly define Q_out. NOTE: The lower triangles
+*>          below the upper-triangular blocks will be filled with
+*>          zeros. See Further Details.
+*> \endverbatim
+*>
+*> \param[in] LDT
+*> \verbatim
+*>          LDT is INTEGER
+*>          The leading dimension of the array T.
+*>          LDT >= max(1,min(NB,N)).
+*> \endverbatim
+*>
+*> \param[out] D
+*> \verbatim
+*>          D is DOUBLE PRECISION array, dimension min(M,N).
+*>          The elements can be only plus or minus one.
+*>
+*>          D(i) is constructed as D(i) = -SIGN(Q_in_i(i,i)), where
+*>          1 <= i <= min(M,N), and Q_in_i is Q_in after performing
+*>          i-1 steps of "modified" Gaussian elimination.
+*>          See Further Details.
+*> \endverbatim
+*>
+*> \param[out] INFO
+*> \verbatim
+*>          INFO is INTEGER
+*>          = 0:  successful exit
+*>          < 0:  if INFO = -i, the i-th argument had an illegal value
+*> \endverbatim
+*>
+*> \par Further Details:
+*  =====================
+*>
+*> \verbatim
+*>
+*> The computed M-by-M orthogonal factor Q_out is defined implicitly as
+*> a product of orthogonal matrices Q_out(i). Each Q_out(i) is stored in
+*> the compact WY-representation format in the corresponding blocks of
+*> matrices V (stored in A) and T.
+*>
+*> The M-by-N unit lower-trapezoidal matrix V stored in the M-by-N
+*> matrix A contains the column vectors V(i) in NB-size column
+*> blocks VB(j). For example, VB(1) contains the columns
+*> V(1), V(2), ... V(NB). NOTE: The unit entries on
+*> the diagonal of V are not stored in A.
+*>
+*> The number of column blocks is
+*>
+*> NOCB = Number_of_output_col_blocks = CEIL(N/NB)
+*>
+*> where each block is of order NB except for the last block, which
+*> is of order LAST_NB = N - (NOCB-1)*NB.
+*>
+*> For example, if M=6, N=5 and NB=2, the matrix V is
+*>
+*>
+*>     V = ( VB(1), VB(2), VB(3) ) =
+*>
+*>       = (   1                 )
+*>         ( v21   1             )
+*>         ( v31 v32   1         )
+*>         ( v41 v42 v43   1     )
+*>         ( v51 v52 v53 v54   1 )
+*>         ( v61 v62 v63 v64 v65 )
+*>
+*>
+*> For each of the column blocks VB(i), an upper-triangular block
+*> reflector TB(i) is computed. These blocks are stored as
+*> a sequence of upper-triangular column blocks in the NB-by-N
+*> matrix T. The size of each TB(i) block is NB-by-NB, except
+*> for the last block, whose size is LAST_NB-by-LAST_NB.
+*>
+*> For example, if M=6, N=5 and NB=2, the matrix T is
+*>
+*>     T = ( TB(1), TB(2), TB(3) ) =
+*>
+*>       = ( t11 t12 t13 t14 t15 )
+*>         (     t22     t24     )
+*>
+*>
+*> The M-by-M factor Q_out is given as a product of NOCB
+*> orthogonal M-by-M matrices Q_out(i).
+*>
+*>     Q_out = Q_out(1) * Q_out(2) * ... * Q_out(NOCB),
+*>
+*> where each matrix Q_out(i) is given by the WY-representation
+*> using corresponding blocks from the matrices V and T:
+*>
+*>     Q_out(i) = I - VB(i) * TB(i) * (VB(i))**T,
+*>
+*> where I is the identity matrix. Here is the formula with matrix
+*> dimensions:
+*>
+*>  Q(i){M-by-M} = I{M-by-M} -
+*>    VB(i){M-by-INB} * TB(i){INB-by-INB} * (VB(i))**T {INB-by-M},
+*>
+*> where INB = NB, except for the last block NOCB
+*> for which INB=LAST_NB.
+*>
+*> =====
+*> NOTE:
+*> =====
+*>
+*> If Q_in is the result of doing a QR factorization
+*> B = Q_in * R_in, then:
+*>
+*> B = (Q_out*S) * R_in = Q_out * (S * R_in) = Q_out * R_out.
+*>
+*> So if one wants to interpret Q_out as the result
+*> of the QR factorization of B, then the corresponding R_out
+*> should be obtained by R_out = S * R_in, i.e. some rows of R_in
+*> should be multiplied by -1.
+*>
+*> For the details of the algorithm, see [1].
+*>
+*> [1] "Reconstructing Householder vectors from tall-skinny QR",
+*>     G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen,
+*>     E. Solomonik, J. Parallel Distrib. Comput.,
+*>     vol. 85, pp. 3-31, 2015.
+*> \endverbatim
+*>
+* Authors:
+* ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+* +*> \date November 2019 +* +*> \ingroup doubleOTHERcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE DORHR_COL( M, N, NB, A, LDA, T, LDT, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, LDT, M, N, NB +* .. +* .. Array Arguments .. + DOUBLE PRECISION A( LDA, * ), D( * ), T( LDT, * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + DOUBLE PRECISION ONE, ZERO + PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) +* .. +* .. Local Scalars .. + INTEGER I, IINFO, J, JB, JBTEMP1, JBTEMP2, JNB, + $ NPLUSONE +* .. +* .. External Subroutines .. + EXTERNAL DCOPY, DLAORHR_COL_GETRFNP, DSCAL, DTRSM, + $ XERBLA +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 .OR. N.GT.M ) THEN + INFO = -2 + ELSE IF( NB.LT.1 ) THEN + INFO = -3 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -5 + ELSE IF( LDT.LT.MAX( 1, MIN( NB, N ) ) ) THEN + INFO = -7 + END IF +* +* Handle error in the input parameters. +* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DORHR_COL', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) THEN + RETURN + END IF +* +* On input, the M-by-N matrix A contains the orthogonal +* M-by-N matrix Q_in. +* +* (1) Compute the unit lower-trapezoidal V (ones on the diagonal +* are not stored) by performing the "modified" LU-decomposition. +* +* Q_in - ( S ) = V * U = ( V1 ) * U, +* ( 0 ) ( V2 ) +* +* where 0 is an (M-N)-by-N zero matrix. +* +* (1-1) Factor V1 and U. + + CALL DLAORHR_COL_GETRFNP( N, N, A, LDA, D, IINFO ) +* +* (1-2) Solve for V2. +* + IF( M.GT.N ) THEN + CALL DTRSM( 'R', 'U', 'N', 'N', M-N, N, ONE, A, LDA, + $ A( N+1, 1 ), LDA ) + END IF +* +* (2) Reconstruct the block reflector T stored in T(1:NB, 1:N) +* as a sequence of upper-triangular blocks with NB-size column +* blocking. +* +* Loop over the column blocks of size NB of the array A(1:M,1:N) +* and the array T(1:NB,1:N), JB is the column index of a column +* block, JNB is the column block size at each step JB. +* + NPLUSONE = N + 1 + DO JB = 1, N, NB +* +* (2-0) Determine the column block size JNB. +* + JNB = MIN( NPLUSONE-JB, NB ) +* +* (2-1) Copy the upper-triangular part of the current JNB-by-JNB +* diagonal block U(JB) (of the N-by-N matrix U) stored +* in A(JB:JB+JNB-1,JB:JB+JNB-1) into the upper-triangular part +* of the current JNB-by-JNB block T(1:JNB,JB:JB+JNB-1) +* column-by-column, total JNB*(JNB+1)/2 elements. +* + JBTEMP1 = JB - 1 + DO J = JB, JB+JNB-1 + CALL DCOPY( J-JBTEMP1, A( JB, J ), 1, T( 1, J ), 1 ) + END DO +* +* (2-2) Perform on the upper-triangular part of the current +* JNB-by-JNB diagonal block U(JB) (of the N-by-N matrix U) stored +* in T(1:JNB,JB:JB+JNB-1) the following operation in place: +* (-1)*U(JB)*S(JB), i.e. the result will be stored in the upper- +* triangular part of T(1:JNB,JB:JB+JNB-1).
This multiplication +* of the JNB-by-JNB diagonal block U(JB) by the JNB-by-JNB +* diagonal block S(JB) of the N-by-N sign matrix S from the +* right means changing the sign of each J-th column of the block +* U(JB) according to the sign of the diagonal element of the block +* S(JB), i.e. S(J,J) that is stored in the array element D(J). +* + DO J = JB, JB+JNB-1 + IF( D( J ).EQ.ONE ) THEN + CALL DSCAL( J-JBTEMP1, -ONE, T( 1, J ), 1 ) + END IF + END DO +* +* (2-3) Perform the triangular solve for the current block +* matrix X(JB): +* +* X(JB) * (A(JB)**T) = B(JB), where: +* +* A(JB)**T is a JNB-by-JNB unit upper-triangular +* coefficient block, and A(JB)=V1(JB), which +* is a JNB-by-JNB unit lower-triangular block +* stored in A(JB:JB+JNB-1,JB:JB+JNB-1). +* The N-by-N matrix V1 is the upper part +* of the M-by-N lower-trapezoidal matrix V +* stored in A(1:M,1:N); +* +* B(JB) is a JNB-by-JNB upper-triangular right-hand +* side block, B(JB) = (-1)*U(JB)*S(JB), and +* B(JB) is stored in T(1:JNB,JB:JB+JNB-1); +* +* X(JB) is a JNB-by-JNB upper-triangular solution +* block, X(JB) is the upper-triangular block +* reflector T(JB), and X(JB) is stored +* in T(1:JNB,JB:JB+JNB-1). +* +* In other words, we perform the triangular solve for the +* upper-triangular block T(JB): +* +* T(JB) * (V1(JB)**T) = (-1)*U(JB)*S(JB). +* +* Even though the blocks X(JB) and B(JB) are upper- +* triangular, the routine DTRSM will access all JNB**2 +* elements of the square T(1:JNB,JB:JB+JNB-1). Therefore, +* we need to set to zero the elements of the block +* T(1:JNB,JB:JB+JNB-1) below the diagonal before the call +* to DTRSM. +* +* (2-3a) Set the elements to zero. +* + JBTEMP2 = JB - 2 + DO J = JB, JB+JNB-2 + DO I = J-JBTEMP2, NB + T( I, J ) = ZERO + END DO + END DO +* +* (2-3b) Perform the triangular solve. +* + CALL DTRSM( 'R', 'L', 'T', 'U', JNB, JNB, ONE, + $ A( JB, JB ), LDA, T( 1, JB ), LDT ) +* + END DO +* + RETURN +* +* End of DORHR_COL +* + END \ No newline at end of file diff --git a/lapack-netlib/SRC/dporfsx.f b/lapack-netlib/SRC/dporfsx.f index 53724925e..67cca9ccf 100644 --- a/lapack-netlib/SRC/dporfsx.f +++ b/lapack-netlib/SRC/dporfsx.f @@ -135,7 +135,7 @@ *> \param[in,out] S *> \verbatim *> S is DOUBLE PRECISION array, dimension (N) -*> The row scale factors for A. If EQUED = 'Y', A is multiplied on +*> The scale factors for A. If EQUED = 'Y', A is multiplied on *> the left and right by diag(S). S is an input argument if FACT = *> 'F'; otherwise, S is an output argument. If FACT = 'F' and EQUED *> = 'Y', each element of S must be positive. If S is output, each @@ -263,7 +263,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -299,14 +299,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension (NPARAMS) -*> Specifies algorithm parameters. If an entry is .LT. 
0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -314,9 +314,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/dposvxx.f b/lapack-netlib/SRC/dposvxx.f index 488e0b15a..b0de44910 100644 --- a/lapack-netlib/SRC/dposvxx.f +++ b/lapack-netlib/SRC/dposvxx.f @@ -366,7 +366,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -402,14 +402,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -417,9 +417,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. 
*> (other values are reserved for future use) *> *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual diff --git a/lapack-netlib/SRC/dsb2st_kernels.f b/lapack-netlib/SRC/dsb2st_kernels.f index 3bf126d5b..a9dc6b5ca 100644 --- a/lapack-netlib/SRC/dsb2st_kernels.f +++ b/lapack-netlib/SRC/dsb2st_kernels.f @@ -1,26 +1,26 @@ *> \brief \b DSB2ST_KERNELS * * @generated from zhb2st_kernels.f, fortran z -> d, Wed Dec 7 08:22:39 2016 -* +* * =========== DOCUMENTATION =========== * -* Online html documentation available at -* http://www.netlib.org/lapack/explore-html/ +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ * *> \htmlonly -*> Download DSB2ST_KERNELS + dependencies -*> -*> [TGZ] -*> -*> [ZIP] -*> +*> Download DSB2ST_KERNELS + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> *> [TXT] -*> \endhtmlonly +*> \endhtmlonly * * Definition: * =========== * -* SUBROUTINE DSB2ST_KERNELS( UPLO, WANTZ, TTYPE, +* SUBROUTINE DSB2ST_KERNELS( UPLO, WANTZ, TTYPE, * ST, ED, SWEEP, N, NB, IB, * A, LDA, V, TAU, LDVT, WORK) * @@ -32,9 +32,9 @@ * INTEGER TTYPE, ST, ED, SWEEP, N, NB, IB, LDA, LDVT * .. * .. Array Arguments .. -* DOUBLE PRECISION A( LDA, * ), V( * ), +* DOUBLE PRECISION A( LDA, * ), V( * ), * TAU( * ), WORK( * ) -* +* *> \par Purpose: * ============= *> @@ -124,7 +124,7 @@ *> LDVT is INTEGER. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array. Workspace of size nb. *> \endverbatim @@ -147,7 +147,7 @@ *> http://doi.acm.org/10.1145/2063384.2063394 *> *> A. Haidar, J. Kurzak, P. Luszczek, 2013. -*> An improved parallel singular value algorithm and its implementation +*> An improved parallel singular value algorithm and its implementation *> for multicore hardware, In Proceedings of 2013 International Conference *> for High Performance Computing, Networking, Storage and Analysis (SC '13). *> Denver, Colorado, USA, 2013. @@ -155,16 +155,16 @@ *> http://doi.acm.org/10.1145/2503210.2503292 *> *> A. Haidar, R. Solca, S. Tomov, T. Schulthess and J. Dongarra. -*> A novel hybrid CPU-GPU generalized eigensolver for electronic structure +*> A novel hybrid CPU-GPU generalized eigensolver for electronic structure *> calculations based on fine-grained memory aware tasks. *> International Journal of High Performance Computing Applications. *> Volume 28 Issue 2, Pages 196-209, May 2014. -*> http://hpc.sagepub.com/content/28/2/196 +*> http://hpc.sagepub.com/content/28/2/196 *> *> \endverbatim *> * ===================================================================== - SUBROUTINE DSB2ST_KERNELS( UPLO, WANTZ, TTYPE, + SUBROUTINE DSB2ST_KERNELS( UPLO, WANTZ, TTYPE, $ ST, ED, SWEEP, N, NB, IB, $ A, LDA, V, TAU, LDVT, WORK) * @@ -181,7 +181,7 @@ INTEGER TTYPE, ST, ED, SWEEP, N, NB, IB, LDA, LDVT * .. * .. Array Arguments .. - DOUBLE PRECISION A( LDA, * ), V( * ), + DOUBLE PRECISION A( LDA, * ), V( * ), $ TAU( * ), WORK( * ) * .. * @@ -195,8 +195,8 @@ * .. Local Scalars .. LOGICAL UPPER INTEGER I, J1, J2, LM, LN, VPOS, TAUPOS, - $ DPOS, OFDPOS, AJETER - DOUBLE PRECISION CTMP + $ DPOS, OFDPOS, AJETER + DOUBLE PRECISION CTMP * .. * .. External Subroutines .. EXTERNAL DLARFG, DLARFX, DLARFY @@ -209,7 +209,7 @@ * .. * .. * .. Executable Statements .. 
-* +* AJETER = IB + LDVT UPPER = LSAME( UPLO, 'U' ) @@ -240,10 +240,10 @@ V( VPOS ) = ONE DO 10 I = 1, LM-1 V( VPOS+I ) = ( A( OFDPOS-I, ST+I ) ) - A( OFDPOS-I, ST+I ) = ZERO + A( OFDPOS-I, ST+I ) = ZERO 10 CONTINUE CTMP = ( A( OFDPOS, ST ) ) - CALL DLARFG( LM, CTMP, V( VPOS+1 ), 1, + CALL DLARFG( LM, CTMP, V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) A( OFDPOS, ST ) = CTMP * @@ -281,14 +281,14 @@ * V( VPOS ) = ONE DO 30 I = 1, LM-1 - V( VPOS+I ) = + V( VPOS+I ) = $ ( A( DPOS-NB-I, J1+I ) ) A( DPOS-NB-I, J1+I ) = ZERO 30 CONTINUE CTMP = ( A( DPOS-NB, J1 ) ) CALL DLARFG( LM, CTMP, V( VPOS+1 ), 1, TAU( TAUPOS ) ) A( DPOS-NB, J1 ) = CTMP -* +* CALL DLARFX( 'Right', LN-1, LM, V( VPOS ), $ TAU( TAUPOS ), $ A( DPOS-NB+1, J1 ), LDA-1, WORK) @@ -296,9 +296,9 @@ ENDIF * * Lower case -* +* ELSE -* +* IF( WANTZ ) THEN VPOS = MOD( SWEEP-1, 2 ) * N + ST TAUPOS = MOD( SWEEP-1, 2 ) * N + ST @@ -313,9 +313,9 @@ V( VPOS ) = ONE DO 20 I = 1, LM-1 V( VPOS+I ) = A( OFDPOS+I, ST-1 ) - A( OFDPOS+I, ST-1 ) = ZERO + A( OFDPOS+I, ST-1 ) = ZERO 20 CONTINUE - CALL DLARFG( LM, A( OFDPOS, ST-1 ), V( VPOS+1 ), 1, + CALL DLARFG( LM, A( OFDPOS, ST-1 ), V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) * LM = ED - ST + 1 @@ -342,7 +342,7 @@ LM = J2-J1+1 * IF( LM.GT.0) THEN - CALL DLARFX( 'Right', LM, LN, V( VPOS ), + CALL DLARFX( 'Right', LM, LN, V( VPOS ), $ TAU( TAUPOS ), A( DPOS+NB, ST ), $ LDA-1, WORK) * @@ -359,13 +359,13 @@ V( VPOS+I ) = A( DPOS+NB+I, ST ) A( DPOS+NB+I, ST ) = ZERO 40 CONTINUE - CALL DLARFG( LM, A( DPOS+NB, ST ), V( VPOS+1 ), 1, + CALL DLARFG( LM, A( DPOS+NB, ST ), V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) * - CALL DLARFX( 'Left', LM, LN-1, V( VPOS ), + CALL DLARFX( 'Left', LM, LN-1, V( VPOS ), $ ( TAU( TAUPOS ) ), $ A( DPOS+NB-1, ST+1 ), LDA-1, WORK) - + ENDIF ENDIF ENDIF @@ -374,4 +374,4 @@ * * END OF DSB2ST_KERNELS * - END + END diff --git a/lapack-netlib/SRC/dsbgvx.f b/lapack-netlib/SRC/dsbgvx.f index eab5ebcbb..6de1eb89b 100644 --- a/lapack-netlib/SRC/dsbgvx.f +++ b/lapack-netlib/SRC/dsbgvx.f @@ -261,11 +261,11 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit -*> < 0 : if INFO = -i, the i-th argument had an illegal value +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value *> <= N: if INFO = i, then i eigenvectors failed to converge. *> Their indices are stored in IFAIL. -*> > N : DPBSTF returned an error code; i.e., +*> > N: DPBSTF returned an error code; i.e., *> if INFO = N + i, for 1 <= i <= N, then the leading *> minor of order i of B is not positive definite. *> The factorization of B could not be completed and diff --git a/lapack-netlib/SRC/dsgesv.f b/lapack-netlib/SRC/dsgesv.f index f47327d00..edbb87e7a 100644 --- a/lapack-netlib/SRC/dsgesv.f +++ b/lapack-netlib/SRC/dsgesv.f @@ -92,9 +92,9 @@ *> dimension (LDA,N) *> On entry, the N-by-N coefficient matrix A. *> On exit, if iterative refinement has been successfully used -*> (INFO.EQ.0 and ITER.GE.0, see description below), then A is +*> (INFO = 0 and ITER >= 0, see description below), then A is *> unchanged, if double precision factorization has been used -*> (INFO.EQ.0 and ITER.LT.0, see description below), then the +*> (INFO = 0 and ITER < 0, see description below), then the *> array A contains the factors L and U from the factorization *> A = P*L*U; the unit diagonal elements of L are not stored. *> \endverbatim @@ -111,8 +111,8 @@ *> The pivot indices that define the permutation matrix P; *> row i of the matrix was interchanged with row IPIV(i). 
*> Corresponds either to the single precision factorization -*> (if INFO.EQ.0 and ITER.GE.0) or the double precision -*> factorization (if INFO.EQ.0 and ITER.LT.0). +*> (if INFO = 0 and ITER >= 0) or the double precision +*> factorization (if INFO = 0 and ITER < 0). *> \endverbatim *> *> \param[in] B @@ -406,7 +406,7 @@ 30 CONTINUE * * If we are at this place of the code, this is because we have -* performed ITER=ITERMAX iterations and never satisified the +* performed ITER=ITERMAX iterations and never satisfied the * stopping criterion, set up the ITER flag accordingly and follow up * on double precision routine. * diff --git a/lapack-netlib/SRC/dsposv.f b/lapack-netlib/SRC/dsposv.f index 4a8575241..6c8baa56b 100644 --- a/lapack-netlib/SRC/dsposv.f +++ b/lapack-netlib/SRC/dsposv.f @@ -106,9 +106,9 @@ *> triangular part of the matrix A, and the strictly upper *> triangular part of A is not referenced. *> On exit, if iterative refinement has been successfully used -*> (INFO.EQ.0 and ITER.GE.0, see description below), then A is +*> (INFO = 0 and ITER >= 0, see description below), then A is *> unchanged, if double precision factorization has been used -*> (INFO.EQ.0 and ITER.LT.0, see description below), then the +*> (INFO = 0 and ITER < 0, see description below), then the *> array A contains the factor U or L from the Cholesky *> factorization A = U**T*U or A = L*L**T. *> \endverbatim @@ -413,7 +413,7 @@ 30 CONTINUE * * If we are at this place of the code, this is because we have -* performed ITER=ITERMAX iterations and never satisified the +* performed ITER=ITERMAX iterations and never satisfied the * stopping criterion, set up the ITER flag accordingly and follow * up on double precision routine. * diff --git a/lapack-netlib/SRC/dstemr.f b/lapack-netlib/SRC/dstemr.f index a1a8e3433..16c9d970d 100644 --- a/lapack-netlib/SRC/dstemr.f +++ b/lapack-netlib/SRC/dstemr.f @@ -233,13 +233,13 @@ *> \param[in,out] TRYRAC *> \verbatim *> TRYRAC is LOGICAL -*> If TRYRAC.EQ..TRUE., indicates that the code should check whether +*> If TRYRAC = .TRUE., indicates that the code should check whether *> the tridiagonal matrix defines its eigenvalues to high relative *> accuracy. If so, the code uses relative-accuracy preserving *> algorithms that might be (a bit) slower depending on the matrix. *> If the matrix does not define its eigenvalues to high relative *> accuracy, the code can uses possibly faster algorithms. -*> If TRYRAC.EQ..FALSE., the code is not required to guarantee +*> If TRYRAC = .FALSE., the code is not required to guarantee *> relatively accurate eigenvalues and can use the fastest possible *> techniques. *> On exit, a .TRUE. TRYRAC will be set to .FALSE. 
if the matrix diff --git a/lapack-netlib/SRC/dsyconvf.f b/lapack-netlib/SRC/dsyconvf.f index 37c8157ba..60cfd1e65 100644 --- a/lapack-netlib/SRC/dsyconvf.f +++ b/lapack-netlib/SRC/dsyconvf.f @@ -291,7 +291,7 @@ * * Convert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in factorization order where i decreases from N to 1 * I = N @@ -344,7 +344,7 @@ * * Revert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in reverse factorization order where i increases from 1 to N * I = 1 @@ -435,7 +435,7 @@ * * Convert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in factorization order where k increases from 1 to N * I = 1 @@ -488,7 +488,7 @@ * * Revert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in reverse factorization order where i decreases from N to 1 * I = N diff --git a/lapack-netlib/SRC/dsyconvf_rook.f b/lapack-netlib/SRC/dsyconvf_rook.f index 5c774906e..bd683a087 100644 --- a/lapack-netlib/SRC/dsyconvf_rook.f +++ b/lapack-netlib/SRC/dsyconvf_rook.f @@ -282,7 +282,7 @@ * * Convert PERMUTATIONS * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in factorization order where i decreases from N to 1 * I = N @@ -333,7 +333,7 @@ * * Revert PERMUTATIONS * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in reverse factorization order where i increases from 1 to N * I = 1 @@ -423,7 +423,7 @@ * * Convert PERMUTATIONS * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in factorization order where i increases from 1 to N * I = 1 @@ -474,7 +474,7 @@ * * Revert PERMUTATIONS * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in reverse factorization order where i decreases from N to 1 * I = N diff --git a/lapack-netlib/SRC/dsyev_2stage.f b/lapack-netlib/SRC/dsyev_2stage.f index fff0dedbc..9d802905c 100644 --- a/lapack-netlib/SRC/dsyev_2stage.f +++ b/lapack-netlib/SRC/dsyev_2stage.f @@ -317,7 +317,7 @@ IF( .NOT.WANTZ ) THEN CALL DSTERF( N, W, WORK( INDE ), INFO ) ELSE -* Not available in this release, and agrument checking should not +* Not available in this release, and argument checking should not * let it getting here RETURN CALL DORGTR( UPLO, N, A, LDA, WORK( INDTAU ), WORK( INDWRK ), diff --git a/lapack-netlib/SRC/dsyevd_2stage.f b/lapack-netlib/SRC/dsyevd_2stage.f index 75a6da436..ff8e08d71 100644 --- a/lapack-netlib/SRC/dsyevd_2stage.f +++ b/lapack-netlib/SRC/dsyevd_2stage.f @@ -385,7 +385,7 @@ IF( .NOT.WANTZ ) THEN CALL DSTERF( N, W, WORK( INDE ), INFO ) ELSE -* Not available in this release, and agrument checking should not +* Not available in this release, and argument checking should not * let it getting here RETURN CALL DSTEDC( 'I', N, W, WORK( INDE ), WORK( INDWRK ), N, diff --git a/lapack-netlib/SRC/dsyrfsx.f b/lapack-netlib/SRC/dsyrfsx.f index e128cd4e0..eb091e720 100644 --- a/lapack-netlib/SRC/dsyrfsx.f +++ b/lapack-netlib/SRC/dsyrfsx.f @@ -271,7 +271,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. 
If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -307,14 +307,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension (NPARAMS) -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -322,9 +322,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/dsysv_aa.f b/lapack-netlib/SRC/dsysv_aa.f index 7192928c6..4ee474448 100644 --- a/lapack-netlib/SRC/dsysv_aa.f +++ b/lapack-netlib/SRC/dsysv_aa.f @@ -42,7 +42,7 @@ *> matrices. *> *> Aasen's algorithm is used to factor A as -*> A = U * T * U**T, if UPLO = 'U', or +*> A = U**T * T * U, if UPLO = 'U', or *> A = L * T * L**T, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is symmetric tridiagonal. The factored @@ -86,7 +86,7 @@ *> *> On exit, if INFO = 0, the tridiagonal matrix T and the *> multipliers used to obtain the factor U or L from the -*> factorization A = U*T*U**T or A = L*T*L**T as computed by +*> factorization A = U**T*T*U or A = L*T*L**T as computed by *> DSYTRF. *> \endverbatim *> @@ -230,7 +230,7 @@ RETURN END IF * -* Compute the factorization A = U*T*U**T or A = L*T*L**T. +* Compute the factorization A = U**T*T*U or A = L*T*L**T. * CALL DSYTRF_AA( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) IF( INFO.EQ.0 ) THEN diff --git a/lapack-netlib/SRC/dsysv_aa_2stage.f b/lapack-netlib/SRC/dsysv_aa_2stage.f index 05e538f0b..ef593bc7e 100644 --- a/lapack-netlib/SRC/dsysv_aa_2stage.f +++ b/lapack-netlib/SRC/dsysv_aa_2stage.f @@ -45,7 +45,7 @@ *> matrices. *> *> Aasen's 2-stage algorithm is used to factor A as -*> A = U * T * U**T, if UPLO = 'U', or +*> A = U**T * T * U, if UPLO = 'U', or *> A = L * T * L**T, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is symmetric and band. The matrix T is @@ -259,7 +259,7 @@ END IF * * -* Compute the factorization A = U*T*U**T or A = L*T*L**T. +* Compute the factorization A = U**T*T*U or A = L*T*L**T. 
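A brief aside from the hunk above: the corrected A = U**T*T*U form changes only the documentation, not the calling sequence, so the Aasen-based driver is still invoked like any other LAPACK symmetric solver. A minimal hedged sketch (illustration only, with fixed sizes and a generously sized rather than queried workspace):

      PROGRAM AASEN_EXAMPLE
*     Illustration only: solve A*X = B for a symmetric 4-by-4 A
*     via the Aasen factorization A = U**T*T*U (UPLO = 'U').
      IMPLICIT NONE
      INTEGER            N, NRHS, LWORK
      PARAMETER          ( N = 4, NRHS = 1, LWORK = 64*N )
      DOUBLE PRECISION   A( N, N ), B( N, NRHS ), WORK( LWORK )
      INTEGER            IPIV( N ), INFO
      EXTERNAL           DSYSV_AA
*     ... fill the upper triangle of A and the right-hand side B ...
      CALL DSYSV_AA( 'U', N, NRHS, A, N, IPIV, B, N, WORK, LWORK,
     $               INFO )
      IF( INFO.NE.0 ) WRITE( *, * ) 'DSYSV_AA: INFO =', INFO
*     On success, B holds X; A holds T and the multipliers for U,
*     and IPIV records the symmetric row/column interchanges.
      END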
* CALL DSYTRF_AA_2STAGE( UPLO, N, A, LDA, TB, LTB, IPIV, IPIV2, $ WORK, LWORK, INFO ) diff --git a/lapack-netlib/SRC/dsysvxx.f b/lapack-netlib/SRC/dsysvxx.f index 6e167d81e..0be50bcd1 100644 --- a/lapack-netlib/SRC/dsysvxx.f +++ b/lapack-netlib/SRC/dsysvxx.f @@ -377,7 +377,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -413,14 +413,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension (NPARAMS) -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -428,9 +428,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. *> (other values are reserved for future use) *> *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual diff --git a/lapack-netlib/SRC/dsytf2_rk.f b/lapack-netlib/SRC/dsytf2_rk.f index 45cf62ab9..cc9e4616e 100644 --- a/lapack-netlib/SRC/dsytf2_rk.f +++ b/lapack-netlib/SRC/dsytf2_rk.f @@ -312,7 +312,7 @@ * * Factorize A as U*D*U**T using the upper triangle of A * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = ZERO @@ -623,7 +623,7 @@ * * Factorize A as L*D*L**T using the lower triangle of A * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = ZERO * diff --git a/lapack-netlib/SRC/dsytrd_2stage.f b/lapack-netlib/SRC/dsytrd_2stage.f index 522602bb2..fc4b92908 100644 --- a/lapack-netlib/SRC/dsytrd_2stage.f +++ b/lapack-netlib/SRC/dsytrd_2stage.f @@ -123,23 +123,22 @@ *> *> \param[out] HOUS2 *> \verbatim -*> HOUS2 is DOUBLE PRECISION array, dimension LHOUS2, that -*> store the Householder representation of the stage2 +*> HOUS2 is DOUBLE PRECISION array, dimension (LHOUS2) +*> Stores the Householder representation of the stage2 *> band to tridiagonal. *> \endverbatim *> *> \param[in] LHOUS2 *> \verbatim *> LHOUS2 is INTEGER -*> The dimension of the array HOUS2. LHOUS2 = MAX(1, dimension) -*> If LWORK = -1, or LHOUS2=-1, +*> The dimension of the array HOUS2. +*> If LWORK = -1, or LHOUS2 = -1, *> then a query is assumed; the routine *> only calculates the optimal size of the HOUS2 array, returns *> this value as the first entry of the HOUS2 array, and no error *> message related to LHOUS2 is issued by XERBLA. 
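The LHOUS2 = -1 behaviour described above is the standard LAPACK workspace-query idiom. A hedged caller-side sketch, with illustrative names (QUERY_2STAGE is hypothetical), assuming the documented DSYTRD_2STAGE interface:

      SUBROUTINE QUERY_2STAGE( N, A, LDA, D, E, TAU, LHOUS2, LWORK,
     $                         INFO )
*     Illustration only: pass LHOUS2 = LWORK = -1 so the routine
*     merely reports the optimal sizes in HQ(1) and WQ(1); the
*     caller then allocates and repeats the call with real arrays.
      IMPLICIT NONE
      INTEGER            N, LDA, LHOUS2, LWORK, INFO
      DOUBLE PRECISION   A( LDA, * ), D( * ), E( * ), TAU( * )
      DOUBLE PRECISION   HQ( 1 ), WQ( 1 )
      EXTERNAL           DSYTRD_2STAGE
      CALL DSYTRD_2STAGE( 'N', 'U', N, A, LDA, D, E, TAU, HQ, -1,
     $                    WQ, -1, INFO )
      LHOUS2 = INT( HQ( 1 ) )
      LWORK = INT( WQ( 1 ) )
      RETURN
      END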
-*> LHOUS2 = MAX(1, dimension) where -*> dimension = 4*N if VECT='N' -*> not available now if VECT='H' +*> If VECT='N', LHOUS2 = max(1, 4*n); +*> if VECT='V', option not yet available. *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/dsytrd_sb2st.F b/lapack-netlib/SRC/dsytrd_sb2st.F index 4d81fe226..0c0dbf125 100644 --- a/lapack-netlib/SRC/dsytrd_sb2st.F +++ b/lapack-netlib/SRC/dsytrd_sb2st.F @@ -50,9 +50,9 @@ * Arguments: * ========== * -*> \param[in] STAGE +*> \param[in] STAGE1 *> \verbatim -*> STAGE is CHARACTER*1 +*> STAGE1 is CHARACTER*1 *> = 'N': "No": to mention that the stage 1 of the reduction *> from dense to band using the dsytrd_sy2sb routine *> was not called before this routine to reproduce AB. diff --git a/lapack-netlib/SRC/dsytrd_sy2sb.f b/lapack-netlib/SRC/dsytrd_sy2sb.f index e0a5debc5..7f30817b0 100644 --- a/lapack-netlib/SRC/dsytrd_sy2sb.f +++ b/lapack-netlib/SRC/dsytrd_sy2sb.f @@ -363,7 +363,7 @@ * * * Set the workspace of the triangular matrix T to zero once such a -* way everytime T is generated the upper/lower portion will be always zero +* way every time T is generated the upper/lower portion will be always zero * CALL DLASET( "A", LDT, KD, ZERO, ZERO, WORK( TPOS ), LDT ) * diff --git a/lapack-netlib/SRC/dsytrf.f b/lapack-netlib/SRC/dsytrf.f index d8da4f122..a3bd30a2f 100644 --- a/lapack-netlib/SRC/dsytrf.f +++ b/lapack-netlib/SRC/dsytrf.f @@ -39,7 +39,7 @@ *> the Bunch-Kaufman diagonal pivoting method. The form of the *> factorization is *> -*> A = U*D*U**T or A = L*D*L**T +*> A = U**T*D*U or A = L*D*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and D is symmetric and block diagonal with @@ -144,7 +144,7 @@ *> *> \verbatim *> -*> If UPLO = 'U', then A = U*D*U**T, where +*> If UPLO = 'U', then A = U**T*D*U, where *> U = P(n)*U(n)* ... *P(k)U(k)* ..., *> i.e., U is a product of terms P(k)*U(k), where k decreases from n to *> 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 @@ -262,7 +262,7 @@ * IF( UPPER ) THEN * -* Factorize A as U*D*U**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * * K is the main loop index, decreasing from N to 1 in steps of * KB, where KB is the number of columns factorized by DLASYF; diff --git a/lapack-netlib/SRC/dsytrf_aa.f b/lapack-netlib/SRC/dsytrf_aa.f index 24b3f393b..6df0da2cd 100644 --- a/lapack-netlib/SRC/dsytrf_aa.f +++ b/lapack-netlib/SRC/dsytrf_aa.f @@ -37,7 +37,7 @@ *> DSYTRF_AA computes the factorization of a real symmetric matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a symmetric tridiagonal matrix. @@ -223,7 +223,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... 
* * Copy first row A(1, 1:N) into H(1:n) (stored in WORK(1:N)) @@ -256,7 +256,7 @@ $ A( MAX(1, J), J+1 ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J @@ -375,7 +375,7 @@ $ A( J+1, MAX(1, J) ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J diff --git a/lapack-netlib/SRC/dsytrf_aa_2stage.f b/lapack-netlib/SRC/dsytrf_aa_2stage.f index 25fc1a2eb..a37be5bdd 100644 --- a/lapack-netlib/SRC/dsytrf_aa_2stage.f +++ b/lapack-netlib/SRC/dsytrf_aa_2stage.f @@ -38,7 +38,7 @@ *> DSYTRF_AA_2STAGE computes the factorization of a real symmetric matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a symmetric band matrix with the @@ -103,6 +103,22 @@ *> no error message related to LTB is issued by XERBLA. *> \endverbatim *> +*> \param[out] IPIV +*> \verbatim +*> IPIV is INTEGER array, dimension (N) +*> On exit, it contains the details of the interchanges, i.e., +*> the row and column k of A were interchanged with the +*> row and column IPIV(k). +*> \endverbatim +*> +*> \param[out] IPIV2 +*> \verbatim +*> IPIV2 is INTEGER array, dimension (N) +*> On exit, it contains the details of the interchanges, i.e., +*> the row and column k of T were interchanged with the +*> row and column IPIV2(k). +*> \endverbatim +*> *> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION workspace of size LWORK @@ -120,22 +136,6 @@ *> no error message related to LWORK is issued by XERBLA. *> \endverbatim *> -*> \param[out] IPIV -*> \verbatim -*> IPIV is INTEGER array, dimension (N) -*> On exit, it contains the details of the interchanges, i.e., -*> the row and column k of A were interchanged with the -*> row and column IPIV(k). -*> \endverbatim -*> -*> \param[out] IPIV2 -*> \verbatim -*> IPIV2 is INTEGER array, dimension (N) -*> On exit, it contains the details of the interchanges, i.e., -*> the row and column k of T were interchanged with the -*> row and column IPIV2(k). -*> \endverbatim -*> *> \param[out] INFO *> \verbatim *> INFO is INTEGER @@ -275,7 +275,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... 
* DO J = 0, NT-1 @@ -442,12 +442,14 @@ c END IF * > Apply pivots to previous columns of L CALL DSWAP( K-1, A( (J+1)*NB+1, I1 ), 1, $ A( (J+1)*NB+1, I2 ), 1 ) -* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL DSWAP( I2-I1-1, A( I1, I1+1 ), LDA, - $ A( I1+1, I2 ), 1 ) +* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) + IF( I2.GT.(I1+1) ) + $ CALL DSWAP( I2-I1-1, A( I1, I1+1 ), LDA, + $ A( I1+1, I2 ), 1 ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL DSWAP( N-I2, A( I1, I2+1 ), LDA, - $ A( I2, I2+1 ), LDA ) + IF( I2.LT.N ) + $ CALL DSWAP( N-I2, A( I1, I2+1 ), LDA, + $ A( I2, I2+1 ), LDA ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) @@ -616,11 +618,13 @@ c END IF CALL DSWAP( K-1, A( I1, (J+1)*NB+1 ), LDA, $ A( I2, (J+1)*NB+1 ), LDA ) * > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL DSWAP( I2-I1-1, A( I1+1, I1 ), 1, - $ A( I2, I1+1 ), LDA ) + IF( I2.GT.(I1+1) ) + $ CALL DSWAP( I2-I1-1, A( I1+1, I1 ), 1, + $ A( I2, I1+1 ), LDA ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL DSWAP( N-I2, A( I2+1, I1 ), 1, - $ A( I2+1, I2 ), 1 ) + IF( I2.LT.N ) + $ CALL DSWAP( N-I2, A( I2+1, I1 ), 1, + $ A( I2+1, I2 ), 1 ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) diff --git a/lapack-netlib/SRC/dsytri2.f b/lapack-netlib/SRC/dsytri2.f index 23f8b9fa2..5c3a5ec76 100644 --- a/lapack-netlib/SRC/dsytri2.f +++ b/lapack-netlib/SRC/dsytri2.f @@ -62,7 +62,7 @@ *> \param[in,out] A *> \verbatim *> A is DOUBLE PRECISION array, dimension (LDA,N) -*> On entry, the NB diagonal matrix D and the multipliers +*> On entry, the block diagonal matrix D and the multipliers *> used to obtain the factor U or L as computed by DSYTRF. *> *> On exit, if INFO = 0, the (symmetric) inverse of the original @@ -82,7 +82,7 @@ *> \param[in] IPIV *> \verbatim *> IPIV is INTEGER array, dimension (N) -*> Details of the interchanges and the NB structure of D +*> Details of the interchanges and the block structure of D *> as determined by DSYTRF. *> \endverbatim *> diff --git a/lapack-netlib/SRC/dsytrs_aa.f b/lapack-netlib/SRC/dsytrs_aa.f index 05ef31ff3..d9dc0a6d1 100644 --- a/lapack-netlib/SRC/dsytrs_aa.f +++ b/lapack-netlib/SRC/dsytrs_aa.f @@ -37,7 +37,7 @@ *> \verbatim *> *> DSYTRS_AA solves a system of linear equations A*X = B with a real -*> symmetric matrix A using the factorization A = U*T*U**T or +*> symmetric matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by DSYTRF_AA. *> \endverbatim * @@ -49,7 +49,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -97,14 +97,16 @@ *> The leading dimension of the array B. LDB >= max(1,N). *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim -*> WORK is DOUBLE array, dimension (MAX(1,LWORK)) +*> WORK is DOUBLE PRECISION array, dimension (MAX(1,LWORK)) *> \endverbatim *> *> \param[in] LWORK *> \verbatim -*> LWORK is INTEGER, LWORK >= MAX(1,3*N-2). +*> LWORK is INTEGER +*> The dimension of the array WORK. LWORK >= max(1,3*N-2). +*> \endverbatim *> *> \param[out] INFO *> \verbatim @@ -198,22 +200,29 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. 
+* +* 1) Forward substitution with U**T +* + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B * -* Pivot, P**T * B + DO K = 1, N + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL DSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO * - DO K = 1, N - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL DSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO +* Compute U**T \ B -> B [ (U**T \P**T * B) ] * -* Compute (U \P**T * B) -> B [ (U \P**T * B) ] + CALL DTRSM('L', 'U', 'T', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB) + END IF * - CALL DTRSM('L', 'U', 'T', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) +* 2) Solve with triangular matrix T * -* Compute T \ B -> B [ T \ (U \P**T * B) ] +* Compute T \ B -> B [ T \ (U**T \P**T * B) ] * CALL DLACPY( 'F', 1, N, A( 1, 1 ), LDA+1, WORK( N ), 1) IF( N.GT.1 ) THEN @@ -223,35 +232,47 @@ CALL DGTSV( N, NRHS, WORK( 1 ), WORK( N ), WORK( 2*N ), B, LDB, $ INFO ) * -* Compute (U**T \ B) -> B [ U**T \ (T \ (U \P**T * B) ) ] +* 3) Backward substitution with U +* + IF( N.GT.1 ) THEN * - CALL DTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) +* Compute U \ B -> B [ U \ (T \ (U**T \P**T * B) ) ] * -* Pivot, P * B [ P * (U**T \ (T \ (U \P**T * B) )) ] + CALL DTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB) * - DO K = N, 1, -1 - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL DSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO +* Pivot, P * B -> B [ P * (U \ (T \ (U**T \P**T * B) )) ] +* + DO K = N, 1, -1 + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL DSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO + END IF * ELSE * * Solve A*X = B, where A = L*T*L**T. * -* Pivot, P**T * B +* 1) Forward substitution with L * - DO K = 1, N - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL DSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B +* + DO K = 1, N + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL DSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute L \ B -> B [ (L \P**T * B) ] +* + CALL DTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB) + END IF * - CALL DTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) +* 2) Solve with triangular matrix T * * Compute T \ B -> B [ T \ (L \P**T * B) ] * @@ -263,18 +284,23 @@ CALL DGTSV( N, NRHS, WORK( 1 ), WORK(N), WORK( 2*N ), B, LDB, $ INFO) * -* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] +* 3) Backward substitution with L**T * - CALL DTRSM( 'L', 'L', 'T', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) + IF( N.GT.1 ) THEN +* +* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] + CALL DTRSM( 'L', 'L', 'T', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB) * - DO K = N, 1, -1 - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL DSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO +* Pivot, P * B -> B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* + DO K = N, 1, -1 + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL DSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO + END IF * END IF * diff --git a/lapack-netlib/SRC/dsytrs_aa_2stage.f b/lapack-netlib/SRC/dsytrs_aa_2stage.f index bb283cb95..69c702f8a 100644 --- a/lapack-netlib/SRC/dsytrs_aa_2stage.f +++ b/lapack-netlib/SRC/dsytrs_aa_2stage.f @@ -36,7 +36,7 @@ *> \verbatim *> *> DSYTRS_AA_2STAGE solves a system of linear equations A*X = B with a real -*> symmetric matrix A using the factorization A = 
U*T*U**T or +*> symmetric matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by DSYTRF_AA_2STAGE. *> \endverbatim * @@ -48,7 +48,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -208,15 +208,15 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. * IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL DLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (U**T \P**T * B) -> B [ (U**T \P**T * B) ] +* Compute (U**T \ B) -> B [ (U**T \P**T * B) ] * CALL DTRSM( 'L', 'U', 'T', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) @@ -234,7 +234,7 @@ CALL DTRSM( 'L', 'U', 'N', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (U \ (T \ (U**T \P**T * B) )) ] +* Pivot, P * B -> B [ P * (U \ (T \ (U**T \P**T * B) )) ] * CALL DLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * @@ -246,11 +246,11 @@ * IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL DLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute (L \ B) -> B [ (L \P**T * B) ] * CALL DTRSM( 'L', 'L', 'N', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) @@ -268,7 +268,7 @@ CALL DTRSM( 'L', 'L', 'T', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* Pivot, P * B -> B [ P * (L**T \ (T \ (L \P**T * B) )) ] * CALL DLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * diff --git a/lapack-netlib/SRC/dtgsy2.f b/lapack-netlib/SRC/dtgsy2.f index 1c687b15e..e8c9b4001 100644 --- a/lapack-netlib/SRC/dtgsy2.f +++ b/lapack-netlib/SRC/dtgsy2.f @@ -71,7 +71,7 @@ *> R * B**T + L * E**T = scale * -F *> *> This case is used to compute an estimate of Dif[(A, D), (B, E)] = -*> sigma_min(Z) using reverse communicaton with DLACON. +*> sigma_min(Z) using reverse communication with DLACON. *> *> DTGSY2 also (IJOB >= 1) contributes to the computation in DTGSYL *> of an upper bound on the separation between to matrix pairs. Then @@ -85,7 +85,7 @@ *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 -*> = 'N', solve the generalized Sylvester equation (1). +*> = 'N': solve the generalized Sylvester equation (1). *> = 'T': solve the 'transposed' system (3). *> \endverbatim *> diff --git a/lapack-netlib/SRC/dtgsyl.f b/lapack-netlib/SRC/dtgsyl.f index 1cc3a1bf8..bb0751794 100644 --- a/lapack-netlib/SRC/dtgsyl.f +++ b/lapack-netlib/SRC/dtgsyl.f @@ -88,20 +88,20 @@ *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 -*> = 'N', solve the generalized Sylvester equation (1). -*> = 'T', solve the 'transposed' system (3). +*> = 'N': solve the generalized Sylvester equation (1). +*> = 'T': solve the 'transposed' system (3). *> \endverbatim *> *> \param[in] IJOB *> \verbatim *> IJOB is INTEGER *> Specifies what kind of functionality to be performed. -*> =0: solve (1) only. -*> =1: The functionality of 0 and 3. -*> =2: The functionality of 0 and 4. -*> =3: Only an estimate of Dif[(A,D), (B,E)] is computed. +*> = 0: solve (1) only. +*> = 1: The functionality of 0 and 3. +*> = 2: The functionality of 0 and 4. +*> = 3: Only an estimate of Dif[(A,D), (B,E)] is computed. *> (look ahead strategy IJOB = 1 is used). -*> =4: Only an estimate of Dif[(A,D), (B,E)] is computed. 
+*> = 4: Only an estimate of Dif[(A,D), (B,E)] is computed. *> ( DGECON on sub-systems is used ). *> Not referenced if TRANS = 'T'. *> \endverbatim diff --git a/lapack-netlib/SRC/dtpmlqt.f b/lapack-netlib/SRC/dtpmlqt.f index 3782d0c71..975ebdc27 100644 --- a/lapack-netlib/SRC/dtpmlqt.f +++ b/lapack-netlib/SRC/dtpmlqt.f @@ -94,7 +94,7 @@ *> *> \param[in] V *> \verbatim -*> V is DOUBLE PRECISION array, dimension (LDA,K) +*> V is DOUBLE PRECISION array, dimension (LDV,K) *> The i-th row must contain the vector which defines the *> elementary reflector H(i), for i = 1,2,...,k, as returned by *> DTPLQT in B. See Further Details. diff --git a/lapack-netlib/SRC/dtpmqrt.f b/lapack-netlib/SRC/dtpmqrt.f index 44985a80d..a7888e192 100644 --- a/lapack-netlib/SRC/dtpmqrt.f +++ b/lapack-netlib/SRC/dtpmqrt.f @@ -94,7 +94,7 @@ *> *> \param[in] V *> \verbatim -*> V is DOUBLE PRECISION array, dimension (LDA,K) +*> V is DOUBLE PRECISION array, dimension (LDV,K) *> The i-th column must contain the vector which defines the *> elementary reflector H(i), for i = 1,2,...,k, as returned by *> CTPQRT in B. See Further Details. diff --git a/lapack-netlib/SRC/dtprfb.f b/lapack-netlib/SRC/dtprfb.f index 6ae8fad8c..6d3b4dae1 100644 --- a/lapack-netlib/SRC/dtprfb.f +++ b/lapack-netlib/SRC/dtprfb.f @@ -152,8 +152,8 @@ *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. -*> If SIDE = 'L', LDC >= max(1,K); -*> If SIDE = 'R', LDC >= max(1,M). +*> If SIDE = 'L', LDA >= max(1,K); +*> If SIDE = 'R', LDA >= max(1,M). *> \endverbatim *> *> \param[in,out] B diff --git a/lapack-netlib/SRC/ilaenv.f b/lapack-netlib/SRC/ilaenv.f index a438ada38..7f68c383d 100644 --- a/lapack-netlib/SRC/ilaenv.f +++ b/lapack-netlib/SRC/ilaenv.f @@ -132,7 +132,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date November 2017 +*> \date November 2019 * *> \ingroup OTHERauxiliary * @@ -162,10 +162,10 @@ * ===================================================================== INTEGER FUNCTION ILAENV( ISPEC, NAME, OPTS, N1, N2, N3, N4 ) * -* -- LAPACK auxiliary routine (version 3.8.0) -- +* -- LAPACK auxiliary routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* November 2017 +* November 2019 * * .. Scalar Arguments .. CHARACTER*( * ) NAME, OPTS @@ -271,7 +271,16 @@ * NB = 1 * - IF( C2.EQ.'GE' ) THEN + IF( SUBNAM(2:6).EQ.'LAORH' ) THEN +* +* This is for *LAORHR_GETRFNP routine +* + IF( SNAME ) THEN + NB = 32 + ELSE + NB = 32 + END IF + ELSE IF( C2.EQ.'GE' ) THEN IF( C3.EQ.'TRF' ) THEN IF( SNAME ) THEN NB = 64 diff --git a/lapack-netlib/SRC/ilaenv2stage.f b/lapack-netlib/SRC/ilaenv2stage.f index 3c0d34a12..db30a1b4d 100644 --- a/lapack-netlib/SRC/ilaenv2stage.f +++ b/lapack-netlib/SRC/ilaenv2stage.f @@ -39,9 +39,9 @@ *> *> ILAENV2STAGE returns an INTEGER *> if ILAENV2STAGE >= 0: ILAENV2STAGE returns the value of the parameter -* specified by ISPEC +*> specified by ISPEC *> if ILAENV2STAGE < 0: if ILAENV2STAGE = -k, the k-th argument had an -* illegal value. +*> illegal value. 
*> *> This version provides a set of parameters which should give good, *> but not optimal, performance on many of the currently available diff --git a/lapack-netlib/SRC/iparam2stage.F b/lapack-netlib/SRC/iparam2stage.F index 836e20eed..1a37300a7 100644 --- a/lapack-netlib/SRC/iparam2stage.F +++ b/lapack-netlib/SRC/iparam2stage.F @@ -35,7 +35,7 @@ *> \verbatim *> *> This program sets problem and machine dependent parameters -*> useful for xHETRD_2STAGE, xHETRD_H@2HB, xHETRD_HB2ST, +*> useful for xHETRD_2STAGE, xHETRD_HE2HB, xHETRD_HB2ST, *> xGEBRD_2STAGE, xGEBRD_GE2GB, xGEBRD_GB2BD *> and related subroutines for eigenvalue problems. *> It is called whenever ILAENV is called with 17 <= ISPEC <= 21. @@ -53,7 +53,7 @@ *> return. *> *> ISPEC=17: the optimal blocksize nb for the reduction to -* BAND +*> BAND *> *> ISPEC=18: the optimal blocksize ib for the eigenvectors *> singular vectors update routine @@ -90,14 +90,14 @@ *> \param[in] NBI *> \verbatim *> NBI is INTEGER which is the used in the reduciton, -* (e.g., the size of the band), needed to compute workspace -* and LHOUS2. +*> (e.g., the size of the band), needed to compute workspace +*> and LHOUS2. *> \endverbatim *> *> \param[in] IBI *> \verbatim *> IBI is INTEGER which represent the IB of the reduciton, -* needed to compute workspace and LHOUS2. +*> needed to compute workspace and LHOUS2. *> \endverbatim *> *> \param[in] NXI diff --git a/lapack-netlib/SRC/iparmq.f b/lapack-netlib/SRC/iparmq.f index a9212b3e0..bb711243d 100644 --- a/lapack-netlib/SRC/iparmq.f +++ b/lapack-netlib/SRC/iparmq.f @@ -60,7 +60,7 @@ *> invest in an (expensive) multi-shift QR sweep. *> If the aggressive early deflation subroutine *> finds LD converged eigenvalues from an order -*> NW deflation window and LD.GT.(NW*NIBBLE)/100, +*> NW deflation window and LD > (NW*NIBBLE)/100, *> then the next QR sweep is skipped and early *> deflation is applied immediately to the *> remaining active diagonal block. Setting @@ -184,8 +184,8 @@ *> This depends on ILO, IHI and NS, the *> number of simultaneous shifts returned *> by IPARMQ(ISPEC=15). The default for -*> (IHI-ILO+1).LE.500 is NS. The default -*> for (IHI-ILO+1).GT.500 is 3*NS/2. +*> (IHI-ILO+1) <= 500 is NS. The default +*> for (IHI-ILO+1) > 500 is 3*NS/2. *> *> IPARMQ(ISPEC=14) Nibble crossover point. Default: 14. 
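To make the NIBBLE crossover above concrete, a small illustrative function (not LAPACK code) implementing the documented test; with the default NIBBLE = 14 and a deflation window of NW = 50, the next sweep is skipped once LD > (50*14)/100 = 7:

      LOGICAL FUNCTION SKIP_SWEEP( LD, NW, NIBBLE )
*     Illustration only: skip the next multi-shift QR sweep when the
*     newly deflated eigenvalue count LD exceeds NIBBLE percent of
*     the order-NW deflation window.
      IMPLICIT NONE
      INTEGER            LD, NW, NIBBLE
      SKIP_SWEEP = LD.GT.( NW*NIBBLE ) / 100
      RETURN
      END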
*> diff --git a/lapack-netlib/SRC/meson.build b/lapack-netlib/SRC/meson.build new file mode 100644 index 000000000..bad682401 --- /dev/null +++ b/lapack-netlib/SRC/meson.build @@ -0,0 +1,11 @@ +ALLAUX = files('ilaenv.f', 'ilaenv2stage.f', 'ieeeck.f', 'lsamen.f', 'xerbla.f', 'xerbla_array.f', 'iparmq.f', 'iparam2stage.F', 'ilaprec.f', 'ilatrans.f', 'ilauplo.f', 'iladiag.f', 'chla_transtype.f', '../INSTALL/ilaver.f', '../INSTALL/lsame.f', '../INSTALL/slamch.f') + + +SCLAUX = files('sbdsdc.f', 'sbdsqr.f', 'sdisna.f', 'slabad.f', 'slacpy.f', 'sladiv.f', 'slae2.f', 'slaebz.f', 'slaed0.f', 'slaed1.f', 'slaed2.f', 'slaed3.f', 'slaed4.f', 'slaed5.f', 'slaed6.f', 'slaed7.f', 'slaed8.f', 'slaed9.f', 'slaeda.f', 'slaev2.f', 'slagtf.f', 'slagts.f', 'slamrg.f', 'slanst.f', 'slapy2.f', 'slapy3.f', 'slarnv.f', 'slarra.f', 'slarrb.f', 'slarrc.f', 'slarrd.f', 'slarre.f', 'slarrf.f', 'slarrj.f', 'slarrk.f', 'slarrr.f', 'slaneg.f', 'slartg.f', 'slaruv.f', 'slas2.f', 'slascl.f', 'slasd0.f', 'slasd1.f', 'slasd2.f', 'slasd3.f', 'slasd4.f', 'slasd5.f', 'slasd6.f', 'slasd7.f', 'slasd8.f', 'slasda.f', 'slasdq.f', 'slasdt.f', 'slaset.f', 'slasq1.f', 'slasq2.f', 'slasq3.f', 'slasq4.f', 'slasq5.f', 'slasq6.f', 'slasr.f', 'slasrt.f', 'slassq.f', 'slasv2.f', 'spttrf.f', 'sstebz.f', 'sstedc.f', 'ssteqr.f', 'ssterf.f', 'slaisnan.f', 'sisnan.f', 'slartgp.f', 'slartgs.f', '../INSTALL/second_INT_CPU_TIME.f') + +DZLAUX = files('dbdsdc.f', 'dbdsqr.f', 'ddisna.f', 'dlabad.f', 'dlacpy.f', 'dladiv.f', 'dlae2.f', 'dlaebz.f', 'dlaed0.f', 'dlaed1.f', 'dlaed2.f', 'dlaed3.f', 'dlaed4.f', 'dlaed5.f', 'dlaed6.f', 'dlaed7.f', 'dlaed8.f', 'dlaed9.f', 'dlaeda.f', 'dlaev2.f', 'dlagtf.f', 'dlagts.f', 'dlamrg.f', 'dlanst.f', 'dlapy2.f', 'dlapy3.f', 'dlarnv.f', 'dlarra.f', 'dlarrb.f', 'dlarrc.f', 'dlarrd.f', 'dlarre.f', 'dlarrf.f', 'dlarrj.f', 'dlarrk.f', 'dlarrr.f', 'dlaneg.f', 'dlartg.f', 'dlaruv.f', 'dlas2.f', 'dlascl.f', 'dlasd0.f', 'dlasd1.f', 'dlasd2.f', 'dlasd3.f', 'dlasd4.f', 'dlasd5.f', 'dlasd6.f', 'dlasd7.f', 'dlasd8.f', 'dlasda.f', 'dlasdq.f', 'dlasdt.f', 'dlaset.f', 'dlasq1.f', 'dlasq2.f', 'dlasq3.f', 'dlasq4.f', 'dlasq5.f', 'dlasq6.f', 'dlasr.f', 'dlasrt.f', 'dlassq.f', 'dlasv2.f', 'dpttrf.f', 'dstebz.f', 'dstedc.f', 'dsteqr.f', 'dsterf.f', 'dlaisnan.f', 'disnan.f', 'dlartgp.f', 'dlartgs.f', '../INSTALL/dlamch.f', '../INSTALL/dsecnd_INT_CPU_TIME.f') + + +SLASRC = files('sbdsvdx.f', 'spotrf2.f', 'sgetrf2.f', 'sgbbrd.f', 'sgbcon.f', 'sgbequ.f', 'sgbrfs.f', 'sgbsv.f', 'sgbsvx.f', 'sgbtf2.f', 'sgbtrf.f', 'sgbtrs.f', 'sgebak.f', 'sgebal.f', 'sgebd2.f', 'sgebrd.f', 'sgecon.f', 'sgeequ.f', 'sgees.f', 'sgeesx.f', 'sgeev.f', 'sgeevx.f', 'sgehd2.f', 'sgehrd.f', 'sgelq2.f', 'sgelqf.f', 'sgels.f', 'sgelsd.f', 'sgelss.f', 'sgelsy.f', 'sgeql2.f', 'sgeqlf.f', 'sgeqp3.f', 'sgeqr2.f', 'sgeqr2p.f', 'sgeqrf.f', 'sgeqrfp.f', 'sgerfs.f', 'sgerq2.f', 'sgerqf.f', 'sgesc2.f', 'sgesdd.f', 'sgesv.f', 'sgesvd.f', 'sgesvdx.f', 'sgesvx.f', 'sgetc2.f', 'sgetf2.f', 'sgetri.f', 'sggbak.f', 'sggbal.f', 'sgges.f', 'sgges3.f', 'sggesx.f', 'sggev.f', 'sggev3.f', 'sggevx.f', 'sggglm.f', 'sgghrd.f', 'sgghd3.f', 'sgglse.f', 'sggqrf.f', 'sggrqf.f', 'sggsvd3.f', 'sggsvp3.f', 'sgtcon.f', 'sgtrfs.f', 'sgtsv.f', 'sgtsvx.f', 'sgttrf.f', 'sgttrs.f', 'sgtts2.f', 'shgeqz.f', 'shsein.f', 'shseqr.f', 'slabrd.f', 'slacon.f', 'slacn2.f', 'slaein.f', 'slaexc.f', 'slag2.f', 'slags2.f', 'slagtm.f', 'slagv2.f', 'slahqr.f', 'slahr2.f', 'slaic1.f', 'slaln2.f', 'slals0.f', 'slalsa.f', 'slalsd.f', 'slangb.f', 'slange.f', 'slangt.f', 'slanhs.f', 'slansb.f', 'slansp.f', 'slansy.f', 
'slantb.f', 'slantp.f', 'slantr.f', 'slanv2.f', 'slapll.f', 'slapmt.f', 'slaqgb.f', 'slaqge.f', 'slaqp2.f', 'slaqps.f', 'slaqsb.f', 'slaqsp.f', 'slaqsy.f', 'slaqr0.f', 'slaqr1.f', 'slaqr2.f', 'slaqr3.f', 'slaqr4.f', 'slaqr5.f', 'slaqtr.f', 'slar1v.f', 'slar2v.f', 'ilaslr.f', 'ilaslc.f', 'slarf.f', 'slarfb.f', 'slarfg.f', 'slarfgp.f', 'slarft.f', 'slarfx.f', 'slarfy.f', 'slargv.f', 'slarrv.f', 'slartv.f', 'slarz.f', 'slarzb.f', 'slarzt.f', 'slaswp.f', 'slasy2.f', 'slasyf.f', 'slasyf_rook.f', 'slasyf_rk.f', 'slatbs.f', 'slatdf.f', 'slatps.f', 'slatrd.f', 'slatrs.f', 'slatrz.f', 'slauu2.f', 'slauum.f', 'sopgtr.f', 'sopmtr.f', 'sorg2l.f', 'sorg2r.f', 'sorgbr.f', 'sorghr.f', 'sorgl2.f', 'sorglq.f', 'sorgql.f', 'sorgqr.f', 'sorgr2.f', 'sorgrq.f', 'sorgtr.f', 'sorm2l.f', 'sorm2r.f', 'sorm22.f', 'sormbr.f', 'sormhr.f', 'sorml2.f', 'sormlq.f', 'sormql.f', 'sormqr.f', 'sormr2.f', 'sormr3.f', 'sormrq.f', 'sormrz.f', 'sormtr.f', 'spbcon.f', 'spbequ.f', 'spbrfs.f', 'spbstf.f', 'spbsv.f', 'spbsvx.f', 'spbtf2.f', 'spbtrf.f', 'spbtrs.f', 'spocon.f', 'spoequ.f', 'sporfs.f', 'sposv.f', 'sposvx.f', 'spotf2.f', 'spotri.f', 'spstrf.f', 'spstf2.f', 'sppcon.f', 'sppequ.f', 'spprfs.f', 'sppsv.f', 'sppsvx.f', 'spptrf.f', 'spptri.f', 'spptrs.f', 'sptcon.f', 'spteqr.f', 'sptrfs.f', 'sptsv.f', 'sptsvx.f', 'spttrs.f', 'sptts2.f', 'srscl.f', 'ssbev.f', 'ssbevd.f', 'ssbevx.f', 'ssbgst.f', 'ssbgv.f', 'ssbgvd.f', 'ssbgvx.f', 'ssbtrd.f', 'sspcon.f', 'sspev.f', 'sspevd.f', 'sspevx.f', 'sspgst.f', 'sspgv.f', 'sspgvd.f', 'sspgvx.f', 'ssprfs.f', 'sspsv.f', 'sspsvx.f', 'ssptrd.f', 'ssptrf.f', 'ssptri.f', 'ssptrs.f', 'sstegr.f', 'sstein.f', 'sstev.f', 'sstevd.f', 'sstevr.f', 'sstevx.f', 'ssycon.f', 'ssyev.f', 'ssyevd.f', 'ssyevr.f', 'ssyevx.f', 'ssygs2.f', 'ssygst.f', 'ssygv.f', 'ssygvd.f', 'ssygvx.f', 'ssyrfs.f', 'ssysv.f', 'ssysvx.f', 'ssytd2.f', 'ssytf2.f', 'ssytrd.f', 'ssytrf.f', 'ssytri.f', 'ssytri2.f', 'ssytri2x.f', 'ssyswapr.f', 'ssytrs.f', 'ssytrs2.f', 'ssyconv.f', 'ssyconvf.f', 'ssyconvf_rook.f', 'ssytf2_rook.f', 'ssytrf_rook.f', 'ssytrs_rook.f', 'ssytri_rook.f', 'ssycon_rook.f', 'ssysv_rook.f', 'ssytf2_rk.f', 'ssytrf_rk.f', 'ssytrs_3.f', 'ssytri_3.f', 'ssytri_3x.f', 'ssycon_3.f', 'ssysv_rk.f', 'slasyf_aa.f', 'ssysv_aa.f', 'ssytrf_aa.f', 'ssytrs_aa.f', 'ssysv_aa_2stage.f', 'ssytrf_aa_2stage.f', 'ssytrs_aa_2stage.f', 'stbcon.f', 'stbrfs.f', 'stbtrs.f', 'stgevc.f', 'stgex2.f', 'stgexc.f', 'stgsen.f', 'stgsja.f', 'stgsna.f', 'stgsy2.f', 'stgsyl.f', 'stpcon.f', 'stprfs.f', 'stptri.f', 'stptrs.f', 'strcon.f', 'strevc.f', 'strevc3.f', 'strexc.f', 'strrfs.f', 'strsen.f', 'strsna.f', 'strsyl.f', 'strti2.f', 'strtri.f', 'strtrs.f', 'stzrzf.f', 'sstemr.f', 'slansf.f', 'spftrf.f', 'spftri.f', 'spftrs.f', 'ssfrk.f', 'stfsm.f', 'stftri.f', 'stfttp.f', 'stfttr.f', 'stpttf.f', 'stpttr.f', 'strttf.f', 'strttp.f', 'sgejsv.f', 'sgesvj.f', 'sgsvj0.f', 'sgsvj1.f', 'sgeequb.f', 'ssyequb.f', 'spoequb.f', 'sgbequb.f', 'sbbcsd.f', 'slapmr.f', 'sorbdb.f', 'sorbdb1.f', 'sorbdb2.f', 'sorbdb3.f', 'sorbdb4.f', 'sorbdb5.f', 'sorbdb6.f', 'sorcsd.f', 'sorcsd2by1.f', 'sgeqrt.f', 'sgeqrt2.f', 'sgeqrt3.f', 'sgemqrt.f', 'stpqrt.f', 'stpqrt2.f', 'stpmqrt.f', 'stprfb.f', 'sgelqt.f', 'sgelqt3.f', 'sgemlqt.f', 'sgetsls.f', 'sgeqr.f', 'slatsqr.f', 'slamtsqr.f', 'sgemqr.f', 'sgelq.f', 'slaswlq.f', 'slamswlq.f', 'sgemlq.f', 'stplqt.f', 'stplqt2.f', 'stpmlqt.f', 'ssytrd_2stage.f', 'ssytrd_sy2sb.f', 'ssytrd_sb2st.F', 'ssb2st_kernels.f', 'ssyevd_2stage.f', 'ssyev_2stage.f', 'ssyevx_2stage.f', 'ssyevr_2stage.f', 'ssbev_2stage.f', 'ssbevx_2stage.f', 
'ssbevd_2stage.f', 'ssygv_2stage.f', 'sgesvdq.f', 'scombssq.f') + +DSLASRC = files('spotrs.f', 'sgetrs.f', 'spotrf.f', 'sgetrf.f') diff --git a/lapack-netlib/SRC/sbdsvdx.f b/lapack-netlib/SRC/sbdsvdx.f index a4b1887b2..c46674c47 100644 --- a/lapack-netlib/SRC/sbdsvdx.f +++ b/lapack-netlib/SRC/sbdsvdx.f @@ -165,7 +165,7 @@ *> *> \param[out] Z *> \verbatim -*> Z is REAL array, dimension (2*N,K) ) +*> Z is REAL array, dimension (2*N,K) *> If JOBZ = 'V', then if INFO = 0 the first NS columns of Z *> contain the singular vectors of the matrix B corresponding to *> the selected singular values, with U in rows 1 to N and V diff --git a/lapack-netlib/SRC/scombssq.f b/lapack-netlib/SRC/scombssq.f new file mode 100644 index 000000000..cc51a324b --- /dev/null +++ b/lapack-netlib/SRC/scombssq.f @@ -0,0 +1,94 @@ +*> \brief \b SCOMBSSQ adds two scaled sum of squares quantities +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* +* Definition: +* =========== +* +* SUBROUTINE SCOMBSSQ( V1, V2 ) +* +* .. Array Arguments .. +* REAL V1( 2 ), V2( 2 ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SCOMBSSQ adds two scaled sum of squares quantities, V1 := V1 + V2. +*> That is, +*> +*> V1_scale**2 * V1_sumsq := V1_scale**2 * V1_sumsq +*> + V2_scale**2 * V2_sumsq +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in,out] V1 +*> \verbatim +*> V1 is REAL array, dimension (2). +*> The first scaled sum. +*> V1(1) = V1_scale, V1(2) = V1_sumsq. +*> \endverbatim +*> +*> \param[in] V2 +*> \verbatim +*> V2 is REAL array, dimension (2). +*> The second scaled sum. +*> V2(1) = V2_scale, V2(2) = V2_sumsq. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2018 +* +*> \ingroup OTHERauxiliary +* +* ===================================================================== + SUBROUTINE SCOMBSSQ( V1, V2 ) +* +* -- LAPACK auxiliary routine (version 3.7.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2018 +* +* .. Array Arguments .. + REAL V1( 2 ), V2( 2 ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + REAL ZERO + PARAMETER ( ZERO = 0.0E+0 ) +* .. +* .. Executable Statements .. +* + IF( V1( 1 ).GE.V2( 1 ) ) THEN + IF( V1( 1 ).NE.ZERO ) THEN + V1( 2 ) = V1( 2 ) + ( V2( 1 ) / V1( 1 ) )**2 * V2( 2 ) + ELSE + V1( 2 ) = V1( 2 ) + V2( 2 ) + END IF + ELSE + V1( 2 ) = V2( 2 ) + ( V1( 1 ) / V2( 1 ) )**2 * V1( 2 ) + V1( 1 ) = V2( 1 ) + END IF + RETURN +* +* End of SCOMBSSQ +* + END diff --git a/lapack-netlib/SRC/sgbrfsx.f b/lapack-netlib/SRC/sgbrfsx.f index 032b78b80..78ae584e1 100644 --- a/lapack-netlib/SRC/sgbrfsx.f +++ b/lapack-netlib/SRC/sgbrfsx.f @@ -308,7 +308,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned.
*> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -344,14 +344,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -359,9 +359,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/sgbsvxx.f b/lapack-netlib/SRC/sgbsvxx.f index b2132325e..3c3d737b3 100644 --- a/lapack-netlib/SRC/sgbsvxx.f +++ b/lapack-netlib/SRC/sgbsvxx.f @@ -431,7 +431,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -467,14 +467,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -482,9 +482,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. 
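The SCOMBSSQ routine added above maintains the same (scale, sumsq) pair used by SLASSQ, where the pair (s, q) stands for the value s**2 * q; the routine folds V2 into V1 while keeping the larger of the two scales so that no intermediate square overflows. A minimal standalone check of that update, not part of the patch; the program name and test values are illustrative only:

      PROGRAM TSSQ
*     Combine V1 = (2,3), representing 2**2*3 = 12, with V2 = (4,1),
*     representing 4**2*1 = 16; the result must represent 12+16 = 28.
      REAL V1( 2 ), V2( 2 ), DIRECT
      EXTERNAL SCOMBSSQ
      V1( 1 ) = 2.0E0
      V1( 2 ) = 3.0E0
      V2( 1 ) = 4.0E0
      V2( 2 ) = 1.0E0
      DIRECT = V1( 1 )**2*V1( 2 ) + V2( 1 )**2*V2( 2 )
      CALL SCOMBSSQ( V1, V2 )
*     Expect V1(1) = 4.0 (the larger scale) and V1(2) = 28/16 = 1.75
      WRITE( *, * ) 'scale =', V1( 1 ), ' sumsq =', V1( 2 )
      WRITE( *, * ) 'combined =', V1( 1 )**2*V1( 2 ), DIRECT
      END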
diff --git a/lapack-netlib/SRC/sgebak.f b/lapack-netlib/SRC/sgebak.f index ec58bf335..5c64c8b97 100644 --- a/lapack-netlib/SRC/sgebak.f +++ b/lapack-netlib/SRC/sgebak.f @@ -47,10 +47,10 @@ *> \verbatim *> JOB is CHARACTER*1 *> Specifies the type of backward transformation required: -*> = 'N', do nothing, return immediately; -*> = 'P', do backward transformation for permutation only; -*> = 'S', do backward transformation for scaling only; -*> = 'B', do backward transformations for both permutation and +*> = 'N': do nothing, return immediately; +*> = 'P': do backward transformation for permutation only; +*> = 'S': do backward transformation for scaling only; +*> = 'B': do backward transformations for both permutation and *> scaling. *> JOB must be the same as the argument JOB supplied to SGEBAL. *> \endverbatim diff --git a/lapack-netlib/SRC/sgeesx.f b/lapack-netlib/SRC/sgeesx.f index c90de9b81..5ffa3bc37 100644 --- a/lapack-netlib/SRC/sgeesx.f +++ b/lapack-netlib/SRC/sgeesx.f @@ -583,7 +583,9 @@ IF( N.GT.I+1 ) $ CALL SSWAP( N-I-1, A( I, I+2 ), LDA, $ A( I+1, I+2 ), LDA ) - CALL SSWAP( N, VS( 1, I ), 1, VS( 1, I+1 ), 1 ) + IF( WANTVS ) THEN + CALL SSWAP( N, VS( 1, I ), 1, VS( 1, I+1 ), 1 ) + END IF A( I, I+1 ) = A( I+1, I ) A( I+1, I ) = ZERO END IF diff --git a/lapack-netlib/SRC/sgejsv.f b/lapack-netlib/SRC/sgejsv.f index e4cbe8d0e..4ad316d99 100644 --- a/lapack-netlib/SRC/sgejsv.f +++ b/lapack-netlib/SRC/sgejsv.f @@ -82,7 +82,7 @@ *> desirable, then this option is advisable. The input matrix A *> is preprocessed with QR factorization with FULL (row and *> column) pivoting. -*> = 'G' Computation as with 'F' with an additional estimate of the +*> = 'G': Computation as with 'F' with an additional estimate of the *> condition number of B, where A=D*B. If A has heavily weighted *> rows, then using this condition number gives too pessimistic *> error bound. @@ -133,7 +133,7 @@ *> specified range. If A .NE. 0 is scaled so that the largest singular *> value of c*A is around SQRT(BIG), BIG=SLAMCH('O'), then JOBR issues *> the licence to kill columns of A whose norm in c*A is less than -*> SQRT(SFMIN) (for JOBR.EQ.'R'), or less than SMALL=SFMIN/EPSLN, +*> SQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, *> where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). *> = 'N': Do not kill small columns of c*A. This option assumes that *> BLAS and QR factorizations and triangular solvers are @@ -230,7 +230,7 @@ *> If JOBU = 'F', then U contains on exit the M-by-M matrix of *> the left singular vectors, including an ONB *> of the orthogonal complement of the Range(A). -*> If JOBU = 'W' .AND. (JOBV.EQ.'V' .AND. JOBT.EQ.'T' .AND. M.EQ.N), +*> If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), *> then U is used as workspace if the procedure *> replaces A with A^t. In that case, [V] is computed *> in U as left singular vectors of A^t and then @@ -252,7 +252,7 @@ *> V is REAL array, dimension ( LDV, N ) *> If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of *> the right singular vectors; -*> If JOBV = 'W', AND (JOBU.EQ.'U' AND JOBT.EQ.'T' AND M.EQ.N), +*> If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), *> then V is used as workspace if the pprocedure *> replaces A with A^t. In that case, [U] is computed *> in V as right singular vectors of A^t and then @@ -278,7 +278,7 @@ *> of A. (See the description of SVA().) *> WORK(2) = See the description of WORK(1). *> WORK(3) = SCONDA is an estimate for the condition number of -*> column equilibrated A. (If JOBA .EQ. 
'E' or 'G') +*> column equilibrated A. (If JOBA = 'E' or 'G') *> SCONDA is an estimate of SQRT(||(R^t * R)^(-1)||_1). *> It is computed using SPOCON. It holds *> N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA @@ -297,7 +297,7 @@ *> triangular factor in the first QR factorization. *> WORK(5) = an estimate of the scaled condition number of the *> triangular factor in the second QR factorization. -*> The following two parameters are computed if JOBT .EQ. 'T'. +*> The following two parameters are computed if JOBT = 'T'. *> They are provided for a developer/implementer who is familiar *> with the details of the method. *> @@ -313,8 +313,8 @@ *> Length of WORK to confirm proper allocation of work space. *> LWORK depends on the job: *> -*> If only SIGMA is needed ( JOBU.EQ.'N', JOBV.EQ.'N' ) and -*> -> .. no scaled condition estimate required (JOBE.EQ.'N'): +*> If only SIGMA is needed ( JOBU = 'N', JOBV = 'N' ) and +*> -> .. no scaled condition estimate required (JOBE = 'N'): *> LWORK >= max(2*M+N,4*N+1,7). This is the minimal requirement. *> ->> For optimal performance (blocked code) the optimal value *> is LWORK >= max(2*M+N,3*N+(N+1)*NB,7). Here NB is the optimal @@ -330,7 +330,7 @@ *> LWORK >= max(2*M+N,N+LWORK(DGEQP3),N+LWORK(DGEQRF), *> N+N*N+LWORK(DPOCON),7). *> -*> If SIGMA and the right singular vectors are needed (JOBV.EQ.'V'), +*> If SIGMA and the right singular vectors are needed (JOBV = 'V'), *> -> the minimal requirement is LWORK >= max(2*M+N,4*N+1,7). *> -> For optimal performance, LWORK >= max(2*M+N,3*N+(N+1)*NB,7), *> where NB is the optimal block size for DGEQP3, DGEQRF, DGELQ, @@ -341,19 +341,19 @@ *> If SIGMA and the left singular vectors are needed *> -> the minimal requirement is LWORK >= max(2*M+N,4*N+1,7). *> -> For optimal performance: -*> if JOBU.EQ.'U' :: LWORK >= max(2*M+N,3*N+(N+1)*NB,7), -*> if JOBU.EQ.'F' :: LWORK >= max(2*M+N,3*N+(N+1)*NB,N+M*NB,7), +*> if JOBU = 'U' :: LWORK >= max(2*M+N,3*N+(N+1)*NB,7), +*> if JOBU = 'F' :: LWORK >= max(2*M+N,3*N+(N+1)*NB,N+M*NB,7), *> where NB is the optimal block size for DGEQP3, DGEQRF, DORMQR. *> In general, the optimal length LWORK is computed as *> LWORK >= max(2*M+N,N+LWORK(DGEQP3),N+LWORK(DPOCON), *> 2*N+LWORK(DGEQRF), N+LWORK(DORMQR)). -*> Here LWORK(DORMQR) equals N*NB (for JOBU.EQ.'U') or -*> M*NB (for JOBU.EQ.'F'). +*> Here LWORK(DORMQR) equals N*NB (for JOBU = 'U') or +*> M*NB (for JOBU = 'F'). *> -*> If the full SVD is needed: (JOBU.EQ.'U' or JOBU.EQ.'F') and -*> -> if JOBV.EQ.'V' +*> If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and +*> -> if JOBV = 'V' *> the minimal requirement is LWORK >= max(2*M+N,6*N+2*N*N). -*> -> if JOBV.EQ.'J' the minimal requirement is +*> -> if JOBV = 'J' the minimal requirement is *> LWORK >= max(2*M+N, 4*N+N*N,2*N+N*N+6). *> -> For optimal performance, LWORK should be additionally *> larger than N+M*NB, where NB is the optimal block size @@ -369,7 +369,7 @@ *> of JOBA and JOBR. *> IWORK(2) = the number of the computed nonzero singular values *> IWORK(3) = if nonzero, a warning message: -*> If IWORK(3).EQ.1 then some of the column norms of A +*> If IWORK(3) = 1 then some of the column norms of A *> were denormalized floats. The requested high accuracy *> is not warranted by the data. *> \endverbatim @@ -377,10 +377,10 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> < 0 : if INFO = -i, then the i-th argument had an illegal value. -*> = 0 : successful exit; -*> > 0 : SGEJSV did not converge in the maximal allowed number -*> of sweeps. 
The computed values may be inaccurate. +*> < 0: if INFO = -i, then the i-th argument had an illegal value. +*> = 0: successful exit; +*> > 0: SGEJSV did not converge in the maximal allowed number +*> of sweeps. The computed values may be inaccurate. *> \endverbatim * * Authors: @@ -953,7 +953,7 @@ IF ( L2ABER ) THEN * Standard absolute error bound suffices. All sigma_i with * sigma_i < N*EPSLN*||A|| are flushed to zero. This is an -* agressive enforcement of lower numerical rank by introducing a +* aggressive enforcement of lower numerical rank by introducing a * backward error of the order of N*EPSLN*||A||. TEMP1 = SQRT(FLOAT(N))*EPSLN DO 3001 p = 2, N @@ -965,7 +965,7 @@ 3001 CONTINUE 3002 CONTINUE ELSE IF ( L2RANK ) THEN -* .. similarly as above, only slightly more gentle (less agressive). +* .. similarly as above, only slightly more gentle (less aggressive). * Sudden drop on the diagonal of R1 is used as the criterion for * close-to-rank-deficient. TEMP1 = SQRT(SFMIN) @@ -1294,7 +1294,7 @@ CALL SPOCON('Lower',NR,WORK(2*N+1),NR,ONE,TEMP1, $ WORK(2*N+NR*NR+1),IWORK(M+2*N+1),IERR) CONDR1 = ONE / SQRT(TEMP1) -* .. here need a second oppinion on the condition number +* .. here need a second opinion on the condition number * .. then assume worst case scenario * R1 is OK for inverse <=> CONDR1 .LT. FLOAT(N) * more conservative <=> CONDR1 .LT. SQRT(FLOAT(N)) @@ -1335,7 +1335,7 @@ ELSE * * .. ill-conditioned case: second QRF with pivoting -* Note that windowed pivoting would be equaly good +* Note that windowed pivoting would be equally good * numerically, and more run-time efficient. So, in * an optimal implementation, the next call to SGEQP3 * should be replaced with eg. CALL SGEQPX (ACM TOMS #782) @@ -1388,7 +1388,7 @@ * IF ( CONDR2 .GE. COND_OK ) THEN * .. save the Householder vectors used for Q3 -* (this overwrittes the copy of R2, as it will not be +* (this overwrites the copy of R2, as it will not be * needed in this branch, but it does not overwritte the * Huseholder vectors of Q2.). CALL SLACPY( 'U', NR, NR, V, LDV, WORK(2*N+1), N ) @@ -1638,7 +1638,7 @@ * * This branch deploys a preconditioned Jacobi SVD with explicitly * accumulated rotations. It is included as optional, mainly for -* experimental purposes. It does perfom well, and can also be used. +* experimental purposes. It does perform well, and can also be used. * In this implementation, this branch will be automatically activated * if the condition number sigma_max(A) / sigma_min(A) is predicted * to be greater than the overflow threshold. This is because the diff --git a/lapack-netlib/SRC/sgelq.f b/lapack-netlib/SRC/sgelq.f index 4fe4d191d..e45c68db4 100644 --- a/lapack-netlib/SRC/sgelq.f +++ b/lapack-netlib/SRC/sgelq.f @@ -1,3 +1,4 @@ +*> \brief \b SGELQ * * Definition: * =========== @@ -17,7 +18,17 @@ * ============= *> *> \verbatim -*> SGELQ computes a LQ factorization of an M-by-N matrix A. +*> +*> SGELQ computes an LQ factorization of a real M-by-N matrix A: +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is an N-by-N orthogonal matrix; +*> L is a lower-triangular M-by-M matrix; +*> 0 is an M-by-(N-M) zero matrix, if M < N. +*> *> \endverbatim * * Arguments: @@ -138,7 +149,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation.
These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> @@ -159,10 +170,10 @@ SUBROUTINE SGELQ( M, N, A, LDA, T, TSIZE, WORK, LWORK, $ INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N, TSIZE, LWORK @@ -176,7 +187,7 @@ * .. * .. Local Scalars .. LOGICAL LQUERY, LMINWS, MINT, MINW - INTEGER MB, NB, MINTSZ, NBLCKS + INTEGER MB, NB, MINTSZ, NBLCKS, LWMIN, LWOPT, LWREQ * .. * .. External Functions .. LOGICAL LSAME @@ -232,20 +243,32 @@ * * Determine if the workspace size satisfies minimal size * + IF( ( N.LE.M ) .OR. ( NB.LE.M ) .OR. ( NB.GE.N ) ) THEN + LWMIN = MAX( 1, N ) + LWOPT = MAX( 1, MB*N ) + ELSE + LWMIN = MAX( 1, M ) + LWOPT = MAX( 1, MB*M ) + END IF LMINWS = .FALSE. - IF( ( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) .OR. LWORK.LT.MB*M ) - $ .AND. ( LWORK.GE.M ) .AND. ( TSIZE.GE.MINTSZ ) + IF( ( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) .OR. LWORK.LT.LWOPT ) + $ .AND. ( LWORK.GE.LWMIN ) .AND. ( TSIZE.GE.MINTSZ ) $ .AND. ( .NOT.LQUERY ) ) THEN IF( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) ) THEN LMINWS = .TRUE. MB = 1 NB = N END IF - IF( LWORK.LT.MB*M ) THEN + IF( LWORK.LT.LWOPT ) THEN LMINWS = .TRUE. MB = 1 END IF END IF + IF( ( N.LE.M ) .OR. ( NB.LE.M ) .OR. ( NB.GE.N ) ) THEN + LWREQ = MAX( 1, MB*N ) + ELSE + LWREQ = MAX( 1, MB*M ) + END IF * IF( M.LT.0 ) THEN INFO = -1 @@ -256,7 +279,7 @@ ELSE IF( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) $ .AND. ( .NOT.LQUERY ) .AND. ( .NOT.LMINWS ) ) THEN INFO = -6 - ELSE IF( ( LWORK.LT.MAX( 1, M*MB ) ) .AND .( .NOT.LQUERY ) + ELSE IF( ( LWORK.LT.LWREQ ) .AND .( .NOT.LQUERY ) $ .AND. ( .NOT.LMINWS ) ) THEN INFO = -8 END IF @@ -270,9 +293,9 @@ T( 2 ) = MB T( 3 ) = NB IF( MINW ) THEN - WORK( 1 ) = MAX( 1, N ) + WORK( 1 ) = LWMIN ELSE - WORK( 1 ) = MAX( 1, MB*M ) + WORK( 1 ) = LWREQ END IF END IF IF( INFO.NE.0 ) THEN @@ -297,7 +320,7 @@ $ LWORK, INFO ) END IF * - WORK( 1 ) = MAX( 1, MB*M ) + WORK( 1 ) = LWREQ RETURN * * End of SGELQ diff --git a/lapack-netlib/SRC/sgelq2.f b/lapack-netlib/SRC/sgelq2.f index 5b1ad215a..df8128d53 100644 --- a/lapack-netlib/SRC/sgelq2.f +++ b/lapack-netlib/SRC/sgelq2.f @@ -33,8 +33,16 @@ *> *> \verbatim *> -*> SGELQ2 computes an LQ factorization of a real m by n matrix A: -*> A = L * Q. +*> SGELQ2 computes an LQ factorization of a real m-by-n matrix A: +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is an n-by-n orthogonal matrix; +*> L is a lower-triangular m-by-m matrix; +*> 0 is an m-by-(n-m) zero matrix, if m < n. +*> *> \endverbatim * * Arguments: @@ -96,7 +104,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup realGEcomputational * @@ -121,10 +129,10 @@ * ===================================================================== SUBROUTINE SGELQ2( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments ..
INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/sgelqf.f b/lapack-netlib/SRC/sgelqf.f index 99c03c0a3..90357e623 100644 --- a/lapack-netlib/SRC/sgelqf.f +++ b/lapack-netlib/SRC/sgelqf.f @@ -34,7 +34,15 @@ *> \verbatim *> *> SGELQF computes an LQ factorization of a real M-by-N matrix A: -*> A = L * Q. +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is an N-by-N orthogonal matrix; +*> L is a lower-triangular M-by-M matrix; +*> 0 is an M-by-(N-M) zero matrix, if M < N. +*> *> \endverbatim * * Arguments: @@ -110,7 +118,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup realGEcomputational * @@ -135,10 +143,10 @@ * ===================================================================== SUBROUTINE SGELQF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/sgelqt.f b/lapack-netlib/SRC/sgelqt.f index 9a93af332..64d46025c 100644 --- a/lapack-netlib/SRC/sgelqt.f +++ b/lapack-netlib/SRC/sgelqt.f @@ -1,3 +1,5 @@ +*> \brief \b SGELQT +* * Definition: * =========== * diff --git a/lapack-netlib/SRC/sgelqt3.f b/lapack-netlib/SRC/sgelqt3.f index 292ae88a3..edf5d6d30 100644 --- a/lapack-netlib/SRC/sgelqt3.f +++ b/lapack-netlib/SRC/sgelqt3.f @@ -1,3 +1,5 @@ +*> \brief \b SGELQT3 +* * Definition: * =========== * diff --git a/lapack-netlib/SRC/sgemlq.f b/lapack-netlib/SRC/sgemlq.f index dedbe7752..5f2e02a8e 100644 --- a/lapack-netlib/SRC/sgemlq.f +++ b/lapack-netlib/SRC/sgemlq.f @@ -1,3 +1,4 @@ +*> \brief \b SGEMLQ * * Definition: * =========== @@ -143,7 +144,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> diff --git a/lapack-netlib/SRC/sgemlqt.f b/lapack-netlib/SRC/sgemlqt.f index a8f022bdc..37850fdf5 100644 --- a/lapack-netlib/SRC/sgemlqt.f +++ b/lapack-netlib/SRC/sgemlqt.f @@ -1,3 +1,5 @@ +*> \brief \b SGEMLQT +* * Definition: * =========== * diff --git a/lapack-netlib/SRC/sgemqr.f b/lapack-netlib/SRC/sgemqr.f index 307fc8ca9..66c5117c9 100644 --- a/lapack-netlib/SRC/sgemqr.f +++ b/lapack-netlib/SRC/sgemqr.f @@ -1,3 +1,4 @@ +*> \brief \b SGEMQR * * Definition: * =========== @@ -144,7 +145,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface.
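The LWMIN/LWOPT/LWREQ rework in the sgelq.f hunk above serves the standard two-stage workspace query: SGELQ called with TSIZE = -1 and LWORK = -1 reports the required sizes in T(1) and WORK(1) (the latter now set from LWREQ) before the actual factorization. A usage sketch, not part of the patch; the program name, matrix size, and static array bounds are illustrative only:

      PROGRAM TLQ
*     Workspace query for SGELQ, then the factorization itself.
      INTEGER M, N
      PARAMETER ( M = 3, N = 5 )
      REAL A( M, N ), T( 100 ), WORK( 100 )
      INTEGER INFO, I, J
      EXTERNAL SGELQ
      DO 20 J = 1, N
         DO 20 I = 1, M
            A( I, J ) = REAL( I*J )
   20 CONTINUE
*     Query: T(1) returns the optimal TSIZE, WORK(1) the optimal LWORK
      CALL SGELQ( M, N, A, M, T, -1, WORK, -1, INFO )
      WRITE( *, * ) 'TSIZE =', INT( T( 1 ) )
      WRITE( *, * ) 'LWORK =', INT( WORK( 1 ) )
*     The static bounds above are ample for a 3-by-5 problem
      CALL SGELQ( M, N, A, M, T, 100, WORK, 100, INFO )
      WRITE( *, * ) 'INFO =', INFO
      END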
*> diff --git a/lapack-netlib/SRC/sgeqr.f b/lapack-netlib/SRC/sgeqr.f index f939abd9d..4a6bb9ea5 100644 --- a/lapack-netlib/SRC/sgeqr.f +++ b/lapack-netlib/SRC/sgeqr.f @@ -1,3 +1,4 @@ +*> \brief \b SGEQR * * Definition: * =========== @@ -17,7 +18,18 @@ * ============= *> *> \verbatim -*> SGEQR computes a QR factorization of an M-by-N matrix A. +*> +*> SGEQR computes a QR factorization of a real M-by-N matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is an M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix; +*> 0 is an (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -138,7 +150,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> @@ -160,10 +172,10 @@ SUBROUTINE SGEQR( M, N, A, LDA, T, TSIZE, WORK, LWORK, $ INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N, TSIZE, LWORK diff --git a/lapack-netlib/SRC/sgeqr2.f b/lapack-netlib/SRC/sgeqr2.f index 3b990f825..0a1ff304f 100644 --- a/lapack-netlib/SRC/sgeqr2.f +++ b/lapack-netlib/SRC/sgeqr2.f @@ -33,8 +33,17 @@ *> *> \verbatim *> -*> SGEQR2 computes a QR factorization of a real m by n matrix A: -*> A = Q * R. +*> SGEQR2 computes a QR factorization of a real m-by-n matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is an m-by-m orthogonal matrix; +*> R is an upper-triangular n-by-n matrix; +*> 0 is an (m-n)-by-n zero matrix, if m > n. +*> *> \endverbatim * * Arguments: @@ -96,7 +105,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup realGEcomputational * @@ -121,10 +130,10 @@ * ===================================================================== SUBROUTINE SGEQR2( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/sgeqr2p.f b/lapack-netlib/SRC/sgeqr2p.f index f48af9d2d..08d124797 100644 --- a/lapack-netlib/SRC/sgeqr2p.f +++ b/lapack-netlib/SRC/sgeqr2p.f @@ -33,8 +33,18 @@ *> *> \verbatim *> -*> SGEQR2P computes a QR factorization of a real m by n matrix A: -*> A = Q * R. The diagonal entries of R are nonnegative. +*> SGEQR2P computes a QR factorization of a real m-by-n matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is an m-by-m orthogonal matrix; +*> R is an upper-triangular n-by-n matrix with nonnegative diagonal +*> entries; +*> 0 is an (m-n)-by-n zero matrix, if m > n. +*> *> \endverbatim * * Arguments: @@ -97,7 +107,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd.
* -*> \date December 2016 +*> \date November 2019 * *> \ingroup realGEcomputational * @@ -124,10 +134,10 @@ * ===================================================================== SUBROUTINE SGEQR2P( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/sgeqrf.f b/lapack-netlib/SRC/sgeqrf.f index 0f79c2ca5..7df495e04 100644 --- a/lapack-netlib/SRC/sgeqrf.f +++ b/lapack-netlib/SRC/sgeqrf.f @@ -34,7 +34,16 @@ *> \verbatim *> *> SGEQRF computes a QR factorization of a real M-by-N matrix A: -*> A = Q * R. +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is an M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix; +*> 0 is an (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -111,7 +120,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup realGEcomputational * @@ -136,10 +145,10 @@ * ===================================================================== SUBROUTINE SGEQRF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/sgeqrfp.f b/lapack-netlib/SRC/sgeqrfp.f index 654c0a13a..7f6741570 100644 --- a/lapack-netlib/SRC/sgeqrfp.f +++ b/lapack-netlib/SRC/sgeqrfp.f @@ -33,8 +33,18 @@ *> *> \verbatim *> -*> SGEQRFP computes a QR factorization of a real M-by-N matrix A: -*> A = Q * R. The diagonal entries of R are nonnegative. +*> SGEQRFP computes a QR factorization of a real M-by-N matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is an M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix with nonnegative diagonal +*> entries; +*> 0 is an (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -112,7 +122,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup realGEcomputational * @@ -139,10 +149,10 @@ * ===================================================================== SUBROUTINE SGEQRFP( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/sgerfsx.f b/lapack-netlib/SRC/sgerfsx.f index 3f518899e..b1a1eb13d 100644 --- a/lapack-netlib/SRC/sgerfsx.f +++ b/lapack-netlib/SRC/sgerfsx.f @@ -283,7 +283,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed.
If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -319,14 +319,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -334,9 +334,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/sgesc2.f b/lapack-netlib/SRC/sgesc2.f index c78daa334..3a6f34584 100644 --- a/lapack-netlib/SRC/sgesc2.f +++ b/lapack-netlib/SRC/sgesc2.f @@ -90,7 +90,7 @@ *> \verbatim *> SCALE is REAL *> On exit, SCALE contains the scale factor. SCALE is chosen -*> 0 <= SCALE <= 1 to prevent owerflow in the solution. +*> 0 <= SCALE <= 1 to prevent overflow in the solution. *> \endverbatim * * Authors: @@ -151,7 +151,7 @@ * .. * .. Executable Statements .. * -* Set constant to control owerflow +* Set constant to control overflow * EPS = SLAMCH( 'P' ) SMLNUM = SLAMCH( 'S' ) / EPS diff --git a/lapack-netlib/SRC/sgesdd.f b/lapack-netlib/SRC/sgesdd.f index 0ba2a78c7..689494dd1 100644 --- a/lapack-netlib/SRC/sgesdd.f +++ b/lapack-netlib/SRC/sgesdd.f @@ -322,7 +322,7 @@ * IF( WNTQN ) THEN * sbdsdc needs only 4*N (or 6*N for uplo=L for LAPACK <= 3.6) -* keep 7*N for backwards compatability. +* keep 7*N for backwards compatibility. BDSPAC = 7*N ELSE BDSPAC = 3*N*N + 4*N @@ -448,7 +448,7 @@ * IF( WNTQN ) THEN * sbdsdc needs only 4*N (or 6*N for uplo=L for LAPACK <= 3.6) -* keep 7*N for backwards compatability. +* keep 7*N for backwards compatibility. BDSPAC = 7*M ELSE BDSPAC = 3*M*M + 4*M diff --git a/lapack-netlib/SRC/sgesvdq.f b/lapack-netlib/SRC/sgesvdq.f new file mode 100644 index 000000000..73e34862f --- /dev/null +++ b/lapack-netlib/SRC/sgesvdq.f @@ -0,0 +1,1388 @@ +*> \brief SGESVDQ computes the singular value decomposition (SVD) with a QR-Preconditioned QR SVD Method for GE matrices +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download SGESVDQ + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE SGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, +* S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, +* WORK, LWORK, RWORK, LRWORK, INFO ) +* +* .. Scalar Arguments .. +* IMPLICIT NONE +* CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV +* INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LWORK, LRWORK, +* INFO +* .. +* .. Array Arguments .. 
+* REAL A( LDA, * ), U( LDU, * ), V( LDV, * ), WORK( * ) +* REAL S( * ), RWORK( * ) +* INTEGER IWORK( * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SGESVDQ computes the singular value decomposition (SVD) of a real +*> M-by-N matrix A, where M >= N. The SVD of A is written as +*> [++] [xx] [x0] [xx] +*> A = U * SIGMA * V^*, [++] = [xx] * [ox] * [xx] +*> [++] [xx] +*> where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal +*> matrix, and V is an N-by-N orthogonal matrix. The diagonal elements +*> of SIGMA are the singular values of A. The columns of U and V are the +*> left and the right singular vectors of A, respectively. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] JOBA +*> \verbatim +*> JOBA is CHARACTER*1 +*> Specifies the level of accuracy in the computed SVD +*> = 'A' The requested accuracy corresponds to having the backward +*> error bounded by || delta A ||_F <= f(m,n) * EPS * || A ||_F, +*> where EPS = SLAMCH('Epsilon'). This authorises SGESVDQ to +*> truncate the computed triangular factor in a rank revealing +*> QR factorization whenever the truncated part is below the +*> threshold of the order of EPS * ||A||_F. This is aggressive +*> truncation level. +*> = 'M' Similarly as with 'A', but the truncation is more gentle: it +*> is allowed only when there is a drop on the diagonal of the +*> triangular factor in the QR factorization. This is medium +*> truncation level. +*> = 'H' High accuracy requested. No numerical rank determination based +*> on the rank revealing QR factorization is attempted. +*> = 'E' Same as 'H', and in addition the condition number of column +*> scaled A is estimated and returned in RWORK(1). +*> N^(-1/4)*RWORK(1) <= ||pinv(A_scaled)||_2 <= N^(1/4)*RWORK(1) +*> \endverbatim +*> +*> \param[in] JOBP +*> \verbatim +*> JOBP is CHARACTER*1 +*> = 'P' The rows of A are ordered in decreasing order with respect to +*> ||A(i,:)||_\infty. This enhances numerical accuracy at the cost +*> of extra data movement. Recommended for numerical robustness. +*> = 'N' No row pivoting. +*> \endverbatim +*> +*> \param[in] JOBR +*> \verbatim +*> JOBR is CHARACTER*1 +*> = 'T' After the initial pivoted QR factorization, SGESVD is applied to +*> the transposed R**T of the computed triangular factor R. This involves +*> some extra data movement (matrix transpositions). Useful for +*> experiments, research and development. +*> = 'N' The triangular factor R is given as input to SGESVD. This may be +*> preferred as it involves less data movement. +*> \endverbatim +*> +*> \param[in] JOBU +*> \verbatim +*> JOBU is CHARACTER*1 +*> = 'A' All M left singular vectors are computed and returned in the +*> matrix U. See the description of U. +*> = 'S' or 'U' N = min(M,N) left singular vectors are computed and returned +*> in the matrix U. See the description of U. +*> = 'R' Numerical rank NUMRANK is determined and only NUMRANK left singular +*> vectors are computed and returned in the matrix U. +*> = 'F' The N left singular vectors are returned in factored form as the +*> product of the Q factor from the initial QR factorization and the +*> N left singular vectors of (R**T , 0)**T. If row pivoting is used, +*> then the necessary information on the row pivoting is stored in +*> IWORK(N+1:N+M-1). +*> = 'N' The left singular vectors are not computed. +*> \endverbatim +*> +*> \param[in] JOBV +*> \verbatim +*> JOBV is CHARACTER*1 +*> = 'A', 'V' All N right singular vectors are computed and returned in +*> the matrix V.
+*> = 'R' Numerical rank NUMRANK is determined and only NUMRANK right singular +*> vectors are computed and returned in the matrix V. This option is +*> allowed only if JOBU = 'R' or JOBU = 'N'; otherwise it is illegal. +*> = 'N' The right singular vectors are not computed. +*> \endverbatim +*> +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the input matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the input matrix A. M >= N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is REAL array of dimensions LDA x N +*> On entry, the input matrix A. +*> On exit, if JOBU .NE. 'N' or JOBV .NE. 'N', the lower triangle of A contains +*> the Householder vectors as stored by SGEQP3. If JOBU = 'F', these Householder +*> vectors together with WORK(1:N) can be used to restore the Q factors from +*> the initial pivoted QR factorization of A. See the description of U. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER. +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] S +*> \verbatim +*> S is REAL array of dimension N. +*> The singular values of A, ordered so that S(i) >= S(i+1). +*> \endverbatim +*> +*> \param[out] U +*> \verbatim +*> U is REAL array, dimension +*> LDU x M if JOBU = 'A'; see the description of LDU. In this case, +*> on exit, U contains the M left singular vectors. +*> LDU x N if JOBU = 'S', 'U', 'R' ; see the description of LDU. In this +*> case, U contains the leading N or the leading NUMRANK left singular vectors. +*> LDU x N if JOBU = 'F' ; see the description of LDU. In this case U +*> contains N x N orthogonal matrix that can be used to form the left +*> singular vectors. +*> If JOBU = 'N', U is not referenced. +*> \endverbatim +*> +*> \param[in] LDU +*> \verbatim +*> LDU is INTEGER. +*> The leading dimension of the array U. +*> If JOBU = 'A', 'S', 'U', 'R', LDU >= max(1,M). +*> If JOBU = 'F', LDU >= max(1,N). +*> Otherwise, LDU >= 1. +*> \endverbatim +*> +*> \param[out] V +*> \verbatim +*> V is REAL array, dimension +*> LDV x N if JOBV = 'A', 'V', 'R' or if JOBA = 'E' . +*> If JOBV = 'A', or 'V', V contains the N-by-N orthogonal matrix V**T; +*> If JOBV = 'R', V contains the first NUMRANK rows of V**T (the right +*> singular vectors, stored rowwise, of the NUMRANK largest singular values). +*> If JOBV = 'N' and JOBA = 'E', V is used as a workspace. +*> If JOBV = 'N', and JOBA.NE.'E', V is not referenced. +*> \endverbatim +*> +*> \param[in] LDV +*> \verbatim +*> LDV is INTEGER +*> The leading dimension of the array V. +*> If JOBV = 'A', 'V', 'R', or JOBA = 'E', LDV >= max(1,N). +*> Otherwise, LDV >= 1. +*> \endverbatim +*> +*> \param[out] NUMRANK +*> \verbatim +*> NUMRANK is INTEGER +*> NUMRANK is the numerical rank first determined after the rank +*> revealing QR factorization, following the strategy specified by the +*> value of JOBA. If JOBV = 'R' and JOBU = 'R', only NUMRANK +*> leading singular values and vectors are then requested in the call +*> of SGESVD. The final value of NUMRANK might be further reduced if +*> some singular values are computed as zeros. +*> \endverbatim +*> +*> \param[out] IWORK +*> \verbatim +*> IWORK is INTEGER array, dimension (max(1, LIWORK)). +*> On exit, IWORK(1:N) contains column pivoting permutation of the +*> rank revealing QR factorization. +*> If JOBP = 'P', IWORK(N+1:N+M-1) contains the indices of the sequence +*> of row swaps used in row pivoting. 
These can be used to restore the +*> left singular vectors in the case JOBU = 'F'. +*> +*> If LIWORK, LWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> IWORK(1) returns the minimal LIWORK. +*> \endverbatim +*> +*> \param[in] LIWORK +*> \verbatim +*> LIWORK is INTEGER +*> The dimension of the array IWORK. +*> LIWORK >= N + M - 1, if JOBP = 'P' and JOBA .NE. 'E'; +*> LIWORK >= N if JOBP = 'N' and JOBA .NE. 'E'; +*> LIWORK >= N + M - 1 + N, if JOBP = 'P' and JOBA = 'E'; +*> LIWORK >= N + N if JOBP = 'N' and JOBA = 'E'. +* +*> If LIWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the WORK, IWORK, and RWORK arrays, and no error +*> message related to LWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] WORK +*> \verbatim +*> WORK is REAL array, dimension (max(2, LWORK)), used as a workspace. +*> On exit, if, on entry, LWORK.NE.-1, WORK(1:N) contains parameters +*> needed to recover the Q factor from the QR factorization computed by +*> SGEQP3. +*> +*> If LIWORK, LWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> WORK(1) returns the optimal LWORK, and +*> WORK(2) returns the minimal LWORK. +*> \endverbatim +*> +*> \param[in,out] LWORK +*> \verbatim +*> LWORK is INTEGER +*> The dimension of the array WORK. It is determined as follows: +*> Let LWQP3 = 3*N+1, LWCON = 3*N, and let +*> LWORQ = { MAX( N, 1 ), if JOBU = 'R', 'S', or 'U' +*> { MAX( M, 1 ), if JOBU = 'A' +*> LWSVD = MAX( 5*N, 1 ) +*> LWLQF = MAX( N/2, 1 ), LWSVD2 = MAX( 5*(N/2), 1 ), LWORLQ = MAX( N, 1 ), +*> LWQRF = MAX( N/2, 1 ), LWORQ2 = MAX( N, 1 ) +*> Then the minimal value of LWORK is: +*> = MAX( N + LWQP3, LWSVD ) if only the singular values are needed; +*> = MAX( N + LWQP3, LWCON, LWSVD ) if only the singular values are needed, +*> and a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD, LWORQ ) if the singular values and the left +*> singular vectors are requested; +*> = N + MAX( LWQP3, LWCON, LWSVD, LWORQ ) if the singular values and the left +*> singular vectors are requested, and also +*> a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD ) if the singular values and the right +*> singular vectors are requested; +*> = N + MAX( LWQP3, LWCON, LWSVD ) if the singular values and the right +*> singular vectors are requested, and also +*> a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD, LWORQ ) if the full SVD is requested with JOBV = 'R'; +*> independent of JOBR; +*> = N + MAX( LWQP3, LWCON, LWSVD, LWORQ ) if the full SVD is requested, +*> JOBV = 'R' and, also a scaled condition +*> estimate requested; independent of JOBR; +*> = MAX( N + MAX( LWQP3, LWSVD, LWORQ ), +*> N + MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, N/2+LWORLQ, LWORQ) ) if the +*> full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='N' +*> = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWORQ ), +*> N + MAX( LWQP3, LWCON, N/2+LWLQF, N/2+LWSVD2, N/2+LWORLQ, LWORQ ) ) +*> if the full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='N', and also a scaled condition number estimate +*> requested.
+*> = MAX( N + MAX( LWQP3, LWSVD, LWORQ ), +*> N + MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, N/2+LWORQ2, LWORQ ) ) if the +*> full SVD is requested with JOBV = 'A', 'V', and JOBR ='T' +*> = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWORQ ), +*> N + MAX( LWQP3, LWCON, N/2+LWQRF, N/2+LWSVD2, N/2+LWORQ2, LWORQ ) ) +*> if the full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='T', and also a scaled condition number estimate +*> requested. +*> Finally, LWORK must be at least two: LWORK = MAX( 2, LWORK ). +*> +*> If LWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the WORK, IWORK, and RWORK arrays, and no error +*> message related to LWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] RWORK +*> \verbatim +*> RWORK is REAL array, dimension (max(1, LRWORK)). +*> On exit, +*> 1. If JOBA = 'E', RWORK(1) contains an estimate of the condition +*> number of column scaled A. If A = C * D where D is diagonal and C +*> has unit columns in the Euclidean norm, then, assuming full column rank, +*> N^(-1/4) * RWORK(1) <= ||pinv(C)||_2 <= N^(1/4) * RWORK(1). +*> Otherwise, RWORK(1) = -1. +*> 2. RWORK(2) contains the number of singular values computed as +*> exact zeros in SGESVD applied to the upper triangular or trapezoidal +*> R (from the initial QR factorization). In case of early exit (no call to +*> SGESVD, such as in the case of zero matrix) RWORK(2) = -1. +*> +*> If LIWORK, LWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> RWORK(1) returns the minimal LRWORK. +*> \endverbatim +*> +*> \param[in] LRWORK +*> \verbatim +*> LRWORK is INTEGER. +*> The dimension of the array RWORK. +*> If JOBP ='P', then LRWORK >= MAX(2, M). +*> Otherwise, LRWORK >= 2 +* +*> If LRWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the WORK, IWORK, and RWORK arrays, and no error +*> message related to LWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit. +*> < 0: if INFO = -i, the i-th argument had an illegal value. +*> > 0: if SBDSQR did not converge, INFO specifies how many superdiagonals +*> of an intermediate bidiagonal form B (computed in SGESVD) did not +*> converge to zero. +*> \endverbatim +* +*> \par Further Details: +* ======================== +*> +*> \verbatim +*> +*> 1. The data movement (matrix transpose) is coded using simple nested +*> DO-loops because BLAS and LAPACK do not provide corresponding subroutines. +*> Those DO-loops are easily identified in this source code - by the CONTINUE +*> statements labeled with 11**. In an optimized version of this code, the +*> nested DO loops should be replaced with calls to an optimized subroutine. +*> 2. This code scales A by 1/SQRT(M) if the largest ABS(A(i,j)) could cause +*> column norm overflow. This is the minimal precaution and it is left to the +*> SVD routine (SGESVD) to do its own preemptive scaling if potential over- +*> or underflows are detected. To avoid repeated scanning of the array A, +*> an optimal implementation would do all necessary scaling before calling +*> SGESVD and the scaling in SGESVD can be switched off. +*> 3. Other comments related to code optimization are given in comments in the +*> code, enclosed in [[double brackets]].
+*> \endverbatim +* +*> \par Bugs, examples and comments +* =========================== +* +*> \verbatim +*> Please report all bugs and send interesting examples and/or comments to +*> drmac@math.hr. Thank you. +*> \endverbatim +* +*> \par References +* =============== +* +*> \verbatim +*> [1] Zlatko Drmac, Algorithm 977: A QR-Preconditioned QR SVD Method for +*> Computing the SVD with High Accuracy. ACM Trans. Math. Softw. +*> 44(1): 11:1-11:30 (2017) +*> +*> SIGMA library, xGESVDQ section updated February 2016. +*> Developed and coded by Zlatko Drmac, Department of Mathematics +*> University of Zagreb, Croatia, drmac@math.hr +*> \endverbatim +* +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> Developed and coded by Zlatko Drmac, Department of Mathematics +*> University of Zagreb, Croatia, drmac@math.hr +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2018 +* +*> \ingroup realGEsing +* +* ===================================================================== + SUBROUTINE SGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, + $ S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, + $ WORK, LWORK, RWORK, LRWORK, INFO ) +* .. Scalar Arguments .. + IMPLICIT NONE + CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV + INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LWORK, LRWORK, + $ INFO +* .. +* .. Array Arguments .. + REAL A( LDA, * ), U( LDU, * ), V( LDV, * ), WORK( * ) + REAL S( * ), RWORK( * ) + INTEGER IWORK( * ) +* +* ===================================================================== +* +* .. Parameters .. + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) +* .. +* .. Local Scalars .. + INTEGER IERR, IWOFF, NR, N1, OPTRATIO, p, q + INTEGER LWCON, LWQP3, LWRK_SGELQF, LWRK_SGESVD, LWRK_SGESVD2, + $ LWRK_SGEQP3, LWRK_SGEQRF, LWRK_SORMLQ, LWRK_SORMQR, + $ LWRK_SORMQR2, LWLQF, LWQRF, LWSVD, LWSVD2, LWORQ, + $ LWORQ2, LWUNLQ, MINWRK, MINWRK2, OPTWRK, OPTWRK2, + $ IMINWRK, RMINWRK + LOGICAL ACCLA, ACCLM, ACCLH, ASCALED, CONDA, DNTWU, DNTWV, + $ LQUERY, LSVC0, LSVEC, ROWPRM, RSVEC, RTRANS, WNTUA, + $ WNTUF, WNTUR, WNTUS, WNTVA, WNTVR + REAL BIG, EPSLN, RTMP, SCONDA, SFMIN +* .. +* .. Local Arrays + REAL RDUMMY(1) +* .. +* .. External Subroutines (BLAS, LAPACK) + EXTERNAL SGELQF, SGEQP3, SGEQRF, SGESVD, SLACPY, SLAPMT, + $ SLASCL, SLASET, SLASWP, SSCAL, SPOCON, SORMLQ, + $ SORMQR, XERBLA +* .. +* .. External Functions (BLAS, LAPACK) + LOGICAL LSAME + INTEGER ISAMAX + REAL SLANGE, SNRM2, SLAMCH + EXTERNAL SLANGE, LSAME, ISAMAX, SNRM2, SLAMCH +* .. +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN, REAL, SQRT +* .. +* .. Executable Statements .. +* +* Test the input arguments +* + WNTUS = LSAME( JOBU, 'S' ) .OR. LSAME( JOBU, 'U' ) + WNTUR = LSAME( JOBU, 'R' ) + WNTUA = LSAME( JOBU, 'A' ) + WNTUF = LSAME( JOBU, 'F' ) + LSVC0 = WNTUS .OR. WNTUR .OR. WNTUA + LSVEC = LSVC0 .OR. WNTUF + DNTWU = LSAME( JOBU, 'N' ) +* + WNTVR = LSAME( JOBV, 'R' ) + WNTVA = LSAME( JOBV, 'A' ) .OR. LSAME( JOBV, 'V' ) + RSVEC = WNTVR .OR. WNTVA + DNTWV = LSAME( JOBV, 'N' ) +* + ACCLA = LSAME( JOBA, 'A' ) + ACCLM = LSAME( JOBA, 'M' ) + CONDA = LSAME( JOBA, 'E' ) + ACCLH = LSAME( JOBA, 'H' ) .OR. 
CONDA +* + ROWPRM = LSAME( JOBP, 'P' ) + RTRANS = LSAME( JOBR, 'T' ) +* + IF ( ROWPRM ) THEN + IF ( CONDA ) THEN + IMINWRK = MAX( 1, N + M - 1 + N ) + ELSE + IMINWRK = MAX( 1, N + M - 1 ) + END IF + RMINWRK = MAX( 2, M ) + ELSE + IF ( CONDA ) THEN + IMINWRK = MAX( 1, N + N ) + ELSE + IMINWRK = MAX( 1, N ) + END IF + RMINWRK = 2 + END IF + LQUERY = (LIWORK .EQ. -1 .OR. LWORK .EQ. -1 .OR. LRWORK .EQ. -1) + INFO = 0 + IF ( .NOT. ( ACCLA .OR. ACCLM .OR. ACCLH ) ) THEN + INFO = -1 + ELSE IF ( .NOT.( ROWPRM .OR. LSAME( JOBP, 'N' ) ) ) THEN + INFO = -2 + ELSE IF ( .NOT.( RTRANS .OR. LSAME( JOBR, 'N' ) ) ) THEN + INFO = -3 + ELSE IF ( .NOT.( LSVEC .OR. DNTWU ) ) THEN + INFO = -4 + ELSE IF ( WNTUR .AND. WNTVA ) THEN + INFO = -5 + ELSE IF ( .NOT.( RSVEC .OR. DNTWV )) THEN + INFO = -5 + ELSE IF ( M.LT.0 ) THEN + INFO = -6 + ELSE IF ( ( N.LT.0 ) .OR. ( N.GT.M ) ) THEN + INFO = -7 + ELSE IF ( LDA.LT.MAX( 1, M ) ) THEN + INFO = -9 + ELSE IF ( LDU.LT.1 .OR. ( LSVC0 .AND. LDU.LT.M ) .OR. + $ ( WNTUF .AND. LDU.LT.N ) ) THEN + INFO = -12 + ELSE IF ( LDV.LT.1 .OR. ( RSVEC .AND. LDV.LT.N ) .OR. + $ ( CONDA .AND. LDV.LT.N ) ) THEN + INFO = -14 + ELSE IF ( LIWORK .LT. IMINWRK .AND. .NOT. LQUERY ) THEN + INFO = -17 + END IF +* +* + IF ( INFO .EQ. 0 ) THEN +* .. compute the minimal and the optimal workspace lengths +* [[The expressions for computing the minimal and the optimal +* values of LWORK are written with a lot of redundancy and +* can be simplified. However, this detailed form is easier for +* maintenance and modifications of the code.]] +* +* .. minimal workspace length for SGEQP3 of an M x N matrix + LWQP3 = 3 * N + 1 +* .. minimal workspace length for SORMQR to build left singular vectors + IF ( WNTUS .OR. WNTUR ) THEN + LWORQ = MAX( N , 1 ) + ELSE IF ( WNTUA ) THEN + LWORQ = MAX( M , 1 ) + END IF +* .. minimal workspace length for SPOCON of an N x N matrix + LWCON = 3 * N +* .. SGESVD of an N x N matrix + LWSVD = MAX( 5 * N, 1 ) + IF ( LQUERY ) THEN + CALL SGEQP3( M, N, A, LDA, IWORK, RDUMMY, RDUMMY, -1, + $ IERR ) + LWRK_SGEQP3 = INT( RDUMMY(1) ) + IF ( WNTUS .OR. WNTUR ) THEN + CALL SORMQR( 'L', 'N', M, N, N, A, LDA, RDUMMY, U, + $ LDU, RDUMMY, -1, IERR ) + LWRK_SORMQR = INT( RDUMMY(1) ) + ELSE IF ( WNTUA ) THEN + CALL SORMQR( 'L', 'N', M, M, N, A, LDA, RDUMMY, U, + $ LDU, RDUMMY, -1, IERR ) + LWRK_SORMQR = INT( RDUMMY(1) ) + ELSE + LWRK_SORMQR = 0 + END IF + END IF + MINWRK = 2 + OPTWRK = 2 + IF ( .NOT. (LSVEC .OR. RSVEC )) THEN +* .. minimal and optimal sizes of the workspace if +* only the singular values are requested + IF ( CONDA ) THEN + MINWRK = MAX( N+LWQP3, LWCON, LWSVD ) + ELSE + MINWRK = MAX( N+LWQP3, LWSVD ) + END IF + IF ( LQUERY ) THEN + CALL SGESVD( 'N', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_SGESVD = INT( RDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = MAX( N+LWRK_SGEQP3, N+LWCON, LWRK_SGESVD ) + ELSE + OPTWRK = MAX( N+LWRK_SGEQP3, LWRK_SGESVD ) + END IF + END IF + ELSE IF ( LSVEC .AND. (.NOT.RSVEC) ) THEN +* .. 
minimal and optimal sizes of the workspace if the +* singular values and the left singular vectors are requested + IF ( CONDA ) THEN + MINWRK = N + MAX( LWQP3, LWCON, LWSVD, LWORQ ) + ELSE + MINWRK = N + MAX( LWQP3, LWSVD, LWORQ ) + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL SGESVD( 'N', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + ELSE + CALL SGESVD( 'O', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + END IF + LWRK_SGESVD = INT( RDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = N + MAX( LWRK_SGEQP3, LWCON, LWRK_SGESVD, + $ LWRK_SORMQR ) + ELSE + OPTWRK = N + MAX( LWRK_SGEQP3, LWRK_SGESVD, + $ LWRK_SORMQR ) + END IF + END IF + ELSE IF ( RSVEC .AND. (.NOT.LSVEC) ) THEN +* .. minimal and optimal sizes of the workspace if the +* singular values and the right singular vectors are requested + IF ( CONDA ) THEN + MINWRK = N + MAX( LWQP3, LWCON, LWSVD ) + ELSE + MINWRK = N + MAX( LWQP3, LWSVD ) + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL SGESVD( 'O', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + ELSE + CALL SGESVD( 'N', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + END IF + LWRK_SGESVD = INT( RDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = N + MAX( LWRK_SGEQP3, LWCON, LWRK_SGESVD ) + ELSE + OPTWRK = N + MAX( LWRK_SGEQP3, LWRK_SGESVD ) + END IF + END IF + ELSE +* .. minimal and optimal sizes of the workspace if the +* full SVD is requested + IF ( RTRANS ) THEN + MINWRK = MAX( LWQP3, LWSVD, LWORQ ) + IF ( CONDA ) MINWRK = MAX( MINWRK, LWCON ) + MINWRK = MINWRK + N + IF ( WNTVA ) THEN +* .. minimal workspace length for N x N/2 SGEQRF + LWQRF = MAX( N/2, 1 ) +* .. minimal workspace length for N/2 x N/2 SGESVD + LWSVD2 = MAX( 5 * (N/2), 1 ) + LWORQ2 = MAX( N, 1 ) + MINWRK2 = MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, + $ N/2+LWORQ2, LWORQ ) + IF ( CONDA ) MINWRK2 = MAX( MINWRK2, LWCON ) + MINWRK2 = N + MINWRK2 + MINWRK = MAX( MINWRK, MINWRK2 ) + END IF + ELSE + MINWRK = MAX( LWQP3, LWSVD, LWORQ ) + IF ( CONDA ) MINWRK = MAX( MINWRK, LWCON ) + MINWRK = MINWRK + N + IF ( WNTVA ) THEN +* ..
minimal workspace length for N/2 x N SGELQF + LWLQF = MAX( N/2, 1 ) + LWSVD2 = MAX( 5 * (N/2), 1 ) + LWUNLQ = MAX( N , 1 ) + MINWRK2 = MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, + $ N/2+LWUNLQ, LWORQ ) + IF ( CONDA ) MINWRK2 = MAX( MINWRK2, LWCON ) + MINWRK2 = N + MINWRK2 + MINWRK = MAX( MINWRK, MINWRK2 ) + END IF + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL SGESVD( 'O', 'A', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_SGESVD = INT( RDUMMY(1) ) + OPTWRK = MAX(LWRK_SGEQP3,LWRK_SGESVD,LWRK_SORMQR) + IF ( CONDA ) OPTWRK = MAX( OPTWRK, LWCON ) + OPTWRK = N + OPTWRK + IF ( WNTVA ) THEN + CALL SGEQRF(N,N/2,U,LDU,RDUMMY,RDUMMY,-1,IERR) + LWRK_SGEQRF = INT( RDUMMY(1) ) + CALL SGESVD( 'S', 'O', N/2,N/2, V,LDV, S, U,LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_SGESVD2 = INT( RDUMMY(1) ) + CALL SORMQR( 'R', 'C', N, N, N/2, U, LDU, RDUMMY, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_SORMQR2 = INT( RDUMMY(1) ) + OPTWRK2 = MAX( LWRK_SGEQP3, N/2+LWRK_SGEQRF, + $ N/2+LWRK_SGESVD2, N/2+LWRK_SORMQR2 ) + IF ( CONDA ) OPTWRK2 = MAX( OPTWRK2, LWCON ) + OPTWRK2 = N + OPTWRK2 + OPTWRK = MAX( OPTWRK, OPTWRK2 ) + END IF + ELSE + CALL SGESVD( 'S', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_SGESVD = INT( RDUMMY(1) ) + OPTWRK = MAX(LWRK_SGEQP3,LWRK_SGESVD,LWRK_SORMQR) + IF ( CONDA ) OPTWRK = MAX( OPTWRK, LWCON ) + OPTWRK = N + OPTWRK + IF ( WNTVA ) THEN + CALL SGELQF(N/2,N,U,LDU,RDUMMY,RDUMMY,-1,IERR) + LWRK_SGELQF = INT( RDUMMY(1) ) + CALL SGESVD( 'S','O', N/2,N/2, V, LDV, S, U, LDU, + $ V, LDV, RDUMMY, -1, IERR ) + LWRK_SGESVD2 = INT( RDUMMY(1) ) + CALL SORMLQ( 'R', 'N', N, N, N/2, U, LDU, RDUMMY, + $ V, LDV, RDUMMY,-1,IERR ) + LWRK_SORMLQ = INT( RDUMMY(1) ) + OPTWRK2 = MAX( LWRK_SGEQP3, N/2+LWRK_SGELQF, + $ N/2+LWRK_SGESVD2, N/2+LWRK_SORMLQ ) + IF ( CONDA ) OPTWRK2 = MAX( OPTWRK2, LWCON ) + OPTWRK2 = N + OPTWRK2 + OPTWRK = MAX( OPTWRK, OPTWRK2 ) + END IF + END IF + END IF + END IF +* + MINWRK = MAX( 2, MINWRK ) + OPTWRK = MAX( 2, OPTWRK ) + IF ( LWORK .LT. MINWRK .AND. (.NOT.LQUERY) ) INFO = -19 +* + END IF +* + IF (INFO .EQ. 0 .AND. LRWORK .LT. RMINWRK .AND. .NOT. LQUERY) THEN + INFO = -21 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGESVDQ', -INFO ) + RETURN + ELSE IF ( LQUERY ) THEN +* +* Return optimal workspace +* + IWORK(1) = IMINWRK + WORK(1) = OPTWRK + WORK(2) = MINWRK + RWORK(1) = RMINWRK + RETURN + END IF +* +* Quick return if the matrix is void. +* + IF( ( M.EQ.0 ) .OR. ( N.EQ.0 ) ) THEN +* .. all output is void. + RETURN + END IF +* + BIG = SLAMCH('O') + ASCALED = .FALSE. + IWOFF = 1 + IF ( ROWPRM ) THEN + IWOFF = M +* .. reordering the rows in decreasing sequence in the +* ell-infinity norm - this enhances numerical robustness in +* the case of differently scaled rows. + DO 1904 p = 1, M +* RWORK(p) = ABS( A(p,ICAMAX(N,A(p,1),LDA)) ) +* [[SLANGE will return NaN if an entry of the p-th row is Nan]] + RWORK(p) = SLANGE( 'M', 1, N, A(p,1), LDA, RDUMMY ) +* .. check for NaN's and Inf's + IF ( ( RWORK(p) .NE. RWORK(p) ) .OR. + $ ( (RWORK(p)*ZERO) .NE. ZERO ) ) THEN + INFO = -8 + CALL XERBLA( 'SGESVDQ', -INFO ) + RETURN + END IF + 1904 CONTINUE + DO 1952 p = 1, M - 1 + q = ISAMAX( M-p+1, RWORK(p), 1 ) + p - 1 + IWORK(N+p) = q + IF ( p .NE. q ) THEN + RTMP = RWORK(p) + RWORK(p) = RWORK(q) + RWORK(q) = RTMP + END IF + 1952 CONTINUE +* + IF ( RWORK(1) .EQ. ZERO ) THEN +* Quick return: A is the M x N zero matrix. 
+ NUMRANK = 0 + CALL SLASET( 'G', N, 1, ZERO, ZERO, S, N ) + IF ( WNTUS ) CALL SLASET('G', M, N, ZERO, ONE, U, LDU) + IF ( WNTUA ) CALL SLASET('G', M, M, ZERO, ONE, U, LDU) + IF ( WNTVA ) CALL SLASET('G', N, N, ZERO, ONE, V, LDV) + IF ( WNTUF ) THEN + CALL SLASET( 'G', N, 1, ZERO, ZERO, WORK, N ) + CALL SLASET( 'G', M, N, ZERO, ONE, U, LDU ) + END IF + DO 5001 p = 1, N + IWORK(p) = p + 5001 CONTINUE + IF ( ROWPRM ) THEN + DO 5002 p = N + 1, N + M - 1 + IWORK(p) = p - N + 5002 CONTINUE + END IF + IF ( CONDA ) RWORK(1) = -1 + RWORK(2) = -1 + RETURN + END IF +* + IF ( RWORK(1) .GT. BIG / SQRT(REAL(M)) ) THEN +* .. to prevent overflow in the QR factorization, scale the +* matrix by 1/sqrt(M) if too large entry detected + CALL SLASCL('G',0,0,SQRT(REAL(M)),ONE, M,N, A,LDA, IERR) + ASCALED = .TRUE. + END IF + CALL SLASWP( N, A, LDA, 1, M-1, IWORK(N+1), 1 ) + END IF +* +* .. At this stage, preemptive scaling is done only to avoid column +* norms overflows during the QR factorization. The SVD procedure should +* have its own scaling to save the singular values from overflows and +* underflows. That depends on the SVD procedure. +* + IF ( .NOT.ROWPRM ) THEN + RTMP = SLANGE( 'M', M, N, A, LDA, RDUMMY ) + IF ( ( RTMP .NE. RTMP ) .OR. + $ ( (RTMP*ZERO) .NE. ZERO ) ) THEN + INFO = -8 + CALL XERBLA( 'SGESVDQ', -INFO ) + RETURN + END IF + IF ( RTMP .GT. BIG / SQRT(REAL(M)) ) THEN +* .. to prevent overflow in the QR factorization, scale the +* matrix by 1/sqrt(M) if too large entry detected + CALL SLASCL('G',0,0, SQRT(REAL(M)),ONE, M,N, A,LDA, IERR) + ASCALED = .TRUE. + END IF + END IF +* +* .. QR factorization with column pivoting +* +* A * P = Q * [ R ] +* [ 0 ] +* + DO 1963 p = 1, N +* .. all columns are free columns + IWORK(p) = 0 + 1963 CONTINUE + CALL SGEQP3( M, N, A, LDA, IWORK, WORK, WORK(N+1), LWORK-N, + $ IERR ) +* +* If the user requested accuracy level allows truncation in the +* computed upper triangular factor, the matrix R is examined and, +* if possible, replaced with its leading upper trapezoidal part. +* + EPSLN = SLAMCH('E') + SFMIN = SLAMCH('S') +* SMALL = SFMIN / EPSLN + NR = N +* + IF ( ACCLA ) THEN +* +* Standard absolute error bound suffices. All sigma_i with +* sigma_i < N*EPS*||A||_F are flushed to zero. This is an +* aggressive enforcement of lower numerical rank by introducing a +* backward error of the order of N*EPS*||A||_F. + NR = 1 + RTMP = SQRT(REAL(N))*EPSLN + DO 3001 p = 2, N + IF ( ABS(A(p,p)) .LT. (RTMP*ABS(A(1,1))) ) GO TO 3002 + NR = NR + 1 + 3001 CONTINUE + 3002 CONTINUE +* + ELSEIF ( ACCLM ) THEN +* .. similarly as above, only slightly more gentle (less aggressive). +* Sudden drop on the diagonal of R is used as the criterion for being +* close-to-rank-deficient. The threshold is set to EPSLN=SLAMCH('E'). +* [[This can be made more flexible by replacing this hard-coded value +* with a user specified threshold.]] Also, the values that underflow +* will be truncated. + NR = 1 + DO 3401 p = 2, N + IF ( ( ABS(A(p,p)) .LT. (EPSLN*ABS(A(p-1,p-1))) ) .OR. + $ ( ABS(A(p,p)) .LT. SFMIN ) ) GO TO 3402 + NR = NR + 1 + 3401 CONTINUE + 3402 CONTINUE +* + ELSE +* .. RRQR not authorized to determine numerical rank except in the +* obvious case of zero pivots. +* .. inspect R for exact zeros on the diagonal; +* R(i,i)=0 => R(i:N,i:N)=0. + NR = 1 + DO 3501 p = 2, N + IF ( ABS(A(p,p)) .EQ. ZERO ) GO TO 3502 + NR = NR + 1 + 3501 CONTINUE + 3502 CONTINUE +* + IF ( CONDA ) THEN +* Estimate the scaled condition number of A. 
Use the fact that it is +* the same as the scaled condition number of R. +* .. V is used as workspace + CALL SLACPY( 'U', N, N, A, LDA, V, LDV ) +* Only the leading NR x NR submatrix of the triangular factor +* is considered. Only if NR=N will this give a reliable error +* bound. However, even for NR < N, this can be used on an +* expert level and obtain useful information in the sense of +* perturbation theory. + DO 3053 p = 1, NR + RTMP = SNRM2( p, V(1,p), 1 ) + CALL SSCAL( p, ONE/RTMP, V(1,p), 1 ) + 3053 CONTINUE + IF ( .NOT. ( LSVEC .OR. RSVEC ) ) THEN + CALL SPOCON( 'U', NR, V, LDV, ONE, RTMP, + $ WORK, IWORK(N+IWOFF), IERR ) + ELSE + CALL SPOCON( 'U', NR, V, LDV, ONE, RTMP, + $ WORK(N+1), IWORK(N+IWOFF), IERR ) + END IF + SCONDA = ONE / SQRT(RTMP) +* For NR=N, SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1), +* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA +* See the reference [1] for more details. + END IF +* + ENDIF +* + IF ( WNTUR ) THEN + N1 = NR + ELSE IF ( WNTUS .OR. WNTUF) THEN + N1 = N + ELSE IF ( WNTUA ) THEN + N1 = M + END IF +* + IF ( .NOT. ( RSVEC .OR. LSVEC ) ) THEN +*....................................................................... +* .. only the singular values are requested +*....................................................................... + IF ( RTRANS ) THEN +* +* .. compute the singular values of R**T = [A](1:NR,1:N)**T +* .. set the lower triangle of [A] to [A](1:NR,1:N)**T and +* the upper triangle of [A] to zero. + DO 1146 p = 1, MIN( N, NR ) + DO 1147 q = p + 1, N + A(q,p) = A(p,q) + IF ( q .LE. NR ) A(p,q) = ZERO + 1147 CONTINUE + 1146 CONTINUE +* + CALL SGESVD( 'N', 'N', N, NR, A, LDA, S, U, LDU, + $ V, LDV, WORK, LWORK, INFO ) +* + ELSE +* +* .. compute the singular values of R = [A](1:NR,1:N) +* + IF ( NR .GT. 1 ) + $ CALL SLASET( 'L', NR-1,NR-1, ZERO,ZERO, A(2,1), LDA ) + CALL SGESVD( 'N', 'N', NR, N, A, LDA, S, U, LDU, + $ V, LDV, WORK, LWORK, INFO ) +* + END IF +* + ELSE IF ( LSVEC .AND. ( .NOT. RSVEC) ) THEN +*....................................................................... +* .. the singular values and the left singular vectors requested +*....................................................................... + IF ( RTRANS ) THEN +* .. apply SGESVD to R**T +* .. copy R**T into [U] and overwrite [U] with the right singular +* vectors of R + DO 1192 p = 1, NR + DO 1193 q = p, N + U(q,p) = A(p,q) + 1193 CONTINUE + 1192 CONTINUE + IF ( NR .GT. 1 ) + $ CALL SLASET( 'U', NR-1,NR-1, ZERO,ZERO, U(1,2), LDU ) +* .. the left singular vectors not computed, the NR right singular +* vectors overwrite [U](1:NR,1:NR) as transposed. These +* will be pre-multiplied by Q to build the left singular vectors of A. + CALL SGESVD( 'N', 'O', N, NR, U, LDU, S, U, LDU, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* + DO 1119 p = 1, NR + DO 1120 q = p + 1, NR + RTMP = U(q,p) + U(q,p) = U(p,q) + U(p,q) = RTMP + 1120 CONTINUE + 1119 CONTINUE +* + ELSE +* .. apply SGESVD to R +* .. copy R into [U] and overwrite [U] with the left singular vectors + CALL SLACPY( 'U', NR, N, A, LDA, U, LDU ) + IF ( NR .GT. 1 ) + $ CALL SLASET( 'L', NR-1, NR-1, ZERO, ZERO, U(2,1), LDU ) +* .. the right singular vectors not computed, the NR left singular +* vectors overwrite [U](1:NR,1:NR) + CALL SGESVD( 'O', 'N', NR, N, U, LDU, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) +* .. now [U](1:NR,1:NR) contains the NR left singular vectors of +* R. These will be pre-multiplied by Q to build the left singular +* vectors of A. + END IF +* +* ..
assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. ( .NOT.WNTUF ) ) THEN + CALL SLASET('A', M-NR, NR, ZERO, ZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL SLASET( 'A',NR,N1-NR,ZERO,ZERO,U(1,NR+1), LDU ) + CALL SLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* +* The Q matrix from the first QRF is built into the left singular +* vectors matrix U. +* + IF ( .NOT.WNTUF ) + $ CALL SORMQR( 'L', 'N', M, N1, N, A, LDA, WORK, U, + $ LDU, WORK(N+1), LWORK-N, IERR ) + IF ( ROWPRM .AND. .NOT.WNTUF ) + $ CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(N+1), -1 ) +* + ELSE IF ( RSVEC .AND. ( .NOT. LSVEC ) ) THEN +*....................................................................... +* .. the singular values and the right singular vectors requested +*....................................................................... + IF ( RTRANS ) THEN +* .. apply SGESVD to R**T +* .. copy R**T into V and overwrite V with the left singular vectors + DO 1165 p = 1, NR + DO 1166 q = p, N + V(q,p) = (A(p,q)) + 1166 CONTINUE + 1165 CONTINUE + IF ( NR .GT. 1 ) + $ CALL SLASET( 'U', NR-1,NR-1, ZERO,ZERO, V(1,2), LDV ) +* .. the left singular vectors of R**T overwrite V, the right singular +* vectors not computed + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN + CALL SGESVD( 'O', 'N', N, NR, V, LDV, S, U, LDU, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* + DO 1121 p = 1, NR + DO 1122 q = p + 1, NR + RTMP = V(q,p) + V(q,p) = V(p,q) + V(p,q) = RTMP + 1122 CONTINUE + 1121 CONTINUE +* + IF ( NR .LT. N ) THEN + DO 1103 p = 1, NR + DO 1104 q = NR + 1, N + V(p,q) = V(q,p) + 1104 CONTINUE + 1103 CONTINUE + END IF + CALL SLAPMT( .FALSE., NR, N, V, LDV, IWORK ) + ELSE +* .. need all N right singular vectors and NR < N +* [!] This is a simple implementation that augments [V](1:N,1:NR) +* by padding a zero block. In the case NR << N, a more efficient +* way is to first use the QR factorization. For more details +* how to implement this, see the " FULL SVD " branch. + CALL SLASET('G', N, N-NR, ZERO, ZERO, V(1,NR+1), LDV) + CALL SGESVD( 'O', 'N', N, N, V, LDV, S, U, LDU, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* + DO 1123 p = 1, N + DO 1124 q = p + 1, N + RTMP = V(q,p) + V(q,p) = V(p,q) + V(p,q) = RTMP + 1124 CONTINUE + 1123 CONTINUE + CALL SLAPMT( .FALSE., N, N, V, LDV, IWORK ) + END IF +* + ELSE +* .. apply SGESVD to R +* .. copy R into V and overwrite V with the right singular vectors + CALL SLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL SLASET( 'L', NR-1, NR-1, ZERO, ZERO, V(2,1), LDV ) +* .. the right singular vectors overwrite V, the NR left singular +* vectors stored in U(1:NR,1:NR) + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN + CALL SGESVD( 'N', 'O', NR, N, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) + CALL SLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* .. now [V](1:NR,1:N) contains V(1:N,1:NR)**T + ELSE +* .. need all N right singular vectors and NR < N +* [!] This is a simple implementation that augments [V](1:NR,1:N) +* by padding a zero block. In the case NR << N, a more efficient +* way is to first use the LQ factorization. For more details +* how to implement this, see the " FULL SVD " branch. + CALL SLASET('G', N-NR, N, ZERO,ZERO, V(NR+1,1), LDV) + CALL SGESVD( 'N', 'O', N, N, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) + CALL SLAPMT( .FALSE., N, N, V, LDV, IWORK ) + END IF +* .. now [V] contains the transposed matrix of the right singular +* vectors of A.
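(Editorial aside, not part of the patch.) The SLAPMT calls in this branch are easy to misread: SGEQP3 factored A*P = Q*R, so the right singular vectors computed from R come out in the pivoted column order, and SLAPMT with FORWRD = .FALSE. applies the inverse of the permutation held in IWORK(1:N) to the columns of V, restoring the original column order of A. A minimal standalone sketch of that semantics; the demo program name PMTDEM and its data are hypothetical:
      PROGRAM PMTDEM
      IMPLICIT NONE
      INTEGER K( 3 )
      REAL X( 2, 3 )
*     Columns of X are assumed to be in the pivoted order K = (2,3,1).
      DATA X / 1.0E+0, 4.0E+0, 2.0E+0, 5.0E+0, 3.0E+0, 6.0E+0 /
      DATA K / 2, 3, 1 /
*     Backward permutation: column J is moved to position K(J), which
*     undoes the pivoting; SLAPMT leaves K unchanged on return.
      CALL SLAPMT( .FALSE., 2, 3, X, 2, K )
      PRINT *, X
      END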
+ END IF +* + ELSE +*....................................................................... +* .. FULL SVD requested +*....................................................................... + IF ( RTRANS ) THEN +* +* .. apply SGESVD to R**T [[this option is left for R&D&T]] +* + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN +* .. copy R**T into [V] and overwrite [V] with the left singular +* vectors of R**T + DO 1168 p = 1, NR + DO 1169 q = p, N + V(q,p) = A(p,q) + 1169 CONTINUE + 1168 CONTINUE + IF ( NR .GT. 1 ) + $ CALL SLASET( 'U', NR-1,NR-1, ZERO,ZERO, V(1,2), LDV ) +* +* .. the left singular vectors of R**T overwrite [V], the NR right +* singular vectors of R**T stored in [U](1:NR,1:NR) as transposed + CALL SGESVD( 'O', 'A', N, NR, V, LDV, S, V, LDV, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* .. assemble V + DO 1115 p = 1, NR + DO 1116 q = p + 1, NR + RTMP = V(q,p) + V(q,p) = V(p,q) + V(p,q) = RTMP + 1116 CONTINUE + 1115 CONTINUE + IF ( NR .LT. N ) THEN + DO 1101 p = 1, NR + DO 1102 q = NR+1, N + V(p,q) = V(q,p) + 1102 CONTINUE + 1101 CONTINUE + END IF + CALL SLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* + DO 1117 p = 1, NR + DO 1118 q = p + 1, NR + RTMP = U(q,p) + U(q,p) = U(p,q) + U(p,q) = RTMP + 1118 CONTINUE + 1117 CONTINUE +* + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL SLASET('A', M-NR,NR, ZERO,ZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL SLASET('A',NR,N1-NR,ZERO,ZERO,U(1,NR+1),LDU) + CALL SLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* + ELSE +* .. need all N right singular vectors and NR < N +* .. copy R**T into [V] and overwrite [V] with the left singular +* vectors of R**T +* [[The optimal ratio N/NR for using QRF instead of padding +* with zeros. Here hard coded to 2; it must be at least +* two due to work space constraints.]] +* OPTRATIO = ILAENV(6, 'SGESVD', 'S' // 'O', NR,N,0,0) +* OPTRATIO = MAX( OPTRATIO, 2 ) + OPTRATIO = 2 + IF ( OPTRATIO*NR .GT. N ) THEN + DO 1198 p = 1, NR + DO 1199 q = p, N + V(q,p) = A(p,q) + 1199 CONTINUE + 1198 CONTINUE + IF ( NR .GT. 1 ) + $ CALL SLASET('U',NR-1,NR-1, ZERO,ZERO, V(1,2),LDV) +* + CALL SLASET('A',N,N-NR,ZERO,ZERO,V(1,NR+1),LDV) + CALL SGESVD( 'O', 'A', N, N, V, LDV, S, V, LDV, + $ U, LDU, WORK(N+1), LWORK-N, INFO ) +* + DO 1113 p = 1, N + DO 1114 q = p + 1, N + RTMP = V(q,p) + V(q,p) = V(p,q) + V(p,q) = RTMP + 1114 CONTINUE + 1113 CONTINUE + CALL SLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x N1), i.e. (M x N) or (M x M). +* + DO 1111 p = 1, N + DO 1112 q = p + 1, N + RTMP = U(q,p) + U(q,p) = U(p,q) + U(p,q) = RTMP + 1112 CONTINUE + 1111 CONTINUE +* + IF ( ( N .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL SLASET('A',M-N,N,ZERO,ZERO,U(N+1,1),LDU) + IF ( N .LT. N1 ) THEN + CALL SLASET('A',N,N1-N,ZERO,ZERO,U(1,N+1),LDU) + CALL SLASET('A',M-N,N1-N,ZERO,ONE, + $ U(N+1,N+1), LDU ) + END IF + END IF + ELSE +* .. copy R**T into [U] and overwrite [U] with the right +* singular vectors of R + DO 1196 p = 1, NR + DO 1197 q = p, N + U(q,NR+p) = A(p,q) + 1197 CONTINUE + 1196 CONTINUE + IF ( NR .GT. 
1 ) + $ CALL SLASET('U',NR-1,NR-1,ZERO,ZERO,U(1,NR+2),LDU) + CALL SGEQRF( N, NR, U(1,NR+1), LDU, WORK(N+1), + $ WORK(N+NR+1), LWORK-N-NR, IERR ) + DO 1143 p = 1, NR + DO 1144 q = 1, N + V(q,p) = U(p,NR+q) + 1144 CONTINUE + 1143 CONTINUE + CALL SLASET('U',NR-1,NR-1,ZERO,ZERO,V(1,2),LDV) + CALL SGESVD( 'S', 'O', NR, NR, V, LDV, S, U, LDU, + $ V,LDV, WORK(N+NR+1),LWORK-N-NR, INFO ) + CALL SLASET('A',N-NR,NR,ZERO,ZERO,V(NR+1,1),LDV) + CALL SLASET('A',NR,N-NR,ZERO,ZERO,V(1,NR+1),LDV) + CALL SLASET('A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV) + CALL SORMQR('R','C', N, N, NR, U(1,NR+1), LDU, + $ WORK(N+1),V,LDV,WORK(N+NR+1),LWORK-N-NR,IERR) + CALL SLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL SLASET('A',M-NR,NR,ZERO,ZERO,U(NR+1,1),LDU) + IF ( NR .LT. N1 ) THEN + CALL SLASET('A',NR,N1-NR,ZERO,ZERO,U(1,NR+1),LDU) + CALL SLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1),LDU) + END IF + END IF + END IF + END IF +* + ELSE +* +* .. apply SGESVD to R [[this is the recommended option]] +* + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN +* .. copy R into [V] and overwrite V with the right singular vectors + CALL SLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL SLASET( 'L', NR-1,NR-1, ZERO,ZERO, V(2,1), LDV ) +* .. the right singular vectors of R overwrite [V], the NR left +* singular vectors of R stored in [U](1:NR,1:NR) + CALL SGESVD( 'S', 'O', NR, N, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) + CALL SLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* .. now [V](1:NR,1:N) contains V(1:N,1:NR)**T +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL SLASET('A', M-NR,NR, ZERO,ZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL SLASET('A',NR,N1-NR,ZERO,ZERO,U(1,NR+1),LDU) + CALL SLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* + ELSE +* .. need all N right singular vectors and NR < N +* .. the requested number of the left singular vectors +* is then N1 (N or M) +* [[The optimal ratio N/NR for using LQ instead of padding +* with zeros. Here hard coded to 2; it must be at least +* two due to work space constraints.]] +* OPTRATIO = ILAENV(6, 'SGESVD', 'S' // 'O', NR,N,0,0) +* OPTRATIO = MAX( OPTRATIO, 2 ) + OPTRATIO = 2 + IF ( OPTRATIO * NR .GT. N ) THEN + CALL SLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL SLASET('L', NR-1,NR-1, ZERO,ZERO, V(2,1),LDV) +* .. the right singular vectors of R overwrite [V], the NR left +* singular vectors of R stored in [U](1:NR,1:NR) + CALL SLASET('A', N-NR,N, ZERO,ZERO, V(NR+1,1),LDV) + CALL SGESVD( 'S', 'O', N, N, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+1), LWORK-N, INFO ) + CALL SLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. now [V] contains the transposed matrix of the right +* singular vectors of A. The leading N left singular vectors +* are in [U](1:N,1:N) +* .. assemble the left singular vector matrix U of dimensions +* (M x N1), i.e. (M x N) or (M x M). + IF ( ( N .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL SLASET('A',M-N,N,ZERO,ZERO,U(N+1,1),LDU) + IF ( N .LT. N1 ) THEN + CALL SLASET('A',N,N1-N,ZERO,ZERO,U(1,N+1),LDU) + CALL SLASET( 'A',M-N,N1-N,ZERO,ONE, + $ U(N+1,N+1), LDU ) + END IF + END IF + ELSE + CALL SLACPY( 'U', NR, N, A, LDA, U(NR+1,1), LDU ) + IF ( NR .GT. 
1 ) + $ CALL SLASET('L',NR-1,NR-1,ZERO,ZERO,U(NR+2,1),LDU) + CALL SGELQF( NR, N, U(NR+1,1), LDU, WORK(N+1), + $ WORK(N+NR+1), LWORK-N-NR, IERR ) + CALL SLACPY('L',NR,NR,U(NR+1,1),LDU,V,LDV) + IF ( NR .GT. 1 ) + $ CALL SLASET('U',NR-1,NR-1,ZERO,ZERO,V(1,2),LDV) + CALL SGESVD( 'S', 'O', NR, NR, V, LDV, S, U, LDU, + $ V, LDV, WORK(N+NR+1), LWORK-N-NR, INFO ) + CALL SLASET('A',N-NR,NR,ZERO,ZERO,V(NR+1,1),LDV) + CALL SLASET('A',NR,N-NR,ZERO,ZERO,V(1,NR+1),LDV) + CALL SLASET('A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV) + CALL SORMLQ('R','N',N,N,NR,U(NR+1,1),LDU,WORK(N+1), + $ V, LDV, WORK(N+NR+1),LWORK-N-NR,IERR) + CALL SLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL SLASET('A',M-NR,NR,ZERO,ZERO,U(NR+1,1),LDU) + IF ( NR .LT. N1 ) THEN + CALL SLASET('A',NR,N1-NR,ZERO,ZERO,U(1,NR+1),LDU) + CALL SLASET( 'A',M-NR,N1-NR,ZERO,ONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF + END IF + END IF +* .. end of the "R**T or R" branch + END IF +* +* The Q matrix from the first QRF is built into the left singular +* vectors matrix U. +* + IF ( .NOT. WNTUF ) + $ CALL SORMQR( 'L', 'N', M, N1, N, A, LDA, WORK, U, + $ LDU, WORK(N+1), LWORK-N, IERR ) + IF ( ROWPRM .AND. .NOT.WNTUF ) + $ CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(N+1), -1 ) +* +* ... end of the "full SVD" branch + END IF +* +* Check whether some singular values are returned as zeros, e.g. +* due to underflow, and update the numerical rank. + p = NR + DO 4001 q = p, 1, -1 + IF ( S(q) .GT. ZERO ) GO TO 4002 + NR = NR - 1 + 4001 CONTINUE + 4002 CONTINUE +* +* .. if numerical rank deficiency is detected, the truncated +* singular values are set to zero. + IF ( NR .LT. N ) CALL SLASET( 'G', N-NR,1, ZERO,ZERO, S(NR+1), N ) +* .. undo scaling; this may cause overflow in the largest singular +* values. + IF ( ASCALED ) + $ CALL SLASCL( 'G',0,0, ONE,SQRT(REAL(M)), NR,1, S, N, IERR ) + IF ( CONDA ) RWORK(1) = SCONDA + RWORK(2) = p - NR +* .. p-NR is the number of singular values that are computed as +* exact zeros in SGESVD() applied to the (possibly truncated) +* full row rank triangular (trapezoidal) factor of A. + NUMRANK = NR +* + RETURN +* +* End of SGESVDQ +* + END diff --git a/lapack-netlib/SRC/sgesvj.f b/lapack-netlib/SRC/sgesvj.f index 7a7901135..fee5aba4a 100644 --- a/lapack-netlib/SRC/sgesvj.f +++ b/lapack-netlib/SRC/sgesvj.f @@ -90,13 +90,13 @@ *> JOBV is CHARACTER*1 *> Specifies whether to compute the right singular vectors, that *> is, the matrix V: -*> = 'V' : the matrix V is computed and returned in the array V -*> = 'A' : the Jacobi rotations are applied to the MV-by-N +*> = 'V': the matrix V is computed and returned in the array V +*> = 'A': the Jacobi rotations are applied to the MV-by-N *> array V. In other words, the right singular vector *> matrix V is not computed explicitly; instead it is *> applied to an MV-by-N matrix initially stored in the *> first MV rows of V. -*> = 'N' : the matrix V is not computed and the array V is not +*> = 'N': the matrix V is not computed and the array V is not *> referenced *> \endverbatim *> @@ -118,8 +118,8 @@ *> A is REAL array, dimension (LDA,N) *> On entry, the M-by-N matrix A. *> On exit, -*> If JOBU .EQ. 'U' .OR. JOBU .EQ. 'C': -*> If INFO .EQ. 0 : +*> If JOBU = 'U' .OR. JOBU = 'C': +*> If INFO = 0: *> RANKA orthonormal columns of U are returned in the *> leading RANKA columns of the array A. 
Here RANKA <= N *> is the number of computed singular values of A that are @@ -129,9 +129,9 @@ *> in the array WORK as RANKA=NINT(WORK(2)). Also see the *> descriptions of SVA and WORK. The computed columns of U *> are mutually numerically orthogonal up to approximately -*> TOL=SQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU.EQ.'C'), +*> TOL=SQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU = 'C'), *> see the description of JOBU. -*> If INFO .GT. 0, +*> If INFO > 0, *> the procedure SGESVJ did not converge in the given number *> of iterations (sweeps). In that case, the computed *> columns of U may not be orthogonal up to TOL. The output @@ -139,8 +139,8 @@ *> values in SVA(1:N)) and V is still a decomposition of the *> input matrix A in the sense that the residual *> ||A-SCALE*U*SIGMA*V^T||_2 / ||A||_2 is small. -*> If JOBU .EQ. 'N': -*> If INFO .EQ. 0 : +*> If JOBU = 'N': +*> If INFO = 0: *> Note that the left singular vectors are 'for free' in the *> one-sided Jacobi SVD algorithm. However, if only the *> singular values are needed, the level of numerical @@ -149,7 +149,7 @@ *> numerically orthogonal up to approximately M*EPS. Thus, *> on exit, A contains the columns of U scaled with the *> corresponding singular values. -*> If INFO .GT. 0 : +*> If INFO > 0: *> the procedure SGESVJ did not converge in the given number *> of iterations (sweeps). *> \endverbatim @@ -164,9 +164,9 @@ *> \verbatim *> SVA is REAL array, dimension (N) *> On exit, -*> If INFO .EQ. 0 : +*> If INFO = 0 : *> depending on the value SCALE = WORK(1), we have: -*> If SCALE .EQ. ONE: +*> If SCALE = ONE: *> SVA(1:N) contains the computed singular values of A. *> During the computation SVA contains the Euclidean column *> norms of the iterated matrices in the array A. @@ -175,7 +175,7 @@ *> factored representation is due to the fact that some of the *> singular values of A might underflow or overflow. *> -*> If INFO .GT. 0 : +*> If INFO > 0 : *> the procedure SGESVJ did not converge in the given number of *> iterations (sweeps) and SCALE*SVA(1:N) may not be accurate. *> \endverbatim @@ -183,7 +183,7 @@ *> \param[in] MV *> \verbatim *> MV is INTEGER -*> If JOBV .EQ. 'A', then the product of Jacobi rotations in SGESVJ +*> If JOBV = 'A', then the product of Jacobi rotations in SGESVJ *> is applied to the first MV rows of V. See the description of JOBV. *> \endverbatim *> @@ -201,16 +201,16 @@ *> \param[in] LDV *> \verbatim *> LDV is INTEGER -*> The leading dimension of the array V, LDV .GE. 1. -*> If JOBV .EQ. 'V', then LDV .GE. max(1,N). -*> If JOBV .EQ. 'A', then LDV .GE. max(1,MV) . +*> The leading dimension of the array V, LDV >= 1. +*> If JOBV = 'V', then LDV >= max(1,N). +*> If JOBV = 'A', then LDV >= max(1,MV) . *> \endverbatim *> *> \param[in,out] WORK *> \verbatim *> WORK is REAL array, dimension (LWORK) *> On entry, -*> If JOBU .EQ. 'C' : +*> If JOBU = 'C' : *> WORK(1) = CTOL, where CTOL defines the threshold for convergence. *> The process stops if all columns of A are mutually *> orthogonal up to CTOL*EPS, EPS=SLAMCH('E'). @@ -230,7 +230,7 @@ *> WORK(5) = max_{i.NE.j} |COS(A(:,i),A(:,j))| in the last sweep. *> This is useful information in cases when SGESVJ did *> not converge, as it can be used to estimate whether -*> the output is stil useful and for post festum analysis. +*> the output is still useful and for post festum analysis. *> WORK(6) = the largest absolute value over all sines of the *> Jacobi rotation angles in the last sweep. It can be *> useful for a post festum analysis. 
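(Editorial aside, not part of the patch.) A point worth underlining from the SVA and WORK descriptions above: SGESVJ returns the singular values in the factored form SCALE*SVA(i), with SCALE = WORK(1), precisely because some of them may over- or underflow in working precision. A minimal post-processing sketch; the routine name SVJVAL and the output array SIGMA are hypothetical, not part of LAPACK:
      SUBROUTINE SVJVAL( N, SVA, WORK, SIGMA )
*     Expand the factored singular values: SIGMA(I) = SCALE*SVA(I).
*     The product itself may overflow or underflow, which is exactly
*     why SGESVJ returns the factored representation in the first place.
      IMPLICIT NONE
      INTEGER N, I
      REAL SVA( * ), WORK( * ), SIGMA( * )
      REAL SCALE
      SCALE = WORK( 1 )
      DO 10 I = 1, N
         SIGMA( I ) = SCALE*SVA( I )
   10 CONTINUE
      RETURN
      END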
@@ -245,9 +245,9 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit. -*> < 0 : if INFO = -i, then the i-th argument had an illegal value -*> > 0 : SGESVJ did not converge in the maximal allowed number (30) +*> = 0: successful exit. +*> < 0: if INFO = -i, then the i-th argument had an illegal value +*> > 0: SGESVJ did not converge in the maximal allowed number (30) *> of sweeps. The output may still be useful. See the *> description of WORK. *> \endverbatim diff --git a/lapack-netlib/SRC/sgesvxx.f b/lapack-netlib/SRC/sgesvxx.f index 281f198d5..7cb29d5ab 100644 --- a/lapack-netlib/SRC/sgesvxx.f +++ b/lapack-netlib/SRC/sgesvxx.f @@ -411,7 +411,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -447,14 +447,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -462,9 +462,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/sgetc2.f b/lapack-netlib/SRC/sgetc2.f index b0301b953..6bf0a93c6 100644 --- a/lapack-netlib/SRC/sgetc2.f +++ b/lapack-netlib/SRC/sgetc2.f @@ -85,7 +85,7 @@ *> \verbatim *> INFO is INTEGER *> = 0: successful exit -*> > 0: if INFO = k, U(k, k) is likely to produce owerflow if +*> > 0: if INFO = k, U(k, k) is likely to produce overflow if *> we try to solve for x in Ax = b. So U is perturbed to *> avoid the overflow. 
*> \endverbatim diff --git a/lapack-netlib/SRC/sgetsls.f b/lapack-netlib/SRC/sgetsls.f index 35af66c19..3bf084515 100644 --- a/lapack-netlib/SRC/sgetsls.f +++ b/lapack-netlib/SRC/sgetsls.f @@ -1,3 +1,5 @@ +*> \brief \b SGETSLS +* * Definition: * =========== * @@ -154,7 +156,7 @@ * *> \date June 2017 * -*> \ingroup doubleGEsolve +*> \ingroup realGEsolve * * ===================================================================== SUBROUTINE SGETSLS( TRANS, M, N, NRHS, A, LDA, B, LDB, @@ -256,7 +258,7 @@ TSZM = INT( TQ( 1 ) ) LWM = INT( WORKQ( 1 ) ) CALL SGEMLQ( 'L', TRANS, N, NRHS, M, A, LDA, TQ, - $ TSZO, B, LDB, WORKQ, -1, INFO2 ) + $ TSZM, B, LDB, WORKQ, -1, INFO2 ) LWM = MAX( LWM, INT( WORKQ( 1 ) ) ) WSIZEO = TSZO + LWO WSIZEM = TSZM + LWM diff --git a/lapack-netlib/SRC/sggesx.f b/lapack-netlib/SRC/sggesx.f index 3c6273dcf..25691d164 100644 --- a/lapack-netlib/SRC/sggesx.f +++ b/lapack-netlib/SRC/sggesx.f @@ -131,10 +131,10 @@ *> \verbatim *> SENSE is CHARACTER*1 *> Determines which reciprocal condition numbers are computed. -*> = 'N' : None are computed; -*> = 'E' : Computed for average of selected eigenvalues only; -*> = 'V' : Computed for selected deflating subspaces only; -*> = 'B' : Computed for both. +*> = 'N': None are computed; +*> = 'E': Computed for average of selected eigenvalues only; +*> = 'V': Computed for selected deflating subspaces only; +*> = 'B': Computed for both. *> If SENSE = 'E', 'V', or 'B', SORT must equal 'S'. *> \endverbatim *> diff --git a/lapack-netlib/SRC/sgsvj0.f b/lapack-netlib/SRC/sgsvj0.f index e580efc30..d9177d818 100644 --- a/lapack-netlib/SRC/sgsvj0.f +++ b/lapack-netlib/SRC/sgsvj0.f @@ -117,7 +117,7 @@ *> \param[in] MV *> \verbatim *> MV is INTEGER -*> If JOBV .EQ. 'A', then MV rows of V are post-multipled by a +*> If JOBV = 'A', then MV rows of V are post-multiplied by a *> sequence of Jacobi rotations. *> If JOBV = 'N', then MV is not referenced. *> \endverbatim @@ -125,9 +125,9 @@ *> \param[in,out] V *> \verbatim *> V is REAL array, dimension (LDV,N) -*> If JOBV .EQ. 'V' then N rows of V are post-multipled by a +*> If JOBV = 'V' then N rows of V are post-multiplied by a *> sequence of Jacobi rotations. -*> If JOBV .EQ. 'A' then MV rows of V are post-multipled by a +*> If JOBV = 'A' then MV rows of V are post-multiplied by a *> sequence of Jacobi rotations. *> If JOBV = 'N', then V is not referenced. *> \endverbatim @@ -136,8 +136,8 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V, LDV >= 1. -*> If JOBV = 'V', LDV .GE. N. -*> If JOBV = 'A', LDV .GE. MV. +*> If JOBV = 'V', LDV >= N. +*> If JOBV = 'A', LDV >= MV. *> \endverbatim *> *> \param[in] EPS @@ -157,7 +157,7 @@ *> TOL is REAL *> TOL is the threshold for Jacobi rotations. For a pair *> A(:,p), A(:,q) of pivot columns, the Jacobi rotation is -*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL. +*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) > TOL. *> \endverbatim *> *> \param[in] NSWEEP @@ -175,14 +175,14 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> LWORK is the dimension of WORK. LWORK .GE. M. +*> LWORK is the dimension of WORK. LWORK >= M. *> \endverbatim *> *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit. -*> < 0 : if INFO = -i, then the i-th argument had an illegal value +*> = 0: successful exit.
+*> < 0: if INFO = -i, then the i-th argument had an illegal value *> \endverbatim * * Authors: @@ -1045,7 +1045,7 @@ 1993 CONTINUE * end i=1:NSWEEP loop -* #:) Reaching this point means that the procedure has comleted the given +* #:) Reaching this point means that the procedure has completed the given * number of iterations. INFO = NSWEEP - 1 GO TO 1995 diff --git a/lapack-netlib/SRC/sgsvj1.f b/lapack-netlib/SRC/sgsvj1.f index 49b81cf4f..ea4ba2e0e 100644 --- a/lapack-netlib/SRC/sgsvj1.f +++ b/lapack-netlib/SRC/sgsvj1.f @@ -61,7 +61,7 @@ *> In terms of the columns of A, the first N1 columns are rotated 'against' *> the remaining N-N1 columns, trying to increase the angle between the *> corresponding subspaces. The off-diagonal block is N1-by(N-N1) and it is -*> tiled using quadratic tiles of side KBL. Here, KBL is a tunning parmeter. +*> tiled using quadratic tiles of side KBL. Here, KBL is a tuning parameter. *> The number of sweeps is given in NSWEEP and the orthogonality threshold *> is given in TOL. *> \endverbatim @@ -147,7 +147,7 @@ *> \param[in] MV *> \verbatim *> MV is INTEGER -*> If JOBV .EQ. 'A', then MV rows of V are post-multipled by a +*> If JOBV = 'A', then MV rows of V are post-multiplied by a *> sequence of Jacobi rotations. *> If JOBV = 'N', then MV is not referenced. *> \endverbatim @@ -155,9 +155,9 @@ *> \param[in,out] V *> \verbatim *> V is REAL array, dimension (LDV,N) -*> If JOBV .EQ. 'V' then N rows of V are post-multipled by a +*> If JOBV = 'V' then N rows of V are post-multiplied by a *> sequence of Jacobi rotations. -*> If JOBV .EQ. 'A' then MV rows of V are post-multipled by a +*> If JOBV = 'A' then MV rows of V are post-multiplied by a *> sequence of Jacobi rotations. *> If JOBV = 'N', then V is not referenced. *> \endverbatim @@ -166,8 +166,8 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V, LDV >= 1. -*> If JOBV = 'V', LDV .GE. N. -*> If JOBV = 'A', LDV .GE. MV. +*> If JOBV = 'V', LDV >= N. +*> If JOBV = 'A', LDV >= MV. *> \endverbatim *> *> \param[in] EPS @@ -187,7 +187,7 @@ *> TOL is REAL *> TOL is the threshold for Jacobi rotations. For a pair *> A(:,p), A(:,q) of pivot columns, the Jacobi rotation is -*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL. +*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) > TOL. *> \endverbatim *> *> \param[in] NSWEEP @@ -205,14 +205,14 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> LWORK is the dimension of WORK. LWORK .GE. M. +*> LWORK is the dimension of WORK. LWORK >= M. *> \endverbatim *> *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit. -*> < 0 : if INFO = -i, then the i-th argument had an illegal value +*> = 0: successful exit. +*> < 0: if INFO = -i, then the i-th argument had an illegal value *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/shseqr.f b/lapack-netlib/SRC/shseqr.f index 5654a4682..b5707f2c3 100644 --- a/lapack-netlib/SRC/shseqr.f +++ b/lapack-netlib/SRC/shseqr.f @@ -70,7 +70,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -87,7 +87,7 @@ *> set by a previous call to SGEBAL, and then passed to ZGEHRD *> when the matrix output by SGEBAL is reduced to Hessenberg *> form. Otherwise ILO and IHI should be set to 1 and N -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0.
*> \endverbatim *> @@ -100,20 +100,20 @@ *> (the Schur form); 2-by-2 diagonal blocks (corresponding to *> complex conjugate pairs of eigenvalues) are returned in *> standard form, with H(i,i) = H(i+1,i+1) and -*> H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and JOB = 'E', the +*> H(i+1,i)*H(i,i+1) < 0. If INFO = 0 and JOB = 'E', the *> contents of H are unspecified on exit. (The output value of -*> H when INFO.GT.0 is given under the description of INFO +*> H when INFO > 0 is given under the description of INFO *> below.) *> *> Unlike earlier versions of SHSEQR, this subroutine may -*> explicitly H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 +*> explicitly H(i,j) = 0 for i > j and j = 1, 2, ... ILO-1 *> or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). *> \endverbatim *> *> \param[out] WR @@ -128,8 +128,8 @@ *> The real and imaginary parts, respectively, of the computed *> eigenvalues. If two eigenvalues are computed as a complex *> conjugate pair, they are stored in consecutive elements of -*> WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and -*> WI(i+1) .LT. 0. If JOB = 'S', the eigenvalues are stored in +*> WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and +*> WI(i+1) < 0. If JOB = 'S', the eigenvalues are stored in *> the same order as on the diagonal of the Schur form returned *> in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 *> diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and @@ -148,7 +148,7 @@ *> if INFO = 0, Z contains Q*Z. *> Normally Q is the orthogonal matrix generated by SORGHR *> after the call to SGEHRD which formed the Hessenberg matrix -*> H. (The output value of Z when INFO.GT.0 is given under +*> H. (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -156,7 +156,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if COMPZ = 'I' or -*> COMPZ = 'V', then LDZ.GE.MAX(1,N). Otherwize, LDZ.GE.1. +*> COMPZ = 'V', then LDZ >= MAX(1,N). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -169,7 +169,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient and delivers very good and sometimes *> optimal performance. However, LWORK as large as 11*N *> may be required for optimal performance. A workspace @@ -187,21 +187,21 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .LT. 0: if INFO = -i, the i-th argument had an illegal +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal *> value -*> .GT. 0: if INFO = i, SHSEQR failed to compute all of +*> > 0: if INFO = i, SHSEQR failed to compute all of *> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR *> and WI contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 0 and JOB = 'E', then on exit, the +*> If INFO > 0 and JOB = 'E', then on exit, the *> remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 
0 and JOB = 'S', then on exit +*> If INFO > 0 and JOB = 'S', then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -209,19 +209,19 @@ *> value of H is upper Hessenberg and quasi-triangular *> in rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and COMPZ = 'V', then on exit +*> If INFO > 0 and COMPZ = 'V', then on exit *> *> (final value of Z) = (initial value of Z)*U *> *> where U is the orthogonal matrix in (*) (regard- *> less of the value of JOB.) *> -*> If INFO .GT. 0 and COMPZ = 'I', then on exit +*> If INFO > 0 and COMPZ = 'I', then on exit *> (final value of Z) = U *> where U is the orthogonal matrix in (*) (regard- *> less of the value of JOB.) *> -*> If INFO .GT. 0 and COMPZ = 'N', then Z is not +*> If INFO > 0 and COMPZ = 'N', then Z is not *> accessed. *> \endverbatim * @@ -261,8 +261,8 @@ *> This depends on ILO, IHI and NS. NS is the *> number of simultaneous shifts returned *> by ILAENV(ISPEC=15). (See ISPEC=15 below.) -*> The default for (IHI-ILO+1).LE.500 is NS. -*> The default for (IHI-ILO+1).GT.500 is 3*NS/2. +*> The default for (IHI-ILO+1) <= 500 is NS. +*> The default for (IHI-ILO+1) > 500 is 3*NS/2. *> *> ISPEC=14: Nibble crossover point. (See IPARMQ for *> details.) Default: 14% of deflation window @@ -341,8 +341,8 @@ PARAMETER ( NTINY = 11 ) * * ==== NL allocates some local workspace to help small matrices -* . through a rare SLAHQR failure. NL .GT. NTINY = 11 is -* . required and NL .LE. NMIN = ILAENV(ISPEC=12,...) is recom- +* . through a rare SLAHQR failure. NL > NTINY = 11 is +* . required and NL <= NMIN = ILAENV(ISPEC=12,...) is recom- * . mended. (The default value of NMIN is 75.) Using NL = 49 * . allows up to six simultaneous shifts and a 16-by-16 * . deflation window. ==== diff --git a/lapack-netlib/SRC/sla_gbrcond.f b/lapack-netlib/SRC/sla_gbrcond.f index 36aa93dc9..7f2c4062e 100644 --- a/lapack-netlib/SRC/sla_gbrcond.f +++ b/lapack-netlib/SRC/sla_gbrcond.f @@ -140,13 +140,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension (5*N). *> Workspace. *> \endverbatim *> -*> \param[in] IWORK +*> \param[out] IWORK *> \verbatim *> IWORK is INTEGER array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/sla_gbrfsx_extended.f b/lapack-netlib/SRC/sla_gbrfsx_extended.f index a81feb45e..0fd1fd350 100644 --- a/lapack-netlib/SRC/sla_gbrfsx_extended.f +++ b/lapack-netlib/SRC/sla_gbrfsx_extended.f @@ -65,19 +65,19 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] TRANS_TYPE *> \verbatim *> TRANS_TYPE is INTEGER *> Specifies the transposition operation on A. -*> The value is defined by ILATRANS(T) where T is a CHARACTER and -*> T = 'N': No transpose +*> The value is defined by ILATRANS(T) where T is a CHARACTER and T +*> = 'N': No transpose *> = 'T': Transpose *> = 'C': Conjugate transpose *> \endverbatim @@ -269,7 +269,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 
3, then at most +*> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/sla_gercond.f b/lapack-netlib/SRC/sla_gercond.f index 349a1b5be..e54e0d7b4 100644 --- a/lapack-netlib/SRC/sla_gercond.f +++ b/lapack-netlib/SRC/sla_gercond.f @@ -122,13 +122,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension (3*N). *> Workspace. *> \endverbatim *> -*> \param[in] IWORK +*> \param[out] IWORK *> \verbatim *> IWORK is INTEGER array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/sla_gerfsx_extended.f b/lapack-netlib/SRC/sla_gerfsx_extended.f index 1795ea975..84d1ae31b 100644 --- a/lapack-netlib/SRC/sla_gerfsx_extended.f +++ b/lapack-netlib/SRC/sla_gerfsx_extended.f @@ -65,19 +65,19 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] TRANS_TYPE *> \verbatim *> TRANS_TYPE is INTEGER *> Specifies the transposition operation on A. -*> The value is defined by ILATRANS(T) where T is a CHARACTER and -*> T = 'N': No transpose +*> The value is defined by ILATRANS(T) where T is a CHARACTER and T +*> = 'N': No transpose *> = 'T': Transpose *> = 'C': Conjugate transpose *> \endverbatim @@ -257,7 +257,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERRS_C is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERRS_C is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERRS_C(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/sla_porcond.f b/lapack-netlib/SRC/sla_porcond.f index 9dd7c587b..729581f46 100644 --- a/lapack-netlib/SRC/sla_porcond.f +++ b/lapack-netlib/SRC/sla_porcond.f @@ -112,13 +112,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension (3*N). *> Workspace. *> \endverbatim *> -*> \param[in] IWORK +*> \param[out] IWORK *> \verbatim *> IWORK is INTEGER array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/sla_porfsx_extended.f b/lapack-netlib/SRC/sla_porfsx_extended.f index 27baa20d1..abbfebb83 100644 --- a/lapack-netlib/SRC/sla_porfsx_extended.f +++ b/lapack-netlib/SRC/sla_porfsx_extended.f @@ -65,11 +65,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -246,7 +246,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed.
If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/sla_syrcond.f b/lapack-netlib/SRC/sla_syrcond.f index c4b204cc6..0c9e2b361 100644 --- a/lapack-netlib/SRC/sla_syrcond.f +++ b/lapack-netlib/SRC/sla_syrcond.f @@ -118,13 +118,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension (3*N). *> Workspace. *> \endverbatim *> -*> \param[in] IWORK +*> \param[out] IWORK *> \verbatim *> IWORK is INTEGER array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/sla_syrfsx_extended.f b/lapack-netlib/SRC/sla_syrfsx_extended.f index f7b909ac0..a83a9db98 100644 --- a/lapack-netlib/SRC/sla_syrfsx_extended.f +++ b/lapack-netlib/SRC/sla_syrfsx_extended.f @@ -67,11 +67,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -255,7 +255,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/sla_syrpvgrw.f b/lapack-netlib/SRC/sla_syrpvgrw.f index f5eb81b1f..a0a235ee3 100644 --- a/lapack-netlib/SRC/sla_syrpvgrw.f +++ b/lapack-netlib/SRC/sla_syrpvgrw.f @@ -101,7 +101,7 @@ *> as determined by SSYTRF. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension (2*N) *> \endverbatim diff --git a/lapack-netlib/SRC/sla_wwaddw.f b/lapack-netlib/SRC/sla_wwaddw.f index 96a7d3542..e390c9fab 100644 --- a/lapack-netlib/SRC/sla_wwaddw.f +++ b/lapack-netlib/SRC/sla_wwaddw.f @@ -36,7 +36,7 @@ *> SLA_WWADDW adds a vector W into a doubled-single vector (X, Y). *> *> This works for all extant IBM's hex and binary floating point -*> arithmetics, but not for decimal. +*> arithmetic, but not for decimal. *> \endverbatim * * Arguments: diff --git a/lapack-netlib/SRC/slaed4.f b/lapack-netlib/SRC/slaed4.f index c65cba75a..64260843f 100644 --- a/lapack-netlib/SRC/slaed4.f +++ b/lapack-netlib/SRC/slaed4.f @@ -82,7 +82,7 @@ *> \param[out] DELTA *> \verbatim *> DELTA is REAL array, dimension (N) -*> If N .GT. 2, DELTA contains (D(j) - lambda_I) in its j-th +*> If N > 2, DELTA contains (D(j) - lambda_I) in its j-th *> component. If N = 1, then DELTA(1) = 1. If N = 2, see SLAED5 *> for detail. The vector DELTA contains the information necessary *> to construct the eigenvectors by SLAED3 and SLAED9. 
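(Editorial aside, not part of the patch.) On the SLA_WWADDW description above, "adds a vector W into a doubled-single vector (X, Y)": each pair X(i), Y(i) carries a high-order head and a rounding-error tail. A sketch of the classical fast two-sum update such a routine performs, assuming round-to-nearest binary arithmetic, |X(I)| >= |W(I)|, and no value-changing compiler optimizations; the name DSADD is hypothetical and this is not the library routine's verbatim body:
      SUBROUTINE DSADD( N, X, Y, W )
      IMPLICIT NONE
      INTEGER N, I
      REAL X( * ), Y( * ), W( * )
      REAL S, E
      DO 10 I = 1, N
*        Head of the sum, rounded to working precision.
         S = X( I ) + W( I )
*        E recovers the rounding error of the addition above
*        (Dekker's fast two-sum; valid when |X(I)| >= |W(I)|).
         E = ( X( I ) - S ) + W( I )
*        Fold the error into the tail and store the new head.
         Y( I ) = Y( I ) + E
         X( I ) = S
   10 CONTINUE
      RETURN
      END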
diff --git a/lapack-netlib/SRC/slaed8.f b/lapack-netlib/SRC/slaed8.f index 5ec117cb5..2e3f6f51f 100644 --- a/lapack-netlib/SRC/slaed8.f +++ b/lapack-netlib/SRC/slaed8.f @@ -353,7 +353,7 @@ Z( I ) = W( INDX( I ) ) 40 CONTINUE * -* Calculate the allowable deflation tolerence +* Calculate the allowable deflation tolerance * IMAX = ISAMAX( N, Z, 1 ) JMAX = ISAMAX( N, D, 1 ) diff --git a/lapack-netlib/SRC/slagtf.f b/lapack-netlib/SRC/slagtf.f index d3f0b6813..59ef097a7 100644 --- a/lapack-netlib/SRC/slagtf.f +++ b/lapack-netlib/SRC/slagtf.f @@ -125,7 +125,7 @@ *> then IN(k) = 1, otherwise IN(k) = 0. The element IN(n) *> returns the smallest positive integer j such that *> -*> abs( u(j,j) ).le. norm( (T - lambda*I)(j) )*TOL, +*> abs( u(j,j) ) <= norm( (T - lambda*I)(j) )*TOL, *> *> where norm( A(j) ) denotes the sum of the absolute values of *> the jth row of the matrix A. If no such j exists then IN(n) @@ -137,8 +137,8 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit -*> .lt. 0: if INFO = -k, the kth argument had an illegal value +*> = 0: successful exit +*> < 0: if INFO = -k, the kth argument had an illegal value *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/slagts.f b/lapack-netlib/SRC/slagts.f index 0c3c5239f..e0c8892d7 100644 --- a/lapack-netlib/SRC/slagts.f +++ b/lapack-netlib/SRC/slagts.f @@ -122,12 +122,12 @@ *> \param[in,out] TOL *> \verbatim *> TOL is REAL -*> On entry, with JOB .lt. 0, TOL should be the minimum +*> On entry, with JOB < 0, TOL should be the minimum *> perturbation to be made to very small diagonal elements of U. *> TOL should normally be chosen as about eps*norm(U), where eps *> is the relative machine precision, but if TOL is supplied as *> non-positive, then it is reset to eps*max( abs( u(i,j) ) ). -*> If JOB .gt. 0 then TOL is not referenced. +*> If JOB > 0 then TOL is not referenced. *> *> On exit, TOL is changed as described above, only if TOL is *> non-positive on entry. Otherwise TOL is unchanged. @@ -136,14 +136,14 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit -*> .lt. 0: if INFO = -i, the i-th argument had an illegal value -*> .gt. 0: overflow would occur when computing the INFO(th) -*> element of the solution vector x. This can only occur -*> when JOB is supplied as positive and either means -*> that a diagonal element of U is very small, or that -*> the elements of the right-hand side vector y are very -*> large. +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> > 0: overflow would occur when computing the INFO(th) +*> element of the solution vector x. This can only occur +*> when JOB is supplied as positive and either means +*> that a diagonal element of U is very small, or that +*> the elements of the right-hand side vector y are very +*> large. *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/slahqr.f b/lapack-netlib/SRC/slahqr.f index d91826e61..e5642d2bf 100644 --- a/lapack-netlib/SRC/slahqr.f +++ b/lapack-netlib/SRC/slahqr.f @@ -150,26 +150,26 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: If INFO = i, SLAHQR failed to compute all the +*> = 0: successful exit +*> > 0: If INFO = i, SLAHQR failed to compute all the *> eigenvalues ILO to IHI in a total of 30 iterations *> per eigenvalue; elements i+1:ihi of WR and WI *> contain those eigenvalues which have been *> successfully computed. *> -*> If INFO .GT. 
0 and WANTT is .FALSE., then on exit, +*> If INFO > 0 and WANTT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the *> eigenvalues of the upper Hessenberg matrix rows -*> and columns ILO thorugh INFO of the final, output +*> and columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> (*) (initial value of H)*U = U*(final value of H) -*> where U is an orthognal matrix. The final +*> where U is an orthogonal matrix. The final *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> (final value of Z) = (initial value of Z)*U *> where U is the orthogonal matrix in (*) *> (regardless of the value of WANTT.) diff --git a/lapack-netlib/SRC/slaln2.f b/lapack-netlib/SRC/slaln2.f index f9ceee7b7..4c6a55ec7 100644 --- a/lapack-netlib/SRC/slaln2.f +++ b/lapack-netlib/SRC/slaln2.f @@ -49,7 +49,7 @@ *> the first column of each being the real part and the second *> being the imaginary part. *> -*> "s" is a scaling factor (.LE. 1), computed by SLALN2, which is +*> "s" is a scaling factor (<= 1), computed by SLALN2, which is *> so chosen that X can be computed without overflow. X is further *> scaled if necessary to assure that norm(ca A - w D)*norm(X) is less *> than overflow. diff --git a/lapack-netlib/SRC/slamswlq.f b/lapack-netlib/SRC/slamswlq.f index b13d02b6c..59ab1a6ee 100644 --- a/lapack-netlib/SRC/slamswlq.f +++ b/lapack-netlib/SRC/slamswlq.f @@ -1,3 +1,4 @@ +*> \brief \b SLAMSWLQ * * Definition: * =========== diff --git a/lapack-netlib/SRC/slamtsqr.f b/lapack-netlib/SRC/slamtsqr.f index 84ac86ee2..58905ab46 100644 --- a/lapack-netlib/SRC/slamtsqr.f +++ b/lapack-netlib/SRC/slamtsqr.f @@ -1,3 +1,4 @@ +*> \brief \b SLAMTSQR * * Definition: * =========== diff --git a/lapack-netlib/SRC/slangb.f b/lapack-netlib/SRC/slangb.f index fd538b1b7..706e07501 100644 --- a/lapack-netlib/SRC/slangb.f +++ b/lapack-netlib/SRC/slangb.f @@ -129,6 +129,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER KL, KU, LDAB, N @@ -139,22 +140,24 @@ * * ===================================================================== * -* * .. Parameters .. REAL ONE, ZERO PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) * .. * .. Local Scalars .. INTEGER I, J, K, L - REAL SCALE, SUM, VALUE, TEMP + REAL SUM, VALUE, TEMP * .. -* .. External Subroutines .. - EXTERNAL SLASSQ +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. +* .. External Subroutines .. + EXTERNAL SLASSQ, SCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT * .. @@ -206,15 +209,22 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
* - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N L = MAX( 1, J-KU ) K = KU + 1 - J + L - CALL SLASSQ( MIN( N, J+KL )-L+1, AB( K, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( MIN( N, J+KL )-L+1, AB( K, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANGB = VALUE diff --git a/lapack-netlib/SRC/slange.f b/lapack-netlib/SRC/slange.f index 2eb8d7d14..0c80f1d40 100644 --- a/lapack-netlib/SRC/slange.f +++ b/lapack-netlib/SRC/slange.f @@ -119,6 +119,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER LDA, M, N @@ -135,10 +136,13 @@ * .. * .. Local Scalars .. INTEGER I, J - REAL SCALE, SUM, VALUE, TEMP + REAL SUM, VALUE, TEMP +* .. +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Subroutines .. - EXTERNAL SLASSQ + EXTERNAL SLASSQ, SCOMBSSQ * .. * .. External Functions .. LOGICAL LSAME, SISNAN @@ -194,13 +198,19 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N - CALL SLASSQ( M, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( M, A( 1, J ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANGE = VALUE diff --git a/lapack-netlib/SRC/slanhs.f b/lapack-netlib/SRC/slanhs.f index c5a077fbf..8913031a2 100644 --- a/lapack-netlib/SRC/slanhs.f +++ b/lapack-netlib/SRC/slanhs.f @@ -113,6 +113,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER LDA, N @@ -129,15 +130,18 @@ * .. * .. Local Scalars .. INTEGER I, J - REAL SCALE, SUM, VALUE + REAL SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL SLASSQ +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. +* .. External Subroutines .. + EXTERNAL SLASSQ, SCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT * .. @@ -188,13 +192,20 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N - CALL SLASSQ( MIN( N, J+1 ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( MIN( N, J+1 ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANHS = VALUE diff --git a/lapack-netlib/SRC/slansb.f b/lapack-netlib/SRC/slansb.f index 8f3fe1eb9..23519025d 100644 --- a/lapack-netlib/SRC/slansb.f +++ b/lapack-netlib/SRC/slansb.f @@ -134,6 +134,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER K, LDAB, N @@ -150,15 +151,18 @@ * .. * .. Local Scalars .. INTEGER I, J, L - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE * .. -* .. External Subroutines .. 
- EXTERNAL SLASSQ +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. +* .. External Subroutines .. + EXTERNAL SLASSQ, SCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT * .. @@ -225,29 +229,47 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( K.GT.0 ) THEN IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL SLASSQ( MIN( J-1, K ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE L = K + 1 ELSE DO 120 J = 1, N - 1 - CALL SLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE L = 1 END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) ELSE L = 1 END IF - CALL SLASSQ( N, AB( L, 1 ), LDAB, SCALE, SUM ) - VALUE = SCALE*SQRT( SUM ) +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( N, AB( L, 1 ), LDAB, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANSB = VALUE diff --git a/lapack-netlib/SRC/slansp.f b/lapack-netlib/SRC/slansp.f index 35390cd1c..7e29d778b 100644 --- a/lapack-netlib/SRC/slansp.f +++ b/lapack-netlib/SRC/slansp.f @@ -119,6 +119,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER N @@ -135,15 +136,18 @@ * .. * .. Local Scalars .. INTEGER I, J, K - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL SLASSQ +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. +* .. External Subroutines .. + EXTERNAL SLASSQ, SCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT * .. @@ -217,31 +221,48 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
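The slansp hunk below also needs the diagonal of the packed matrix, whose entries sit at varying offsets in AP, so the classic one-element update is inlined on COLSSQ instead of calling SLASSQ with a fixed stride. A sketch of that invariant-preserving update for a single value X (the same recurrence SLASSQ applies elementwise):

      SUBROUTINE SSQ_UPDATE( X, SCL, SUMSQ )
*     Maintain SCL**2 * SUMSQ = (old SCL**2 * SUMSQ) + X**2
*     without ever squaring a ratio larger than one.
      REAL               X, SCL, SUMSQ, ABSX
      ABSX = ABS( X )
      IF( ABSX.NE.0.0E+0 ) THEN
         IF( SCL.LT.ABSX ) THEN
            SUMSQ = 1.0E+0 + SUMSQ*( SCL / ABSX )**2
            SCL = ABSX
         ELSE
            SUMSQ = SUMSQ + ( ABSX / SCL )**2
         END IF
      END IF
      RETURN
      END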
+* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE K = 2 IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL SLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( J-1, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + J 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL SLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( N-J, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 120 CONTINUE END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* K = 1 + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE DO 130 I = 1, N IF( AP( K ).NE.ZERO ) THEN ABSA = ABS( AP( K ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF IF( LSAME( UPLO, 'U' ) ) THEN @@ -250,7 +271,8 @@ K = K + N - I + 1 END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + CALL SCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANSP = VALUE diff --git a/lapack-netlib/SRC/slansy.f b/lapack-netlib/SRC/slansy.f index c8400e530..66ff1c5c7 100644 --- a/lapack-netlib/SRC/slansy.f +++ b/lapack-netlib/SRC/slansy.f @@ -127,6 +127,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER LDA, N @@ -143,15 +144,18 @@ * .. * .. Local Scalars .. INTEGER I, J - REAL ABSA, SCALE, SUM, VALUE + REAL ABSA, SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL SLASSQ +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. +* .. External Subroutines .. + EXTERNAL SLASSQ, SCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT * .. @@ -216,21 +220,39 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL SLASSQ( J-1, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( J-1, A( 1, J ), 1, COLSSQ(1), COLSSQ(2) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL SLASSQ( N-J, A( J+1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( N-J, A( J+1, J ), 1, COLSSQ(1), COLSSQ(2) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE END IF - SUM = 2*SUM - CALL SLASSQ( N, A, LDA+1, SCALE, SUM ) - VALUE = SCALE*SQRT( SUM ) + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( N, A, LDA+1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANSY = VALUE diff --git a/lapack-netlib/SRC/slantb.f b/lapack-netlib/SRC/slantb.f index 3588779cb..5b94618e1 100644 --- a/lapack-netlib/SRC/slantb.f +++ b/lapack-netlib/SRC/slantb.f @@ -145,6 +145,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER K, LDAB, N @@ -162,15 +163,18 @@ * .. Local Scalars .. 
LOGICAL UDIAG INTEGER I, J, L - REAL SCALE, SUM, VALUE + REAL SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL SLASSQ +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. +* .. External Subroutines .. + EXTERNAL SLASSQ, SCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT * .. @@ -311,46 +315,61 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N IF( K.GT.0 ) THEN DO 280 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL SLASSQ( MIN( J-1, K ), - $ AB( MAX( K+2-J, 1 ), J ), 1, SCALE, - $ SUM ) + $ AB( MAX( K+2-J, 1 ), J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 280 CONTINUE END IF ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 290 J = 1, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL SLASSQ( MIN( J, K+1 ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 290 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N IF( K.GT.0 ) THEN DO 300 J = 1, N - 1 - CALL SLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 300 CONTINUE END IF ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 310 J = 1, N - CALL SLASSQ( MIN( N-J+1, K+1 ), AB( 1, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( MIN( N-J+1, K+1 ), AB( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 310 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANTB = VALUE diff --git a/lapack-netlib/SRC/slantp.f b/lapack-netlib/SRC/slantp.f index 1423f5ca3..ab781deac 100644 --- a/lapack-netlib/SRC/slantp.f +++ b/lapack-netlib/SRC/slantp.f @@ -129,6 +129,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER N @@ -146,15 +147,18 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J, K - REAL SCALE, SUM, VALUE + REAL SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL SLASSQ +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. +* .. External Subroutines .. + EXTERNAL SLASSQ, SCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT * .. @@ -306,45 +310,64 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
* IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N K = 2 DO 280 J = 2, N - CALL SLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( J-1, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + J 280 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE K = 1 DO 290 J = 1, N - CALL SLASSQ( J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( J, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + J 290 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N K = 2 DO 300 J = 1, N - 1 - CALL SLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( N-J, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 300 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE K = 1 DO 310 J = 1, N - CALL SLASSQ( N-J+1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( N-J+1, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 310 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANTP = VALUE diff --git a/lapack-netlib/SRC/slantr.f b/lapack-netlib/SRC/slantr.f index 63b855892..04d29f537 100644 --- a/lapack-netlib/SRC/slantr.f +++ b/lapack-netlib/SRC/slantr.f @@ -146,6 +146,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER LDA, M, N @@ -163,15 +164,18 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J - REAL SCALE, SUM, VALUE + REAL SUM, VALUE * .. -* .. External Subroutines .. - EXTERNAL SLASSQ +* .. Local Arrays .. + REAL SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, SISNAN EXTERNAL LSAME, SISNAN * .. +* .. External Subroutines .. + EXTERNAL SLASSQ, SCOMBSSQ +* .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT * .. @@ -281,7 +285,7 @@ END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - DO 210 I = 1, N + DO 210 I = 1, MIN( M, N ) WORK( I ) = ONE 210 CONTINUE DO 220 I = N + 1, M @@ -311,38 +315,56 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
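In the DIAG = 'U' branches that follow, the unit diagonal is implicit in the storage, so the accumulators are seeded with SSQ( 1 ) = ONE and SSQ( 2 ) = MIN( M, N ): the implicit ones contribute exactly MIN( M, N ) to the sum of squares at scale one. A quick check with illustrative sizes:

      PROGRAM UNITDIAG
*     Unit-diagonal triangular matrix with zero off-diagonal part:
*     normF(A) = SQRT( MIN( M, N ) ), here SQRT( 4 ) = 2.
      INTEGER            M, N
      PARAMETER          ( M = 4, N = 7 )
      REAL               SSQ( 2 )
      SSQ( 1 ) = 1.0E+0
      SSQ( 2 ) = REAL( MIN( M, N ) )
      PRINT *, 'normF =', SSQ( 1 )*SQRT( SSQ( 2 ) )
      END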
* IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = MIN( M, N ) + SSQ( 1 ) = ONE + SSQ( 2 ) = MIN( M, N ) DO 290 J = 2, N - CALL SLASSQ( MIN( M, J-1 ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( MIN( M, J-1 ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 290 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 300 J = 1, N - CALL SLASSQ( MIN( M, J ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( MIN( M, J ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 300 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = MIN( M, N ) + SSQ( 1 ) = ONE + SSQ( 2 ) = MIN( M, N ) DO 310 J = 1, N - CALL SLASSQ( M-J, A( MIN( M, J+1 ), J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( M-J, A( MIN( M, J+1 ), J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 310 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 320 J = 1, N - CALL SLASSQ( M-J+1, A( J, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL SLASSQ( M-J+1, A( J, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL SCOMBSSQ( SSQ, COLSSQ ) 320 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * SLANTR = VALUE diff --git a/lapack-netlib/SRC/slanv2.f b/lapack-netlib/SRC/slanv2.f index e73e5455c..e678305f2 100644 --- a/lapack-netlib/SRC/slanv2.f +++ b/lapack-netlib/SRC/slanv2.f @@ -140,13 +140,16 @@ * * .. Parameters .. REAL ZERO, HALF, ONE - PARAMETER ( ZERO = 0.0E+0, HALF = 0.5E+0, ONE = 1.0E+0 ) + PARAMETER ( ZERO = 0.0E+0, HALF = 0.5E+0, ONE = 1.0E+0, + $ TWO = 2.0E+0 ) REAL MULTPL PARAMETER ( MULTPL = 4.0E+0 ) * .. * .. Local Scalars .. REAL AA, BB, BCMAX, BCMIS, CC, CS1, DD, EPS, P, SAB, - $ SAC, SCALE, SIGMA, SN1, TAU, TEMP, Z + $ SAC, SCALE, SIGMA, SN1, TAU, TEMP, Z, SAFMIN, + $ SAFMN2, SAFMX2 + INTEGER COUNT * .. * .. External Functions .. REAL SLAMCH, SLAPY2 @@ -157,11 +160,14 @@ * .. * .. Executable Statements .. * + SAFMIN = SLAMCH( 'S' ) EPS = SLAMCH( 'P' ) + SAFMN2 = SLAMCH( 'B' )**INT( LOG( SAFMIN / EPS ) / + $ LOG( SLAMCH( 'B' ) ) / TWO ) + SAFMX2 = ONE / SAFMN2 IF( C.EQ.ZERO ) THEN CS = ONE SN = ZERO - GO TO 10 * ELSE IF( B.EQ.ZERO ) THEN * @@ -174,12 +180,12 @@ A = TEMP B = -C C = ZERO - GO TO 10 +* ELSE IF( (A-D).EQ.ZERO .AND. SIGN( ONE, B ).NE. $ SIGN( ONE, C ) ) THEN CS = ONE SN = ZERO - GO TO 10 +* ELSE * TEMP = A - D @@ -207,12 +213,30 @@ SN = C / TAU B = B - C C = ZERO +* ELSE * * Complex eigenvalues, or real (almost) equal eigenvalues. * Make diagonal elements equal. * + COUNT = 0 SIGMA = B + C + 10 CONTINUE + COUNT = COUNT + 1 + SCALE = MAX( ABS(TEMP), ABS(SIGMA) ) + IF( SCALE.GE.SAFMX2 ) THEN + SIGMA = SIGMA * SAFMN2 + TEMP = TEMP * SAFMN2 + IF (COUNT .LE. 20) + $ GOTO 10 + END IF + IF( SCALE.LE.SAFMN2 ) THEN + SIGMA = SIGMA * SAFMX2 + TEMP = TEMP * SAFMX2 + IF (COUNT .LE. 20) + $ GOTO 10 + END IF + P = HALF*TEMP TAU = SLAPY2( SIGMA, TEMP ) CS = SQRT( HALF*( ONE+ABS( SIGMA ) / TAU ) ) SN = -( P / ( TAU*CS ) )*SIGN( ONE, SIGMA ) @@ -268,8 +292,6 @@ END IF * END IF -* - 10 CONTINUE * * Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). 
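The reworked SLANV2 above protects the (TEMP, SIGMA) pair the way SLARTG protects (F, G): repeated scaling by SAFMN2 or SAFMX2 pulls the values into a safe range, and the new COUNT cap of 20 guarantees the loop terminates even on Inf or NaN input. A sketch of how those two constants relate to the machine parameters (the routine itself obtains them from SLAMCH; Fortran intrinsics stand in here for illustration):

      PROGRAM SCALECONST
*     SAFMN2 is a power of the radix near SQRT( SAFMIN / EPS ),
*     and SAFMX2 is its reciprocal, so scaling by either is exact.
      REAL               SAFMIN, EPS, BASE, SAFMN2, SAFMX2
      SAFMIN = TINY( 1.0E+0 )
      EPS = EPSILON( 1.0E+0 )
      BASE = REAL( RADIX( 1.0E+0 ) )
      SAFMN2 = BASE**INT( LOG( SAFMIN / EPS ) / LOG( BASE ) / 2.0E+0 )
      SAFMX2 = 1.0E+0 / SAFMN2
      PRINT *, 'SAFMN2 =', SAFMN2, ' SAFMX2 =', SAFMX2
      END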
* diff --git a/lapack-netlib/SRC/slaorhr_col_getrfnp.f b/lapack-netlib/SRC/slaorhr_col_getrfnp.f new file mode 100644 index 000000000..6cc59e538 --- /dev/null +++ b/lapack-netlib/SRC/slaorhr_col_getrfnp.f @@ -0,0 +1,248 @@ +*> \brief \b SLAORHR_COL_GETRFNP +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download SLAORHR_COL_GETRFNP + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE SLAORHR_COL_GETRFNP( M, N, A, LDA, D, INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. +* REAL A( LDA, * ), D( * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SLAORHR_COL_GETRFNP computes the modified LU factorization without +*> pivoting of a real general M-by-N matrix A. The factorization has +*> the form: +*> +*> A - S = L * U, +*> +*> where: +*> S is an m-by-n diagonal sign matrix with the diagonal D, so that +*> D(i) = S(i,i), 1 <= i <= min(M,N). The diagonal D is constructed +*> as D(i)=-SIGN(A(i,i)), where A(i,i) is the value after performing +*> i-1 steps of Gaussian elimination. This means that the diagonal +*> element at each step of "modified" Gaussian elimination is +*> at least one in absolute value (so that division by zero is +*> not possible during the division by the diagonal element); +*> +*> L is an M-by-N lower triangular matrix with unit diagonal elements +*> (lower trapezoidal if M > N); +*> +*> and U is an M-by-N upper triangular matrix +*> (upper trapezoidal if M < N). +*> +*> This routine is an auxiliary routine used in the Householder +*> reconstruction routine SORHR_COL. In SORHR_COL, this routine is +*> applied to an M-by-N matrix A with orthonormal columns, where each +*> element is bounded by one in absolute value. With the choice of +*> the matrix S above, one can show that the diagonal element at each +*> step of Gaussian elimination is the largest (in absolute value) in +*> the column on or below the diagonal, so that no pivoting is required +*> for numerical stability [1]. +*> +*> For more details on the Householder reconstruction algorithm, +*> including the modified LU factorization, see [1]. +*> +*> This is the blocked right-looking version of the algorithm, +*> calling Level 3 BLAS to update the submatrix. To factorize a block, +*> this routine calls the recursive routine SLAORHR_COL_GETRFNP2. +*> +*> [1] "Reconstructing Householder vectors from tall-skinny QR", +*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen, +*> E. Solomonik, J. Parallel Distrib. Comput., +*> vol. 85, pp. 3-31, 2015. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is REAL array, dimension (LDA,N) +*> On entry, the M-by-N matrix to be factored. +*> On exit, the factors L and U from the factorization +*> A-S=L*U; the unit diagonal elements of L are not stored. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M).
+*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is REAL array, dimension min(M,N) +*> The diagonal elements of the diagonal M-by-N sign matrix S, +*> D(i) = S(i,i), where 1 <= i <= min(M,N). The elements can +*> be only plus or minus one. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup realGEcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE SLAORHR_COL_GETRFNP( M, N, A, LDA, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. + REAL A( LDA, * ), D( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + REAL ONE + PARAMETER ( ONE = 1.0E+0 ) +* .. +* .. Local Scalars .. + INTEGER IINFO, J, JB, NB +* .. +* .. External Subroutines .. + EXTERNAL SGEMM, SLAORHR_COL_GETRFNP2, STRSM, XERBLA +* .. +* .. External Functions .. + INTEGER ILAENV + EXTERNAL ILAENV +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters. +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN + INFO = -2 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -4 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SLAORHR_COL_GETRFNP', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) + $ RETURN +* +* Determine the block size for this environment. +* + + NB = ILAENV( 1, 'SLAORHR_COL_GETRFNP', ' ', M, N, -1, -1 ) + + IF( NB.LE.1 .OR. NB.GE.MIN( M, N ) ) THEN +* +* Use unblocked code. +* + CALL SLAORHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) + ELSE +* +* Use blocked code. +* + DO J = 1, MIN( M, N ), NB + JB = MIN( MIN( M, N )-J+1, NB ) +* +* Factor diagonal and subdiagonal blocks. +* + CALL SLAORHR_COL_GETRFNP2( M-J+1, JB, A( J, J ), LDA, + $ D( J ), IINFO ) +* + IF( J+JB.LE.N ) THEN +* +* Compute block row of U. +* + CALL STRSM( 'Left', 'Lower', 'No transpose', 'Unit', JB, + $ N-J-JB+1, ONE, A( J, J ), LDA, A( J, J+JB ), + $ LDA ) + IF( J+JB.LE.M ) THEN +* +* Update trailing submatrix. 
+* + CALL SGEMM( 'No transpose', 'No transpose', M-J-JB+1, + $ N-J-JB+1, JB, -ONE, A( J+JB, J ), LDA, + $ A( J, J+JB ), LDA, ONE, A( J+JB, J+JB ), + $ LDA ) + END IF + END IF + END DO + END IF + RETURN +* +* End of SLAORHR_COL_GETRFNP +* + END \ No newline at end of file diff --git a/lapack-netlib/SRC/slaorhr_col_getrfnp2.f b/lapack-netlib/SRC/slaorhr_col_getrfnp2.f new file mode 100644 index 000000000..de604602f --- /dev/null +++ b/lapack-netlib/SRC/slaorhr_col_getrfnp2.f @@ -0,0 +1,305 @@ +*> \brief \b SLAORHR_COL_GETRFNP2 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download SLAORHR_COL_GETRFNP2 + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* RECURSIVE SUBROUTINE SLAORHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. +* REAL A( LDA, * ), D( * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SLAORHR_COL_GETRFNP2 computes the modified LU factorization without +*> pivoting of a real general M-by-N matrix A. The factorization has +*> the form: +*> +*> A - S = L * U, +*> +*> where: +*> S is an m-by-n diagonal sign matrix with the diagonal D, so that +*> D(i) = S(i,i), 1 <= i <= min(M,N). The diagonal D is constructed +*> as D(i)=-SIGN(A(i,i)), where A(i,i) is the value after performing +*> i-1 steps of Gaussian elimination. This means that the diagonal +*> element at each step of "modified" Gaussian elimination is at +*> least one in absolute value (so that division by zero is not +*> possible during the division by the diagonal element); +*> +*> L is an M-by-N lower triangular matrix with unit diagonal elements +*> (lower trapezoidal if M > N); +*> +*> and U is an M-by-N upper triangular matrix +*> (upper trapezoidal if M < N). +*> +*> This routine is an auxiliary routine used in the Householder +*> reconstruction routine SORHR_COL. In SORHR_COL, this routine is +*> applied to an M-by-N matrix A with orthonormal columns, where each +*> element is bounded by one in absolute value. With the choice of +*> the matrix S above, one can show that the diagonal element at each +*> step of Gaussian elimination is the largest (in absolute value) in +*> the column on or below the diagonal, so that no pivoting is required +*> for numerical stability [1]. +*> +*> For more details on the Householder reconstruction algorithm, +*> including the modified LU factorization, see [1]. +*> +*> This is the recursive version of the LU factorization algorithm. +*> Denote A - S by B. The algorithm divides the matrix B into four +*> submatrices: +*> +*> [ B11 | B12 ] where B11 is n1 by n1, +*> B = [ -----|----- ] B21 is (m-n1) by n1, +*> [ B21 | B22 ] B12 is n1 by n2, +*> B22 is (m-n1) by n2, +*> with n1 = min(m,n)/2, n2 = n-n1. +*> +*> +*> The subroutine calls itself to factor B11, solves for B21, +*> solves for B12, updates B22, then calls itself to factor B22. +*> +*> For more details on the recursive LU algorithm, see [2]. +*> +*> SLAORHR_COL_GETRFNP2 is called to factorize a block by the blocked +*> routine SLAORHR_COL_GETRFNP, which uses blocked code calling +*> Level 3 BLAS to update the submatrix. However, SLAORHR_COL_GETRFNP2 +*> is self-sufficient and can be used without SLAORHR_COL_GETRFNP. +*> +*> [1] "Reconstructing Householder vectors from tall-skinny QR", +*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen, +*> E.
Solomonik, J. Parallel Distrib. Comput., +*> vol. 85, pp. 3-31, 2015. +*> +*> [2] "Recursion leads to automatic variable blocking for dense linear +*> algebra algorithms", F. Gustavson, IBM J. of Res. and Dev., +*> vol. 41, no. 6, pp. 737-755, 1997. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is REAL array, dimension (LDA,N) +*> On entry, the M-by-N matrix to be factored. +*> On exit, the factors L and U from the factorization +*> A-S=L*U; the unit diagonal elements of L are not stored. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is REAL array, dimension min(M,N) +*> The diagonal elements of the diagonal M-by-N sign matrix S, +*> D(i) = S(i,i), where 1 <= i <= min(M,N). The elements can +*> be only plus or minus one. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup realGEcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + RECURSIVE SUBROUTINE SLAORHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. + REAL A( LDA, * ), D( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + REAL ONE + PARAMETER ( ONE = 1.0E+0 ) +* .. +* .. Local Scalars .. + REAL SFMIN + INTEGER I, IINFO, N1, N2 +* .. +* .. External Functions .. + REAL SLAMCH + EXTERNAL SLAMCH +* .. +* .. External Subroutines .. + EXTERNAL SGEMM, SSCAL, STRSM, XERBLA +* .. +* .. Intrinsic Functions .. + INTRINSIC ABS, SIGN, MAX, MIN +* .. +* .. Executable Statements .. 
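Before the executable part of SLAORHR_COL_GETRFNP2, it helps to see the sign trick on concrete numbers: every entry of an orthonormal column satisfies |A(i,1)| <= 1, so choosing D(1) = -SIGN( ONE, A(1,1) ) pushes the pivot A(1,1) - D(1) to magnitude at least one, and the scaling of rows 2:M can never divide by zero. A small standalone illustration (the values are made up):

      PROGRAM SIGNDEMO
      REAL               A( 3 ), D, PIV
      INTEGER            I
*     Entries of one column, each bounded by one in absolute value.
      DATA A / 0.3E+0, -0.4E+0, 0.5E+0 /
      D = -SIGN( 1.0E+0, A( 1 ) )
      PIV = A( 1 ) - D
*     PIV = 0.3 - (-1.0) = 1.3, so ABS( PIV ) >= 1 by construction.
      DO I = 2, 3
         A( I ) = A( I ) / PIV
      END DO
      PRINT *, 'D =', D, ' pivot =', PIV
      END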
+* +* Test the input parameters +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN + INFO = -2 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -4 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SLAORHR_COL_GETRFNP2', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) + $ RETURN + + IF ( M.EQ.1 ) THEN +* +* One row case, (also recursion termination case), +* use unblocked code +* +* Transfer the sign +* + D( 1 ) = -SIGN( ONE, A( 1, 1 ) ) +* +* Construct the row of U +* + A( 1, 1 ) = A( 1, 1 ) - D( 1 ) +* + ELSE IF( N.EQ.1 ) THEN +* +* One column case, (also recursion termination case), +* use unblocked code +* +* Transfer the sign +* + D( 1 ) = -SIGN( ONE, A( 1, 1 ) ) +* +* Construct the row of U +* + A( 1, 1 ) = A( 1, 1 ) - D( 1 ) +* +* Scale the elements 2:M of the column +* +* Determine machine safe minimum +* + SFMIN = SLAMCH('S') +* +* Construct the subdiagonal elements of L +* + IF( ABS( A( 1, 1 ) ) .GE. SFMIN ) THEN + CALL SSCAL( M-1, ONE / A( 1, 1 ), A( 2, 1 ), 1 ) + ELSE + DO I = 2, M + A( I, 1 ) = A( I, 1 ) / A( 1, 1 ) + END DO + END IF +* + ELSE +* +* Divide the matrix B into four submatrices +* + N1 = MIN( M, N ) / 2 + N2 = N-N1 + +* +* Factor B11, recursive call +* + CALL SLAORHR_COL_GETRFNP2( N1, N1, A, LDA, D, IINFO ) +* +* Solve for B21 +* + CALL STRSM( 'R', 'U', 'N', 'N', M-N1, N1, ONE, A, LDA, + $ A( N1+1, 1 ), LDA ) +* +* Solve for B12 +* + CALL STRSM( 'L', 'L', 'N', 'U', N1, N2, ONE, A, LDA, + $ A( 1, N1+1 ), LDA ) +* +* Update B22, i.e. compute the Schur complement +* B22 := B22 - B21*B12 +* + CALL SGEMM( 'N', 'N', M-N1, N2, N1, -ONE, A( N1+1, 1 ), LDA, + $ A( 1, N1+1 ), LDA, ONE, A( N1+1, N1+1 ), LDA ) +* +* Factor B22, recursive call +* + CALL SLAORHR_COL_GETRFNP2( M-N1, N2, A( N1+1, N1+1 ), LDA, + $ D( N1+1 ), IINFO ) +* + END IF + RETURN +* +* End of SLAORHR_COL_GETRFNP2 +* + END diff --git a/lapack-netlib/SRC/slaqps.f b/lapack-netlib/SRC/slaqps.f index 9c62ec8b6..3f8af304f 100644 --- a/lapack-netlib/SRC/slaqps.f +++ b/lapack-netlib/SRC/slaqps.f @@ -127,7 +127,7 @@ *> \param[in,out] AUXV *> \verbatim *> AUXV is REAL array, dimension (NB) -*> Auxiliar vector. +*> Auxiliary vector. *> \endverbatim *> *> \param[in,out] F diff --git a/lapack-netlib/SRC/slaqr0.f b/lapack-netlib/SRC/slaqr0.f index 1dcd3d176..318b46943 100644 --- a/lapack-netlib/SRC/slaqr0.f +++ b/lapack-netlib/SRC/slaqr0.f @@ -67,7 +67,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -79,12 +79,12 @@ *> \verbatim *> IHI is INTEGER *> It is assumed that H is already upper triangular in rows -*> and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, +*> and columns 1:ILO-1 and IHI+1:N and, if ILO > 1, *> H(ILO,ILO-1) is zero. ILO and IHI are normally set by a *> previous call to SGEBAL, and then passed to SGEHRD when the *> matrix output by SGEBAL is reduced to Hessenberg form. *> Otherwise, ILO and IHI should be set to 1 and N, -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -97,19 +97,19 @@ *> decomposition (the Schur form); 2-by-2 diagonal blocks *> (corresponding to complex conjugate pairs of eigenvalues) *> are returned in standard form, with H(i,i) = H(i+1,i+1) -*> and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is +*> and H(i+1,i)*H(i,i+1) < 0. 
If INFO = 0 and WANTT is *> .FALSE., then the contents of H are unspecified on exit. -*> (The output value of H when INFO.GT.0 is given under the +*> (The output value of H when INFO > 0 is given under the *> description of INFO below.) *> -*> This subroutine may explicitly set H(i,j) = 0 for i.GT.j and +*> This subroutine may explicitly set H(i,j) = 0 for i > j and *> j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). *> \endverbatim *> *> \param[out] WR @@ -125,7 +125,7 @@ *> and WI(ILO:IHI). If two eigenvalues are computed as a *> complex conjugate pair, they are stored in consecutive *> elements of WR and WI, say the i-th and (i+1)th, with -*> WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then +*> WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., then *> the eigenvalues are stored in the same order as on the *> diagonal of the Schur form returned in H, with *> WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal @@ -143,7 +143,7 @@ *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be *> applied if WANTZ is .TRUE.. -*> 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. +*> 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -153,7 +153,7 @@ *> If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is *> replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the *> orthogonal Schur factor of H(ILO:IHI,ILO:IHI). -*> (The output value of Z when INFO.GT.0 is given under +*> (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -161,7 +161,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if WANTZ is .TRUE. -*> then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. +*> then LDZ >= MAX(1,IHIZ). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -174,7 +174,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient, but LWORK typically as large as 6*N may *> be required for optimal performance. A workspace query *> to determine the optimal workspace size is recommended. @@ -190,19 +190,19 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: if INFO = i, SLAQR0 failed to compute all of +*> = 0: successful exit +*> > 0: if INFO = i, SLAQR0 failed to compute all of *> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR *> and WI contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 0 and WANT is .FALSE., then on exit, +*> If INFO > 0 and WANTT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -210,7 +210,7 @@ *> value of H is upper Hessenberg and quasi-triangular *> in rows and columns INFO+1 through IHI. *> -*> If INFO .GT.
0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> *> (final value of Z(ILO:IHI,ILOZ:IHIZ) *> = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U *> @@ -218,7 +218,7 @@ *> where U is the orthogonal matrix in (*) (regard- *> less of the value of WANTT.) *> -*> If INFO .GT. 0 and WANTZ is .FALSE., then Z is not +*> If INFO > 0 and WANTZ is .FALSE., then Z is not *> accessed. *> \endverbatim * @@ -677,7 +677,7 @@ END IF END IF * -* ==== Use up to NS of the the smallest magnatiude +* ==== Use up to NS of the smallest magnitude * . shifts. If there aren't NS shifts available, * . then use them all, possibly dropping one to * . make the number of shifts even. ==== diff --git a/lapack-netlib/SRC/slaqr1.f b/lapack-netlib/SRC/slaqr1.f index 2de33849d..6bb88c794 100644 --- a/lapack-netlib/SRC/slaqr1.f +++ b/lapack-netlib/SRC/slaqr1.f @@ -69,7 +69,7 @@ *> \verbatim *> LDH is INTEGER *> The leading dimension of H as declared in -*> the calling procedure. LDH.GE.N +*> the calling procedure. LDH >= N *> \endverbatim *> *> \param[in] SR1 diff --git a/lapack-netlib/SRC/slaqr2.f b/lapack-netlib/SRC/slaqr2.f index 8e1f34910..f4f8ca7f2 100644 --- a/lapack-netlib/SRC/slaqr2.f +++ b/lapack-netlib/SRC/slaqr2.f @@ -103,7 +103,7 @@ *> \param[in] NW *> \verbatim *> NW is INTEGER -*> Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). +*> Deflation window size. 1 <= NW <= (KBOT-KTOP+1).
*> \endverbatim *> *> \param[in,out] H @@ -121,7 +121,7 @@ *> \verbatim *> LDH is INTEGER *> Leading dimension of H just as declared in the calling -*> subroutine. N .LE. LDH +*> subroutine. N <= LDH *> \endverbatim *> *> \param[in] ILOZ @@ -133,7 +133,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -149,7 +149,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of Z just as declared in the -*> calling subroutine. 1 .LE. LDZ. +*> calling subroutine. 1 <= LDZ. *> \endverbatim *> *> \param[out] NS @@ -194,13 +194,13 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of V just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[in] NH *> \verbatim *> NH is INTEGER -*> The number of columns of T. NH.GE.NW. +*> The number of columns of T. NH >= NW. *> \endverbatim *> *> \param[out] T @@ -212,14 +212,14 @@ *> \verbatim *> LDT is INTEGER *> The leading dimension of T just as declared in the -*> calling subroutine. NW .LE. LDT +*> calling subroutine. NW <= LDT *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> The number of rows of work array WV available for -*> workspace. NV.GE.NW. +*> workspace. NV >= NW. *> \endverbatim *> *> \param[out] WV @@ -231,7 +231,7 @@ *> \verbatim *> LDWV is INTEGER *> The leading dimension of W just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDWV *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/slaqr3.f b/lapack-netlib/SRC/slaqr3.f index 534e2c489..ccad338b9 100644 --- a/lapack-netlib/SRC/slaqr3.f +++ b/lapack-netlib/SRC/slaqr3.f @@ -100,7 +100,7 @@ *> \param[in] NW *> \verbatim *> NW is INTEGER -*> Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). +*> Deflation window size. 1 <= NW <= (KBOT-KTOP+1). *> \endverbatim *> *> \param[in,out] H @@ -118,7 +118,7 @@ *> \verbatim *> LDH is INTEGER *> Leading dimension of H just as declared in the calling -*> subroutine. N .LE. LDH +*> subroutine. N <= LDH *> \endverbatim *> *> \param[in] ILOZ @@ -130,7 +130,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -146,7 +146,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of Z just as declared in the -*> calling subroutine. 1 .LE. LDZ. +*> calling subroutine. 1 <= LDZ. *> \endverbatim *> *> \param[out] NS @@ -191,13 +191,13 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of V just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[in] NH *> \verbatim *> NH is INTEGER -*> The number of columns of T. NH.GE.NW. +*> The number of columns of T. NH >= NW. *> \endverbatim *> *> \param[out] T @@ -209,14 +209,14 @@ *> \verbatim *> LDT is INTEGER *> The leading dimension of T just as declared in the -*> calling subroutine. NW .LE. LDT +*> calling subroutine. NW <= LDT *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> The number of rows of work array WV available for -*> workspace. NV.GE.NW. +*> workspace. NV >= NW. *> \endverbatim *> *> \param[out] WV @@ -228,7 +228,7 @@ *> \verbatim *> LDWV is INTEGER *> The leading dimension of W just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDWV *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/slaqr4.f b/lapack-netlib/SRC/slaqr4.f index 12b6b2fb1..cd642e07f 100644 --- a/lapack-netlib/SRC/slaqr4.f +++ b/lapack-netlib/SRC/slaqr4.f @@ -74,7 +74,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -86,12 +86,12 @@ *> \verbatim *> IHI is INTEGER *> It is assumed that H is already upper triangular in rows -*> and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, +*> and columns 1:ILO-1 and IHI+1:N and, if ILO > 1, *> H(ILO,ILO-1) is zero. ILO and IHI are normally set by a *> previous call to SGEBAL, and then passed to SGEHRD when the *> matrix output by SGEBAL is reduced to Hessenberg form. *> Otherwise, ILO and IHI should be set to 1 and N, -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -104,19 +104,19 @@ *> \verbatim *> H is REAL array, dimension (LDH,N) *> On entry, the upper Hessenberg matrix H. *> On exit, if INFO = 0 and WANTT is .TRUE., then H contains *> the upper quasi-triangular matrix from the Schur *> decomposition (the Schur form); 2-by-2 diagonal blocks *> (corresponding to complex conjugate pairs of eigenvalues) *> are returned in standard form, with H(i,i) = H(i+1,i+1) -*> and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is +*> and H(i+1,i)*H(i,i+1) < 0. If INFO = 0 and WANTT is *> .FALSE., then the contents of H are unspecified on exit. -*> (The output value of H when INFO.GT.0 is given under the +*> (The output value of H when INFO > 0 is given under the *> description of INFO below.) *> -*> This subroutine may explicitly set H(i,j) = 0 for i.GT.j and +*> This subroutine may explicitly set H(i,j) = 0 for i > j and *> j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N).
*> \endverbatim *> *> \param[out] WR @@ -132,7 +132,7 @@ *> and WI(ILO:IHI). If two eigenvalues are computed as a *> complex conjugate pair, they are stored in consecutive *> elements of WR and WI, say the i-th and (i+1)th, with -*> WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then +*> WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., then *> the eigenvalues are stored in the same order as on the *> diagonal of the Schur form returned in H, with *> WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal @@ -150,7 +150,7 @@ *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be *> applied if WANTZ is .TRUE.. -*> 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. +*> 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -160,7 +160,7 @@ *> If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is *> replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the *> orthogonal Schur factor of H(ILO:IHI,ILO:IHI). -*> (The output value of Z when INFO.GT.0 is given under +*> (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -168,7 +168,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if WANTZ is .TRUE. -*> then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. +*> then LDZ >= MAX(1,IHIZ). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -181,7 +181,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient, but LWORK typically as large as 6*N may *> be required for optimal performance. A workspace query *> to determine the optimal workspace size is recommended. @@ -199,19 +199,19 @@ *> INFO is INTEGER *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: if INFO = i, SLAQR4 failed to compute all of +*> = 0: successful exit +*> > 0: if INFO = i, SLAQR4 failed to compute all of *> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR *> and WI contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 0 and WANT is .FALSE., then on exit, +*> If INFO > 0 and WANTT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -219,7 +219,7 @@ *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> *> (final value of Z(ILO:IHI,ILOZ:IHIZ) *> = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U *> @@ -227,7 +227,7 @@ *> where U is the orthogonal matrix in (*) (regard- *> less of the value of WANTT.) *> -*> If INFO .GT. 0 and WANTZ is .FALSE., then Z is not +*> If INFO > 0 and WANTZ is .FALSE., then Z is not *> accessed. *> \endverbatim * @@ -680,7 +680,7 @@ END IF END IF * -* ==== Use up to NS of the the smallest magnatiude +* ==== Use up to NS of the smallest magnitude * . shifts. If there aren't NS shifts available, * . then use them all, possibly dropping one to * . make the number of shifts even.
==== diff --git a/lapack-netlib/SRC/slaqr5.f b/lapack-netlib/SRC/slaqr5.f index 65278e355..f04ee577e 100644 --- a/lapack-netlib/SRC/slaqr5.f +++ b/lapack-netlib/SRC/slaqr5.f @@ -133,7 +133,7 @@ *> \verbatim *> LDH is INTEGER *> LDH is the leading dimension of H just as declared in the -*> calling procedure. LDH.GE.MAX(1,N). +*> calling procedure. LDH >= MAX(1,N). *> \endverbatim *> *> \param[in] ILOZ @@ -145,7 +145,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N *> \endverbatim *> *> \param[in,out] Z @@ -161,7 +161,7 @@ *> \verbatim *> LDZ is INTEGER *> LDA is the leading dimension of Z just as declared in -*> the calling procedure. LDZ.GE.N. +*> the calling procedure. LDZ >= N. *> \endverbatim *> *> \param[out] V @@ -173,7 +173,7 @@ *> \verbatim *> LDV is INTEGER *> LDV is the leading dimension of V as declared in the -*> calling procedure. LDV.GE.3. +*> calling procedure. LDV >= 3. *> \endverbatim *> *> \param[out] U @@ -185,33 +185,14 @@ *> \verbatim *> LDU is INTEGER *> LDU is the leading dimension of U just as declared in the -*> in the calling subroutine. LDU.GE.3*NSHFTS-3. -*> \endverbatim -*> -*> \param[in] NH -*> \verbatim -*> NH is INTEGER -*> NH is the number of columns in array WH available for -*> workspace. NH.GE.1. -*> \endverbatim -*> -*> \param[out] WH -*> \verbatim -*> WH is REAL array, dimension (LDWH,NH) -*> \endverbatim -*> -*> \param[in] LDWH -*> \verbatim -*> LDWH is INTEGER -*> Leading dimension of WH just as declared in the -*> calling procedure. LDWH.GE.3*NSHFTS-3. +*> in the calling subroutine. LDU >= 3*NSHFTS-3. *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> NV is the number of rows in WV agailable for workspace. -*> NV.GE.1. +*> NV >= 1. *> \endverbatim *> *> \param[out] WV @@ -223,9 +204,28 @@ *> \verbatim *> LDWV is INTEGER *> LDWV is the leading dimension of WV as declared in the -*> in the calling subroutine. LDWV.GE.NV. +*> in the calling subroutine. LDWV >= NV. *> \endverbatim * +*> \param[in] NH +*> \verbatim +*> NH is INTEGER +*> NH is the number of columns in array WH available for +*> workspace. NH >= 1. +*> \endverbatim +*> +*> \param[out] WH +*> \verbatim +*> WH is REAL array, dimension (LDWH,NH) +*> \endverbatim +*> +*> \param[in] LDWH +*> \verbatim +*> LDWH is INTEGER +*> Leading dimension of WH just as declared in the +*> calling procedure. LDWH >= 3*NSHFTS-3. +*> \endverbatim +*> * Authors: * ======== * diff --git a/lapack-netlib/SRC/slarfb.f b/lapack-netlib/SRC/slarfb.f index c51f69534..d853a54ec 100644 --- a/lapack-netlib/SRC/slarfb.f +++ b/lapack-netlib/SRC/slarfb.f @@ -92,6 +92,8 @@ *> K is INTEGER *> The order of the matrix T (= the number of elementary *> reflectors whose product defines the block reflector). +*> If SIDE = 'L', M >= K >= 0; +*> if SIDE = 'R', N >= K >= 0. *> \endverbatim *> *> \param[in] V diff --git a/lapack-netlib/SRC/slarfx.f b/lapack-netlib/SRC/slarfx.f index 590e99e70..3175068b8 100644 --- a/lapack-netlib/SRC/slarfx.f +++ b/lapack-netlib/SRC/slarfx.f @@ -94,7 +94,7 @@ *> \param[in] LDC *> \verbatim *> LDC is INTEGER -*> The leading dimension of the array C. LDA >= (1,M). +*> The leading dimension of the array C. LDC >= max(1,M).
*> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/slarfy.f b/lapack-netlib/SRC/slarfy.f index 340c54413..f9ba011a2 100644 --- a/lapack-netlib/SRC/slarfy.f +++ b/lapack-netlib/SRC/slarfy.f @@ -103,7 +103,7 @@ * *> \date December 2016 * -*> \ingroup single_eig +*> \ingroup realOTHERauxiliary * * ===================================================================== SUBROUTINE SLARFY( UPLO, N, V, INCV, TAU, C, LDC, WORK ) diff --git a/lapack-netlib/SRC/slarrb.f b/lapack-netlib/SRC/slarrb.f index 988e25ff0..ac9d7bc8c 100644 --- a/lapack-netlib/SRC/slarrb.f +++ b/lapack-netlib/SRC/slarrb.f @@ -91,7 +91,7 @@ *> RTOL2 is REAL *> Tolerance for the convergence of the bisection intervals. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) +*> RIGHT-LEFT < MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) *> where GAP is the (estimated) distance to the nearest *> eigenvalue. *> \endverbatim @@ -117,7 +117,7 @@ *> WGAP is REAL array, dimension (N-1) *> On input, the (estimated) gaps between consecutive *> eigenvalues of L D L^T, i.e., WGAP(I-OFFSET) is the gap between -*> eigenvalues I and I+1. Note that if IFIRST.EQ.ILAST +*> eigenvalues I and I+1. Note that if IFIRST = ILAST *> then WGAP(IFIRST-OFFSET) must be set to ZERO. *> On output, these gaps are refined. *> \endverbatim diff --git a/lapack-netlib/SRC/slarre.f b/lapack-netlib/SRC/slarre.f index ea9b8fcbc..6636235d0 100644 --- a/lapack-netlib/SRC/slarre.f +++ b/lapack-netlib/SRC/slarre.f @@ -150,7 +150,7 @@ *> RTOL2 is REAL *> Parameters for bisection. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) +*> RIGHT-LEFT < MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) *> \endverbatim *> *> \param[in] SPLTOL diff --git a/lapack-netlib/SRC/slarrj.f b/lapack-netlib/SRC/slarrj.f index a721d0751..fb867595c 100644 --- a/lapack-netlib/SRC/slarrj.f +++ b/lapack-netlib/SRC/slarrj.f @@ -85,7 +85,7 @@ *> RTOL is REAL *> Tolerance for the convergence of the bisection intervals. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.RTOL*MAX(|LEFT|,|RIGHT|). +*> RIGHT-LEFT < RTOL*MAX(|LEFT|,|RIGHT|). *> \endverbatim *> *> \param[in] OFFSET diff --git a/lapack-netlib/SRC/slarrv.f b/lapack-netlib/SRC/slarrv.f index f9e3cf2b9..04519fde8 100644 --- a/lapack-netlib/SRC/slarrv.f +++ b/lapack-netlib/SRC/slarrv.f @@ -149,7 +149,7 @@ *> RTOL2 is REAL *> Parameters for bisection. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) +*> RIGHT-LEFT < MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) *> \endverbatim *> *> \param[in,out] W diff --git a/lapack-netlib/SRC/slartg.f b/lapack-netlib/SRC/slartg.f index 784d4bc36..307c9c83a 100644 --- a/lapack-netlib/SRC/slartg.f +++ b/lapack-netlib/SRC/slartg.f @@ -163,7 +163,7 @@ F1 = F1*SAFMN2 G1 = G1*SAFMN2 SCALE = MAX( ABS( F1 ), ABS( G1 ) ) - IF( SCALE.GE.SAFMX2 ) + IF( SCALE.GE.SAFMX2 .AND. COUNT .LT. 20) $ GO TO 10 R = SQRT( F1**2+G1**2 ) CS = F1 / R diff --git a/lapack-netlib/SRC/slartgp.f b/lapack-netlib/SRC/slartgp.f index ad76c94b4..f8be5f52b 100644 --- a/lapack-netlib/SRC/slartgp.f +++ b/lapack-netlib/SRC/slartgp.f @@ -161,7 +161,7 @@ F1 = F1*SAFMN2 G1 = G1*SAFMN2 SCALE = MAX( ABS( F1 ), ABS( G1 ) ) - IF( SCALE.GE.SAFMX2 ) + IF( SCALE.GE.SAFMX2 .AND. COUNT .LT. 
20) $ GO TO 10 R = SQRT( F1**2+G1**2 ) CS = F1 / R diff --git a/lapack-netlib/SRC/slasd7.f b/lapack-netlib/SRC/slasd7.f index 2adaa5ee7..d8775b6c6 100644 --- a/lapack-netlib/SRC/slasd7.f +++ b/lapack-netlib/SRC/slasd7.f @@ -400,7 +400,7 @@ VL( I ) = VLW( IDXI ) 50 CONTINUE * -* Calculate the allowable deflation tolerence +* Calculate the allowable deflation tolerance * EPS = SLAMCH( 'Epsilon' ) TOL = MAX( ABS( ALPHA ), ABS( BETA ) ) diff --git a/lapack-netlib/SRC/slassq.f b/lapack-netlib/SRC/slassq.f index 35b40f07f..d9930a597 100644 --- a/lapack-netlib/SRC/slassq.f +++ b/lapack-netlib/SRC/slassq.f @@ -60,7 +60,7 @@ *> *> \param[in] X *> \verbatim -*> X is REAL array, dimension (N) +*> X is REAL array, dimension (1+(N-1)*INCX) *> The vector for which a scaled sum of squares is computed. *> x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. *> \endverbatim diff --git a/lapack-netlib/SRC/slaswlq.f b/lapack-netlib/SRC/slaswlq.f index 27b5b8067..5eb9cc5f9 100644 --- a/lapack-netlib/SRC/slaswlq.f +++ b/lapack-netlib/SRC/slaswlq.f @@ -1,3 +1,4 @@ +*> \brief \b SLASWLQ * * Definition: * =========== @@ -18,9 +19,20 @@ *> *> \verbatim *> -*> SLASWLQ computes a blocked Short-Wide LQ factorization of a -*> M-by-N matrix A, where N >= M: -*> A = L * Q +*> SLASWLQ computes a blocked Short-Wide LQ factorization of +*> a real M-by-N matrix A for M <= N: +*> +*> A = ( L 0 ) * Q, +*> +*> where: +*> +*> Q is an N-by-N orthogonal matrix, stored on exit in an implicit +*> form in the elements above the diagonal of the array A and in +*> the elements of the array T; +*> L is a lower-triangular M-by-M matrix stored on exit in +*> the elements on and below the diagonal of the array A. +*> 0 is an M-by-(N-M) zero matrix, if M < N, and is not stored. +*> +*> \endverbatim * * Arguments: @@ -150,10 +162,10 @@ SUBROUTINE SLASWLQ( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK, $ INFO) * -* -- LAPACK computational routine (version 3.8.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* November 2017 +* November 2019 * * .. Scalar Arguments ..
INTEGER INFO, LDA, M, N, MB, NB, LWORK, LDT diff --git a/lapack-netlib/SRC/slasyf_aa.f b/lapack-netlib/SRC/slasyf_aa.f index ed4ef6291..76f632602 100644 --- a/lapack-netlib/SRC/slasyf_aa.f +++ b/lapack-netlib/SRC/slasyf_aa.f @@ -284,8 +284,9 @@ * * Swap A(I1, I2+1:M) with A(I2, I2+1:M) * - CALL SSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, - $ A( J1+I2-1, I2+1 ), LDA ) + IF( I2.LT.M ) + $ CALL SSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, + $ A( J1+I2-1, I2+1 ), LDA ) * * Swap A(I1, I1) with A(I2,I2) * @@ -325,13 +326,15 @@ * Compute L(J+2, J+1) = WORK( 3:M ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:M, J) = L(J+2:M, J+1) * - IF( A( K, J+1 ).NE.ZERO ) THEN - ALPHA = ONE / A( K, J+1 ) - CALL SCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) - CALL SSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) - ELSE - CALL SLASET( 'Full', 1, M-J-1, ZERO, ZERO, - $ A( K, J+2 ), LDA) + IF( J.LT.(M-1) ) THEN + IF( A( K, J+1 ).NE.ZERO ) THEN + ALPHA = ONE / A( K, J+1 ) + CALL SCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) + CALL SSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) + ELSE + CALL SLASET( 'Full', 1, M-J-1, ZERO, ZERO, + $ A( K, J+2 ), LDA) + END IF END IF END IF J = J + 1 @@ -432,8 +435,9 @@ * * Swap A(I2+1:M, I1) with A(I2+1:M, I2) * - CALL SSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, - $ A( I2+1, J1+I2-1 ), 1 ) + IF( I2.LT.M ) + $ CALL SSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, + $ A( I2+1, J1+I2-1 ), 1 ) * * Swap A(I1, I1) with A(I2, I2) * @@ -473,13 +477,15 @@ * Compute L(J+2, J+1) = WORK( 3:M ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:M, J) = L(J+2:M, J+1) * - IF( A( J+1, K ).NE.ZERO ) THEN - ALPHA = ONE / A( J+1, K ) - CALL SCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) - CALL SSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) - ELSE - CALL SLASET( 'Full', M-J-1, 1, ZERO, ZERO, - $ A( J+2, K ), LDA ) + IF( J.LT.(M-1) ) THEN + IF( A( J+1, K ).NE.ZERO ) THEN + ALPHA = ONE / A( J+1, K ) + CALL SCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) + CALL SSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) + ELSE + CALL SLASET( 'Full', M-J-1, 1, ZERO, ZERO, + $ A( J+2, K ), LDA ) + END IF END IF END IF J = J + 1 diff --git a/lapack-netlib/SRC/slasyf_rk.f b/lapack-netlib/SRC/slasyf_rk.f index b1b37177f..c16708365 100644 --- a/lapack-netlib/SRC/slasyf_rk.f +++ b/lapack-netlib/SRC/slasyf_rk.f @@ -321,7 +321,7 @@ * of A and working backwards, and compute the matrix W = U12*D * for use in updating A11 * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = ZERO @@ -649,7 +649,7 @@ * of A and working forwards, and compute the matrix W = L21*D * for use in updating A22 * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = ZERO * diff --git a/lapack-netlib/SRC/slatdf.f b/lapack-netlib/SRC/slatdf.f index 5496f9db4..495d32502 100644 --- a/lapack-netlib/SRC/slatdf.f +++ b/lapack-netlib/SRC/slatdf.f @@ -85,7 +85,7 @@ *> RHS is REAL array, dimension N. *> On entry, RHS contains contributions from other subsystems. *> On exit, RHS contains the solution of the subsystem with -*> entries acoording to the value of IJOB (see above). +*> entries according to the value of IJOB (see above). *> \endverbatim *> *> \param[in,out] RDSUM @@ -260,7 +260,7 @@ * * Solve for U-part, look-ahead for RHS(N) = +-1. 
This is not done * in BSOLVE and will hopefully give us a better estimate because -* any ill-conditioning of the original matrix is transfered to U +* any ill-conditioning of the original matrix is transferred to U * and not to L. U(N, N) is an approximation to sigma_min(LU). * CALL SCOPY( N-1, RHS, 1, XP, 1 ) diff --git a/lapack-netlib/SRC/slatsqr.f b/lapack-netlib/SRC/slatsqr.f index d6d682799..b56b0d41e 100644 --- a/lapack-netlib/SRC/slatsqr.f +++ b/lapack-netlib/SRC/slatsqr.f @@ -1,3 +1,4 @@ +*> \brief \b SLATSQR * * Definition: * =========== @@ -19,8 +20,22 @@ *> \verbatim *> *> SLATSQR computes a blocked Tall-Skinny QR factorization of -*> an M-by-N matrix A, where M >= N: -*> A = Q * R . +*> a real M-by-N matrix A for M >= N: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is an M-by-M orthogonal matrix, stored on exit in an implicit +*> form in the elements below the diagonal of the array A and in +*> the elements of the array T; +*> +*> R is an upper-triangular N-by-N matrix, stored on exit in +*> the elements on and above the diagonal of the array A. +*> +*> 0 is a (M-N)-by-N zero matrix, and is not stored. +*> *> \endverbatim * * Arguments: @@ -149,10 +164,10 @@ SUBROUTINE SLATSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, $ LWORK, INFO) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N, MB, NB, LDT, LWORK diff --git a/lapack-netlib/SRC/sorgtsqr.f b/lapack-netlib/SRC/sorgtsqr.f new file mode 100644 index 000000000..748760d63 --- /dev/null +++ b/lapack-netlib/SRC/sorgtsqr.f @@ -0,0 +1,306 @@ +*> \brief \b SORGTSQR +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download SORGTSQR + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> +* Definition: +* =========== +* +* SUBROUTINE SORGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK, +* $ INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, LDT, LWORK, M, N, MB, NB +* .. +* .. Array Arguments .. +* REAL A( LDA, * ), T( LDT, * ), WORK( * ) +* .. +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SORGTSQR generates an M-by-N real matrix Q_out with orthonormal columns, +*> which are the first N columns of a product of real orthogonal +*> matrices of order M which are returned by SLATSQR +*> +*> Q_out = first_N_columns_of( Q(1)_in * Q(2)_in * ... * Q(k)_in ). +*> +*> See the documentation for SLATSQR. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. M >= N >= 0. +*> \endverbatim +*> +*> \param[in] MB +*> \verbatim +*> MB is INTEGER +*> The row block size used by SLATSQR to return +*> arrays A and T. MB > N. +*> (Note that if MB > M, then M is used instead of MB +*> as the row block size). +*> \endverbatim +*> +*> \param[in] NB +*> \verbatim +*> NB is INTEGER +*> The column block size used by SLATSQR to return +*> arrays A and T. NB >= 1. +*> (Note that if NB > N, then N is used instead of NB +*> as the column block size).
+*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is REAL array, dimension (LDA,N) +*> +*> On entry: +*> +*> The elements on and above the diagonal are not accessed. +*> The elements below the diagonal represent the unit +*> lower-trapezoidal blocked matrix V computed by SLATSQR +*> that defines the input matrices Q_in(k) (ones on the +*> diagonal are not stored) (same format as the output A +*> below the diagonal in SLATSQR). +*> +*> On exit: +*> +*> The array A contains an M-by-N orthonormal matrix Q_out, +*> i.e. the columns of A are orthogonal unit vectors. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[in] T +*> \verbatim +*> T is REAL array, +*> dimension (LDT, N * NIRB) +*> where NIRB = Number_of_input_row_blocks +*> = MAX( 1, CEIL((M-N)/(MB-N)) ) +*> Let NICB = Number_of_input_col_blocks +*> = CEIL(N/NB) +*> +*> The upper-triangular block reflectors used to define the +*> input matrices Q_in(k), k=(1:NIRB*NICB). The block +*> reflectors are stored in compact form in NIRB block +*> reflector sequences. Each of NIRB block reflector sequences +*> is stored in a larger NB-by-N column block of T and consists +*> of NICB smaller NB-by-NB upper-triangular column blocks. +*> (same format as the output T in SLATSQR). +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. +*> LDT >= max(1,min(NB,N)). +*> \endverbatim +*> +*> \param[out] WORK +*> \verbatim +*> (workspace) REAL array, dimension (MAX(2,LWORK)) +*> On exit, if INFO = 0, WORK(1) returns the optimal LWORK. +*> \endverbatim +*> +*> \param[in] LWORK +*> \verbatim +*> LWORK is INTEGER +*> The dimension of the array WORK. LWORK >= (M+NB)*N. +*> If LWORK = -1, then a workspace query is assumed. +*> The routine only calculates the optimal size of the WORK +*> array, returns this value as the first entry of the WORK +*> array, and no error message related to LWORK is issued +*> by XERBLA. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup singleOTHERcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE SORGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK, + $ INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, LDT, LWORK, M, N, MB, NB +* .. +* .. Array Arguments .. + REAL A( LDA, * ), T( LDT, * ), WORK( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + REAL ONE, ZERO + PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) +* .. +* .. Local Scalars .. + LOGICAL LQUERY + INTEGER IINFO, LDC, LWORKOPT, LC, LW, NBLOCAL, J +* .. +* .. External Subroutines .. + EXTERNAL SCOPY, SLAMTSQR, SLASET, XERBLA +* .. +* ..
Intrinsic Functions .. + INTRINSIC REAL, MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters +* + LQUERY = LWORK.EQ.-1 + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 .OR. M.LT.N ) THEN + INFO = -2 + ELSE IF( MB.LE.N ) THEN + INFO = -3 + ELSE IF( NB.LT.1 ) THEN + INFO = -4 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -6 + ELSE IF( LDT.LT.MAX( 1, MIN( NB, N ) ) ) THEN + INFO = -8 + ELSE +* +* Test the input LWORK for the dimension of the array WORK. +* This workspace is used to store array C(LDC, N) and WORK(LWORK) +* in the call to SLAMTSQR. See the documentation for SLAMTSQR. +* + IF( LWORK.LT.2 .AND. (.NOT.LQUERY) ) THEN + INFO = -10 + ELSE +* +* Set block size for column blocks +* + NBLOCAL = MIN( NB, N ) +* +* Set the size for the array C(LDC,N) +* in the SLAMTSQR call and the optimal size of the work array +* WORK(LWORK) in the SLAMTSQR call. +* + LDC = M + LC = LDC*N + LW = N * NBLOCAL +* + LWORKOPT = LC+LW +* + IF( ( LWORK.LT.MAX( 1, LWORKOPT ) ).AND.(.NOT.LQUERY) ) THEN + INFO = -10 + END IF + END IF +* + END IF +* +* Handle error in the input parameters and return workspace query. +* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SORGTSQR', -INFO ) + RETURN + ELSE IF ( LQUERY ) THEN + WORK( 1 ) = REAL( LWORKOPT ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) THEN + WORK( 1 ) = REAL( LWORKOPT ) + RETURN + END IF +* +* (1) Form explicitly the tall-skinny M-by-N left submatrix Q1_in +* of M-by-M orthogonal matrix Q_in, which is implicitly stored in +* the subdiagonal part of input array A and in the input array T. +* Perform this by the following operation using the routine SLAMTSQR. +* +* Q1_in = Q_in * ( I ), where I is an N-by-N identity matrix, +* ( 0 ) 0 is a (M-N)-by-N zero matrix. +* +* (1a) Form M-by-N matrix in the array WORK(1:LDC*N) with ones +* on the diagonal and zeros elsewhere. +* + CALL SLASET( 'F', M, N, ZERO, ONE, WORK, LDC ) +* +* (1b) On input, WORK(1:LDC*N) stores ( I ); +* ( 0 ) +* +* On output, WORK(1:LDC*N) stores Q1_in. +* + CALL SLAMTSQR( 'L', 'N', M, N, N, MB, NBLOCAL, A, LDA, T, LDT, + $ WORK, LDC, WORK( LC+1 ), LW, IINFO ) +* +* (2) Copy the result from the part of the work array (1:M,1:N) +* with the leading dimension LDC that starts at WORK(1) into +* the output array A(1:M,1:N) column-by-column. +* + DO J = 1, N + CALL SCOPY( M, WORK( (J-1)*LDC + 1 ), 1, A( 1, J ), 1 ) + END DO +* + WORK( 1 ) = REAL( LWORKOPT ) + RETURN +* +* End of SORGTSQR +* + END \ No newline at end of file diff --git a/lapack-netlib/SRC/sorhr_col.f b/lapack-netlib/SRC/sorhr_col.f new file mode 100644 index 000000000..9aef57b26 --- /dev/null +++ b/lapack-netlib/SRC/sorhr_col.f @@ -0,0 +1,440 @@ +*> \brief \b SORHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download SORHR_COL + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> +* Definition: +* =========== +* +* SUBROUTINE SORHR_COL( M, N, NB, A, LDA, T, LDT, D, INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, LDT, M, N, NB +* .. +* .. Array Arguments .. +* REAL A( LDA, * ), D( * ), T( LDT, * ) +* .. +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SORHR_COL takes an M-by-N real matrix Q_in with orthonormal columns +*> as input, stored in A, and performs Householder Reconstruction (HR), +*> i.e.
reconstructs Householder vectors V(i) implicitly representing +*> another M-by-N matrix Q_out, with the property that Q_in = Q_out*S, +*> where S is an N-by-N diagonal matrix with diagonal entries +*> equal to +1 or -1. The Householder vectors (columns V(i) of V) are +*> stored in A on output, and the diagonal entries of S are stored in D. +*> Block reflectors are also returned in T +*> (same output format as SGEQRT). +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. M >= N >= 0. +*> \endverbatim +*> +*> \param[in] NB +*> \verbatim +*> NB is INTEGER +*> The column block size to be used in the reconstruction +*> of Householder column vector blocks in the array A and +*> corresponding block reflectors in the array T. NB >= 1. +*> (Note that if NB > N, then N is used instead of NB +*> as the column block size.) +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is REAL array, dimension (LDA,N) +*> +*> On entry: +*> +*> The array A contains an M-by-N orthonormal matrix Q_in, +*> i.e. the columns of A are orthogonal unit vectors. +*> +*> On exit: +*> +*> The elements below the diagonal of A represent the unit +*> lower-trapezoidal matrix V of Householder column vectors +*> V(i). The unit diagonal entries of V are not stored +*> (same format as the output below the diagonal in A from +*> SGEQRT). The matrix T and the matrix V stored on output +*> in A implicitly define Q_out. +*> +*> The elements above the diagonal contain the factor U +*> of the "modified" LU-decomposition: +*> Q_in - ( S ) = V * U +*> ( 0 ) +*> where 0 is a (M-N)-by-N zero matrix. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] T +*> \verbatim +*> T is REAL array, +*> dimension (LDT, N) +*> +*> Let NOCB = Number_of_output_col_blocks +*> = CEIL(N/NB) +*> +*> On exit, T(1:NB, 1:N) contains NOCB upper-triangular +*> block reflectors used to define Q_out stored in compact +*> form as a sequence of upper-triangular NB-by-NB column +*> blocks (same format as the output T in SGEQRT). +*> The matrix T and the matrix V stored on output in A +*> implicitly define Q_out. NOTE: The lower triangles +*> below the upper-triangular blocks will be filled with +*> zeros. See Further Details. +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. +*> LDT >= max(1,min(NB,N)). +*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is REAL array, dimension min(M,N). +*> The elements can be only plus or minus one. +*> +*> D(i) is constructed as D(i) = -SIGN(Q_in_i(i,i)), where +*> 1 <= i <= min(M,N), and Q_in_i is Q_in after performing +*> i-1 steps of "modified" Gaussian elimination. +*> See Further Details. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +*> \par Further Details: +* ===================== +*> +*> \verbatim +*> +*> The computed M-by-M orthogonal factor Q_out is defined implicitly as +*> a product of orthogonal matrices Q_out(i). Each Q_out(i) is stored in +*> the compact WY-representation format in the corresponding blocks of +*> matrices V (stored in A) and T.
+*> +*> The M-by-N unit lower-trapezoidal matrix V stored in the M-by-N +*> matrix A contains the column vectors V(i) in NB-size column +*> blocks VB(j). For example, VB(1) contains the columns +*> V(1), V(2), ... V(NB). NOTE: The unit entries on +*> the diagonal of V are not stored in A. +*> +*> The number of column blocks is +*> +*> NOCB = Number_of_output_col_blocks = CEIL(N/NB) +*> +*> where each block is of order NB except for the last block, which +*> is of order LAST_NB = N - (NOCB-1)*NB. +*> +*> For example, if M=6, N=5 and NB=2, the matrix V is +*> +*> +*> V = ( VB(1), VB(2), VB(3) ) = +*> +*> = ( 1 ) +*> ( v21 1 ) +*> ( v31 v32 1 ) +*> ( v41 v42 v43 1 ) +*> ( v51 v52 v53 v54 1 ) +*> ( v61 v62 v63 v64 v65 ) +*> +*> +*> For each of the column blocks VB(i), an upper-triangular block +*> reflector TB(i) is computed. These blocks are stored as +*> a sequence of upper-triangular column blocks in the NB-by-N +*> matrix T. The size of each TB(i) block is NB-by-NB, except +*> for the last block, whose size is LAST_NB-by-LAST_NB. +*> +*> For example, if M=6, N=5 and NB=2, the matrix T is +*> +*> T = ( TB(1), TB(2), TB(3) ) = +*> +*> = ( t11 t12 t13 t14 t15 ) +*> ( t22 t24 ) +*> +*> +*> The M-by-M factor Q_out is given as a product of NOCB +*> orthogonal M-by-M matrices Q_out(i). +*> +*> Q_out = Q_out(1) * Q_out(2) * ... * Q_out(NOCB), +*> +*> where each matrix Q_out(i) is given by the WY-representation +*> using corresponding blocks from the matrices V and T: +*> +*> Q_out(i) = I - VB(i) * TB(i) * (VB(i))**T, +*> +*> where I is the identity matrix. Here is the formula with matrix +*> dimensions: +*> +*> Q(i){M-by-M} = I{M-by-M} - +*> VB(i){M-by-INB} * TB(i){INB-by-INB} * (VB(i))**T {INB-by-M}, +*> +*> where INB = NB, except for the last block NOCB +*> for which INB=LAST_NB. +*> +*> ===== +*> NOTE: +*> ===== +*> +*> If Q_in is the result of doing a QR factorization +*> B = Q_in * R_in, then: +*> +*> B = (Q_out*S) * R_in = Q_out * (S * R_in) = Q_out * R_out. +*> +*> So if one wants to interpret Q_out as the result +*> of the QR factorization of B, then corresponding R_out +*> should be obtained by R_out = S * R_in, i.e. some rows of R_in +*> should be multiplied by -1. +*> +*> For the details of the algorithm, see [1]. +*> +*> [1] "Reconstructing Householder vectors from tall-skinny QR", +*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen, +*> E. Solomonik, J. Parallel Distrib. Comput., +*> vol. 85, pp. 3-31, 2015. +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup singleOTHERcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE SORHR_COL( M, N, NB, A, LDA, T, LDT, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, LDT, M, N, NB +* .. +* .. Array Arguments .. + REAL A( LDA, * ), D( * ), T( LDT, * ) +* .. +* +* ===================================================================== +* +* .. Parameters ..
+ REAL ONE, ZERO + PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) +* .. +* .. Local Scalars .. + INTEGER I, IINFO, J, JB, JBTEMP1, JBTEMP2, JNB, + $ NPLUSONE +* .. +* .. External Subroutines .. + EXTERNAL SCOPY, SLAORHR_COL_GETRFNP, SSCAL, STRSM, + $XERBLA +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 .OR. N.GT.M ) THEN + INFO = -2 + ELSE IF( NB.LT.1 ) THEN + INFO = -3 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -5 + ELSE IF( LDT.LT.MAX( 1, MIN( NB, N ) ) ) THEN + INFO = -7 + END IF +* +* Handle error in the input parameters. +* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SORHR_COL', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) THEN + RETURN + END IF +* +* On input, the M-by-N matrix A contains the orthogonal +* M-by-N matrix Q_in. +* +* (1) Compute the unit lower-trapezoidal V (ones on the diagonal +* are not stored) by performing the "modified" LU-decomposition. +* +* Q_in - ( S ) = V * U = ( V1 ) * U, +* ( 0 ) ( V2 ) +* +* where 0 is an (M-N)-by-N zero matrix. +* +* (1-1) Factor V1 and U. + + CALL SLAORHR_COL_GETRFNP( N, N, A, LDA, D, IINFO ) +* +* (1-2) Solve for V2. +* + IF( M.GT.N ) THEN + CALL STRSM( 'R', 'U', 'N', 'N', M-N, N, ONE, A, LDA, + $ A( N+1, 1 ), LDA ) + END IF +* +* (2) Reconstruct the block reflector T stored in T(1:NB, 1:N) +* as a sequence of upper-triangular blocks with NB-size column +* blocking. +* +* Loop over the column blocks of size NB of the array A(1:M,1:N) +* and the array T(1:NB,1:N), JB is the column index of a column +* block, JNB is the column block size at each step JB. +* + NPLUSONE = N + 1 + DO JB = 1, N, NB +* +* (2-0) Determine the column block size JNB. +* + JNB = MIN( NPLUSONE-JB, NB ) +* +* (2-1) Copy the upper-triangular part of the current JNB-by-JNB +* diagonal block U(JB) (of the N-by-N matrix U) stored +* in A(JB:JB+JNB-1,JB:JB+JNB-1) into the upper-triangular part +* of the current JNB-by-JNB block T(1:JNB,JB:JB+JNB-1) +* column-by-column, total JNB*(JNB+1)/2 elements. +* + JBTEMP1 = JB - 1 + DO J = JB, JB+JNB-1 + CALL SCOPY( J-JBTEMP1, A( JB, J ), 1, T( 1, J ), 1 ) + END DO +* +* (2-2) Perform on the upper-triangular part of the current +* JNB-by-JNB diagonal block U(JB) (of the N-by-N matrix U) stored +* in T(1:JNB,JB:JB+JNB-1) the following operation in place: +* (-1)*U(JB)*S(JB), i.e. the result will be stored in the upper- +* triangular part of T(1:JNB,JB:JB+JNB-1). This multiplication +* of the JNB-by-JNB diagonal block U(JB) by the JNB-by-JNB +* diagonal block S(JB) of the N-by-N sign matrix S from the +* right means changing the sign of each J-th column of the block +* U(JB) according to the sign of the diagonal element of the block +* S(JB), i.e. S(J,J) that is stored in the array element D(J). +* + DO J = JB, JB+JNB-1 + IF( D( J ).EQ.ONE ) THEN + CALL SSCAL( J-JBTEMP1, -ONE, T( 1, J ), 1 ) + END IF + END DO +* +* (2-3) Perform the triangular solve for the current block +* matrix X(JB): +* +* X(JB) * (A(JB)**T) = B(JB), where: +* +* A(JB)**T is a JNB-by-JNB unit upper-triangular +* coefficient block, and A(JB)=V1(JB), which +* is a JNB-by-JNB unit lower-triangular block +* stored in A(JB:JB+JNB-1,JB:JB+JNB-1).
+* The N-by-N matrix V1 is the upper part +* of the M-by-N lower-trapezoidal matrix V +* stored in A(1:M,1:N); +* +* B(JB) is a JNB-by-JNB upper-triangular right-hand +* side block, B(JB) = (-1)*U(JB)*S(JB), and +* B(JB) is stored in T(1:JNB,JB:JB+JNB-1); +* +* X(JB) is a JNB-by-JNB upper-triangular solution +* block, X(JB) is the upper-triangular block +* reflector T(JB), and X(JB) is stored +* in T(1:JNB,JB:JB+JNB-1). +* +* In other words, we perform the triangular solve for the +* upper-triangular block T(JB): +* +* T(JB) * (V1(JB)**T) = (-1)*U(JB)*S(JB). +* +* Even though the blocks X(JB) and B(JB) are upper- +* triangular, the routine STRSM will access all JNB**2 +* elements of the square T(1:JNB,JB:JB+JNB-1). Therefore, +* we need to set to zero the elements of the block +* T(1:JNB,JB:JB+JNB-1) below the diagonal before the call +* to STRSM. +* +* (2-3a) Set the elements to zero. +* + JBTEMP2 = JB - 2 + DO J = JB, JB+JNB-2 + DO I = J-JBTEMP2, NB + T( I, J ) = ZERO + END DO + END DO +* +* (2-3b) Perform the triangular solve. +* + CALL STRSM( 'R', 'L', 'T', 'U', JNB, JNB, ONE, + $ A( JB, JB ), LDA, T( 1, JB ), LDT ) +* + END DO +* + RETURN +* +* End of SORHR_COL +* + END diff --git a/lapack-netlib/SRC/sporfsx.f b/lapack-netlib/SRC/sporfsx.f index 52fab6976..ce8c26569 100644 --- a/lapack-netlib/SRC/sporfsx.f +++ b/lapack-netlib/SRC/sporfsx.f @@ -135,7 +135,7 @@ *> \param[in,out] S *> \verbatim *> S is REAL array, dimension (N) -*> The row scale factors for A. If EQUED = 'Y', A is multiplied on +*> The scale factors for A. If EQUED = 'Y', A is multiplied on *> the left and right by diag(S). S is an input argument if FACT = *> 'F'; otherwise, S is an output argument. If FACT = 'F' and EQUED *> = 'Y', each element of S must be positive. If S is output, each @@ -263,7 +263,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -299,14 +299,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -314,9 +314,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. 
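The three new routines above form one pipeline: SLATSQR computes the blocked tall-skinny QR factorization, SORGTSQR assembles the explicit orthonormal factor from SLATSQR's implicit output, and SORHR_COL reconstructs Householder vectors from that explicit factor. The following free-form Fortran driver is a minimal sketch of that chain, not part of this patch; it relies only on the argument lists and dimension formulas documented in the hunks above, with deliberately generous workspace.

program tsqr_hr_demo
!     Sketch only: chains SLATSQR -> SORGTSQR -> SORHR_COL using the
!     argument lists documented above.  M=6, N=3; row block MB=4
!     (MB > N, as SORGTSQR's argument checking requires), column
!     block NB=2.
      implicit none
      integer, parameter :: m = 6, n = 3, mb = 4, nb = 2
!     NIRB = MAX(1, CEIL((M-N)/(MB-N))) = 3 row blocks, so T needs
!     N*NIRB = 9 columns; 12 leaves headroom.  WORK(64) covers both
!     LWORK >= NB*N (SLATSQR) and LWORK >= (M+NB)*N = 24 (SORGTSQR).
      real :: a(m,n), t(nb,12), t2(nb,n), d(n), work(64)
      integer :: info

      call random_number(a)          ! arbitrary full-rank tall matrix

!     Blocked tall-skinny QR: R in the upper triangle of A, Q kept
!     implicitly below the diagonal of A and in T.
      call slatsqr(m, n, mb, nb, a, m, t, nb, work, size(work), info)
      if (info /= 0) stop 'SLATSQR failed'

!     Form the first N columns of Q explicitly: on exit A holds the
!     M-by-N orthonormal matrix Q_out.
      call sorgtsqr(m, n, mb, nb, a, m, t, nb, work, size(work), info)
      if (info /= 0) stop 'SORGTSQR failed'

!     Householder reconstruction: Q_in = Q_out*S, with the vectors V
!     below the diagonal of A, block reflectors in T2, and the +-1
!     diagonal of S in D (same format as SGEQRT output).
      call sorhr_col(m, n, nb, a, m, t2, nb, d, info)
      if (info /= 0) stop 'SORHR_COL failed'

      print '(a,3f5.1)', ' diag(S) =', d
end program tsqr_hr_demo

Both SLATSQR and SORGTSQR also accept LWORK = -1 for a workspace query, which a production caller would use instead of the fixed sizes assumed here.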
diff --git a/lapack-netlib/SRC/sposvxx.f b/lapack-netlib/SRC/sposvxx.f index 3cdfa749c..fa2c0d3f3 100644 --- a/lapack-netlib/SRC/sposvxx.f +++ b/lapack-netlib/SRC/sposvxx.f @@ -366,7 +366,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -402,14 +402,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -417,9 +417,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/ssb2st_kernels.f b/lapack-netlib/SRC/ssb2st_kernels.f index 54479f89e..08859169b 100644 --- a/lapack-netlib/SRC/ssb2st_kernels.f +++ b/lapack-netlib/SRC/ssb2st_kernels.f @@ -1,26 +1,26 @@ *> \brief \b SSB2ST_KERNELS * * @generated from zhb2st_kernels.f, fortran z -> s, Wed Dec 7 08:22:40 2016 -* +* * =========== DOCUMENTATION =========== * -* Online html documentation available at -* http://www.netlib.org/lapack/explore-html/ +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ * *> \htmlonly -*> Download SSB2ST_KERNELS + dependencies -*> -*> [TGZ] -*> -*> [ZIP] -*> +*> Download SSB2ST_KERNELS + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> *> [TXT] -*> \endhtmlonly +*> \endhtmlonly * * Definition: * =========== * -* SUBROUTINE SSB2ST_KERNELS( UPLO, WANTZ, TTYPE, +* SUBROUTINE SSB2ST_KERNELS( UPLO, WANTZ, TTYPE, * ST, ED, SWEEP, N, NB, IB, * A, LDA, V, TAU, LDVT, WORK) * @@ -32,9 +32,9 @@ * INTEGER TTYPE, ST, ED, SWEEP, N, NB, IB, LDA, LDVT * .. * .. Array Arguments .. -* REAL A( LDA, * ), V( * ), +* REAL A( LDA, * ), V( * ), * TAU( * ), WORK( * ) -* +* *> \par Purpose: * ============= *> @@ -124,7 +124,7 @@ *> LDVT is INTEGER. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is REAL array. Workspace of size nb. *> \endverbatim @@ -150,7 +150,7 @@ *> http://doi.acm.org/10.1145/2063384.2063394 *> *> A. Haidar, J. Kurzak, P. Luszczek, 2013. 
-*> An improved parallel singular value algorithm and its implementation +*> An improved parallel singular value algorithm and its implementation *> for multicore hardware, In Proceedings of 2013 International Conference *> for High Performance Computing, Networking, Storage and Analysis (SC '13). *> Denver, Colorado, USA, 2013. @@ -158,16 +158,16 @@ *> http://doi.acm.org/10.1145/2503210.2503292 *> *> A. Haidar, R. Solca, S. Tomov, T. Schulthess and J. Dongarra. -*> A novel hybrid CPU-GPU generalized eigensolver for electronic structure +*> A novel hybrid CPU-GPU generalized eigensolver for electronic structure *> calculations based on fine-grained memory aware tasks. *> International Journal of High Performance Computing Applications. *> Volume 28 Issue 2, Pages 196-209, May 2014. -*> http://hpc.sagepub.com/content/28/2/196 +*> http://hpc.sagepub.com/content/28/2/196 *> *> \endverbatim *> * ===================================================================== - SUBROUTINE SSB2ST_KERNELS( UPLO, WANTZ, TTYPE, + SUBROUTINE SSB2ST_KERNELS( UPLO, WANTZ, TTYPE, $ ST, ED, SWEEP, N, NB, IB, $ A, LDA, V, TAU, LDVT, WORK) * @@ -184,7 +184,7 @@ INTEGER TTYPE, ST, ED, SWEEP, N, NB, IB, LDA, LDVT * .. * .. Array Arguments .. - REAL A( LDA, * ), V( * ), + REAL A( LDA, * ), V( * ), $ TAU( * ), WORK( * ) * .. * @@ -198,8 +198,8 @@ * .. Local Scalars .. LOGICAL UPPER INTEGER I, J1, J2, LM, LN, VPOS, TAUPOS, - $ DPOS, OFDPOS, AJETER - REAL CTMP + $ DPOS, OFDPOS, AJETER + REAL CTMP * .. * .. External Subroutines .. EXTERNAL SLARFG, SLARFX, SLARFY @@ -212,7 +212,7 @@ * .. * .. * .. Executable Statements .. -* +* AJETER = IB + LDVT UPPER = LSAME( UPLO, 'U' ) @@ -243,10 +243,10 @@ V( VPOS ) = ONE DO 10 I = 1, LM-1 V( VPOS+I ) = ( A( OFDPOS-I, ST+I ) ) - A( OFDPOS-I, ST+I ) = ZERO + A( OFDPOS-I, ST+I ) = ZERO 10 CONTINUE CTMP = ( A( OFDPOS, ST ) ) - CALL SLARFG( LM, CTMP, V( VPOS+1 ), 1, + CALL SLARFG( LM, CTMP, V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) A( OFDPOS, ST ) = CTMP * @@ -284,14 +284,14 @@ * V( VPOS ) = ONE DO 30 I = 1, LM-1 - V( VPOS+I ) = + V( VPOS+I ) = $ ( A( DPOS-NB-I, J1+I ) ) A( DPOS-NB-I, J1+I ) = ZERO 30 CONTINUE CTMP = ( A( DPOS-NB, J1 ) ) CALL SLARFG( LM, CTMP, V( VPOS+1 ), 1, TAU( TAUPOS ) ) A( DPOS-NB, J1 ) = CTMP -* +* CALL SLARFX( 'Right', LN-1, LM, V( VPOS ), $ TAU( TAUPOS ), $ A( DPOS-NB+1, J1 ), LDA-1, WORK) @@ -299,9 +299,9 @@ ENDIF * * Lower case -* +* ELSE -* +* IF( WANTZ ) THEN VPOS = MOD( SWEEP-1, 2 ) * N + ST TAUPOS = MOD( SWEEP-1, 2 ) * N + ST @@ -316,9 +316,9 @@ V( VPOS ) = ONE DO 20 I = 1, LM-1 V( VPOS+I ) = A( OFDPOS+I, ST-1 ) - A( OFDPOS+I, ST-1 ) = ZERO + A( OFDPOS+I, ST-1 ) = ZERO 20 CONTINUE - CALL SLARFG( LM, A( OFDPOS, ST-1 ), V( VPOS+1 ), 1, + CALL SLARFG( LM, A( OFDPOS, ST-1 ), V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) * LM = ED - ST + 1 @@ -345,7 +345,7 @@ LM = J2-J1+1 * IF( LM.GT.0) THEN - CALL SLARFX( 'Right', LM, LN, V( VPOS ), + CALL SLARFX( 'Right', LM, LN, V( VPOS ), $ TAU( TAUPOS ), A( DPOS+NB, ST ), $ LDA-1, WORK) * @@ -362,13 +362,13 @@ V( VPOS+I ) = A( DPOS+NB+I, ST ) A( DPOS+NB+I, ST ) = ZERO 40 CONTINUE - CALL SLARFG( LM, A( DPOS+NB, ST ), V( VPOS+1 ), 1, + CALL SLARFG( LM, A( DPOS+NB, ST ), V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) * - CALL SLARFX( 'Left', LM, LN-1, V( VPOS ), + CALL SLARFX( 'Left', LM, LN-1, V( VPOS ), $ ( TAU( TAUPOS ) ), $ A( DPOS+NB-1, ST+1 ), LDA-1, WORK) - + ENDIF ENDIF ENDIF @@ -377,4 +377,4 @@ * * END OF SSB2ST_KERNELS * - END + END diff --git a/lapack-netlib/SRC/ssbgvx.f b/lapack-netlib/SRC/ssbgvx.f index 3408810bd..22f96729e 100644 --- 
a/lapack-netlib/SRC/ssbgvx.f +++ b/lapack-netlib/SRC/ssbgvx.f @@ -261,11 +261,11 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit -*> < 0 : if INFO = -i, the i-th argument had an illegal value +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value *> <= N: if INFO = i, then i eigenvectors failed to converge. *> Their indices are stored in IFAIL. -*> > N : SPBSTF returned an error code; i.e., +*> > N: SPBSTF returned an error code; i.e., *> if INFO = N + i, for 1 <= i <= N, then the leading *> minor of order i of B is not positive definite. *> The factorization of B could not be completed and diff --git a/lapack-netlib/SRC/sstemr.f b/lapack-netlib/SRC/sstemr.f index 228538161..d550f87e0 100644 --- a/lapack-netlib/SRC/sstemr.f +++ b/lapack-netlib/SRC/sstemr.f @@ -233,13 +233,13 @@ *> \param[in,out] TRYRAC *> \verbatim *> TRYRAC is LOGICAL -*> If TRYRAC.EQ..TRUE., indicates that the code should check whether +*> If TRYRAC = .TRUE., indicates that the code should check whether *> the tridiagonal matrix defines its eigenvalues to high relative *> accuracy. If so, the code uses relative-accuracy preserving *> algorithms that might be (a bit) slower depending on the matrix. *> If the matrix does not define its eigenvalues to high relative *> accuracy, the code can use possibly faster algorithms. -*> If TRYRAC.EQ..FALSE., the code is not required to guarantee +*> If TRYRAC = .FALSE., the code is not required to guarantee *> relatively accurate eigenvalues and can use the fastest possible *> techniques. *> On exit, a .TRUE. TRYRAC will be set to .FALSE. if the matrix diff --git a/lapack-netlib/SRC/ssyconvf.f b/lapack-netlib/SRC/ssyconvf.f index d43b9473f..c6f08428f 100644 --- a/lapack-netlib/SRC/ssyconvf.f +++ b/lapack-netlib/SRC/ssyconvf.f @@ -291,7 +291,7 @@ * * Convert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in factorization order where i decreases from N to 1 * I = N @@ -344,7 +344,7 @@ * * Revert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in reverse factorization order where i increases from 1 to N * I = 1 @@ -435,7 +435,7 @@ * * Convert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in factorization order where k increases from 1 to N * I = 1 @@ -488,7 +488,7 @@ * * Revert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in reverse factorization order where i decreases from N to 1 * I = N diff --git a/lapack-netlib/SRC/ssyconvf_rook.f b/lapack-netlib/SRC/ssyconvf_rook.f index 833b9c632..a7e0d5258 100644 --- a/lapack-netlib/SRC/ssyconvf_rook.f +++ b/lapack-netlib/SRC/ssyconvf_rook.f @@ -282,7 +282,7 @@ * * Convert PERMUTATIONS * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in factorization order where i decreases from N to 1 * I = N @@ -333,7 +333,7 @@ * * Revert PERMUTATIONS * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in reverse factorization order where i increases from 1 to N * I = 1 @@ -423,7 +423,7 @@ * * Convert PERMUTATIONS * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower
part of A * in factorization order where i increases from 1 to N * I = 1 @@ -474,7 +474,7 @@ * * Revert PERMUTATIONS * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in reverse factorization order where i decreases from N to 1 * I = N diff --git a/lapack-netlib/SRC/ssyev_2stage.f b/lapack-netlib/SRC/ssyev_2stage.f index 166766919..5d354c1b3 100644 --- a/lapack-netlib/SRC/ssyev_2stage.f +++ b/lapack-netlib/SRC/ssyev_2stage.f @@ -317,7 +317,7 @@ IF( .NOT.WANTZ ) THEN CALL SSTERF( N, W, WORK( INDE ), INFO ) ELSE -* Not available in this release, and agrument checking should not +* Not available in this release, and argument checking should not * let it get here RETURN CALL SORGTR( UPLO, N, A, LDA, WORK( INDTAU ), WORK( INDWRK ), diff --git a/lapack-netlib/SRC/ssyevd_2stage.f b/lapack-netlib/SRC/ssyevd_2stage.f index 8ab90b641..625713b85 100644 --- a/lapack-netlib/SRC/ssyevd_2stage.f +++ b/lapack-netlib/SRC/ssyevd_2stage.f @@ -385,7 +385,7 @@ IF( .NOT.WANTZ ) THEN CALL SSTERF( N, W, WORK( INDE ), INFO ) ELSE -* Not available in this release, and agrument checking should not +* Not available in this release, and argument checking should not * let it get here RETURN CALL SSTEDC( 'I', N, W, WORK( INDE ), WORK( INDWRK ), N, diff --git a/lapack-netlib/SRC/ssyrfsx.f b/lapack-netlib/SRC/ssyrfsx.f index b5dd0b2df..bfb7b6005 100644 --- a/lapack-netlib/SRC/ssyrfsx.f +++ b/lapack-netlib/SRC/ssyrfsx.f @@ -271,7 +271,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -307,14 +307,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -322,9 +322,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/ssysv_aa.f b/lapack-netlib/SRC/ssysv_aa.f index e470f5883..7e58d1e75 100644 --- a/lapack-netlib/SRC/ssysv_aa.f +++ b/lapack-netlib/SRC/ssysv_aa.f @@ -42,7 +42,7 @@ *> matrices.
*> *> Aasen's algorithm is used to factor A as -*> A = U * T * U**T, if UPLO = 'U', or +*> A = U**T * T * U, if UPLO = 'U', or *> A = L * T * L**T, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is symmetric tridiagonal. The factored @@ -86,7 +86,7 @@ *> *> On exit, if INFO = 0, the tridiagonal matrix T and the *> multipliers used to obtain the factor U or L from the -*> factorization A = U*T*U**T or A = L*T*L**T as computed by +*> factorization A = U**T*T*U or A = L*T*L**T as computed by *> SSYTRF. *> \endverbatim *> @@ -229,7 +229,7 @@ RETURN END IF * -* Compute the factorization A = U*T*U**T or A = L*T*L**T. +* Compute the factorization A = U**T*T*U or A = L*T*L**T. * CALL SSYTRF_AA( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) IF( INFO.EQ.0 ) THEN diff --git a/lapack-netlib/SRC/ssysv_aa_2stage.f b/lapack-netlib/SRC/ssysv_aa_2stage.f index 43d937141..5e2e0e340 100644 --- a/lapack-netlib/SRC/ssysv_aa_2stage.f +++ b/lapack-netlib/SRC/ssysv_aa_2stage.f @@ -44,7 +44,7 @@ *> matrices. *> *> Aasen's 2-stage algorithm is used to factor A as -*> A = U * T * U**T, if UPLO = 'U', or +*> A = U**T * T * U, if UPLO = 'U', or *> A = L * T * L**T, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is symmetric and band. The matrix T is @@ -258,7 +258,7 @@ END IF * * -* Compute the factorization A = U*T*U**T or A = L*T*L**T. +* Compute the factorization A = U**T*T*U or A = L*T*L**T. * CALL SSYTRF_AA_2STAGE( UPLO, N, A, LDA, TB, LTB, IPIV, IPIV2, $ WORK, LWORK, INFO ) diff --git a/lapack-netlib/SRC/ssysvxx.f b/lapack-netlib/SRC/ssysvxx.f index 4762748c0..e2be0128b 100644 --- a/lapack-netlib/SRC/ssysvxx.f +++ b/lapack-netlib/SRC/ssysvxx.f @@ -377,7 +377,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -413,14 +413,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is REAL array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -428,9 +428,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. 
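Since several hunks in this batch correct the Aasen factorization convention from A = U*T*U**T to A = U**T*T*U, a small caller makes the intended use concrete. The sketch below is hypothetical and not part of this patch: it performs the same SSYTRF_AA/SSYTRS_AA split that SSYSV_AA's code above performs (the SSYTRS_AA argument order is assumed from its standard LAPACK interface; its hunks appear further below).

program aasen_demo
!     Sketch only: factor/solve split that SSYSV_AA performs
!     internally, using the corrected convention A = U**T*T*U for
!     UPLO = 'U'.  Workspace is generous; SSYTRS_AA documents
!     LWORK >= max(1,3*N-2).
      implicit none
      integer, parameter :: n = 4, nrhs = 1
      real :: a(n,n), b(n,nrhs), work(64)
      integer :: ipiv(n), info, i, j

!     Symmetric test matrix (Hilbert); only the chosen triangle is read.
      do j = 1, n
         do i = 1, n
            a(i,j) = 1.0 / real(i + j - 1)
         end do
      end do
      b = 1.0

!     Aasen factorization of the upper triangle: A = U**T * T * U.
      call ssytrf_aa('U', n, a, n, ipiv, work, size(work), info)
      if (info /= 0) stop 'SSYTRF_AA failed'

!     Three-stage solve: forward substitution with U**T, tridiagonal
!     solve with T (done via SGTSV inside), backward substitution with
!     U, with the pivot permutations P applied around those steps.
      call ssytrs_aa('U', n, nrhs, a, n, ipiv, b, n, work, &
                     size(work), info)
      if (info /= 0) stop 'SSYTRS_AA failed'

      print '(a,4f10.4)', ' x =', b(:,1)
end program aasen_demo

The same split applies to UPLO = 'L' with A = L*T*L**T, which is the case the restructured SSYTRS_AA code below spells out step by step.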
diff --git a/lapack-netlib/SRC/ssytf2_rk.f b/lapack-netlib/SRC/ssytf2_rk.f index bf113d1bd..400e48353 100644 --- a/lapack-netlib/SRC/ssytf2_rk.f +++ b/lapack-netlib/SRC/ssytf2_rk.f @@ -312,7 +312,7 @@ * * Factorize A as U*D*U**T using the upper triangle of A * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = ZERO @@ -623,7 +623,7 @@ * * Factorize A as L*D*L**T using the lower triangle of A * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = ZERO * diff --git a/lapack-netlib/SRC/ssytrd_2stage.f b/lapack-netlib/SRC/ssytrd_2stage.f index 7ddc0224e..d2502f483 100644 --- a/lapack-netlib/SRC/ssytrd_2stage.f +++ b/lapack-netlib/SRC/ssytrd_2stage.f @@ -123,23 +123,22 @@ *> *> \param[out] HOUS2 *> \verbatim -*> HOUS2 is REAL array, dimension LHOUS2, that -*> store the Householder representation of the stage2 +*> HOUS2 is REAL array, dimension (LHOUS2) +*> Stores the Householder representation of the stage2 *> band to tridiagonal. *> \endverbatim *> *> \param[in] LHOUS2 *> \verbatim *> LHOUS2 is INTEGER -*> The dimension of the array HOUS2. LHOUS2 = MAX(1, dimension) -*> If LWORK = -1, or LHOUS2=-1, +*> The dimension of the array HOUS2. +*> If LWORK = -1, or LHOUS2 = -1, *> then a query is assumed; the routine *> only calculates the optimal size of the HOUS2 array, returns *> this value as the first entry of the HOUS2 array, and no error *> message related to LHOUS2 is issued by XERBLA. -*> LHOUS2 = MAX(1, dimension) where -*> dimension = 4*N if VECT='N' -*> not available now if VECT='H' +*> If VECT='N', LHOUS2 = max(1, 4*n); +*> if VECT='V', option not yet available. *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/ssytrd_sb2st.F b/lapack-netlib/SRC/ssytrd_sb2st.F index 0df1173e4..1d8c9f5c5 100644 --- a/lapack-netlib/SRC/ssytrd_sb2st.F +++ b/lapack-netlib/SRC/ssytrd_sb2st.F @@ -50,9 +50,9 @@ * Arguments: * ========== * -*> \param[in] STAGE +*> \param[in] STAGE1 *> \verbatim -*> STAGE is CHARACTER*1 +*> STAGE1 is CHARACTER*1 *> = 'N': "No": to mention that the stage 1 of the reduction *> from dense to band using the ssytrd_sy2sb routine *> was not called before this routine to reproduce AB. diff --git a/lapack-netlib/SRC/ssytrd_sy2sb.f b/lapack-netlib/SRC/ssytrd_sy2sb.f index 272876700..98169dc00 100644 --- a/lapack-netlib/SRC/ssytrd_sy2sb.f +++ b/lapack-netlib/SRC/ssytrd_sy2sb.f @@ -363,7 +363,7 @@ * * * Set the workspace of the triangular matrix T to zero in such a -* way everytime T is generated the upper/lower portion will be always zero +* way that every time T is generated the upper/lower portion will always be zero * CALL SLASET( "A", LDT, KD, ZERO, ZERO, WORK( TPOS ), LDT ) * diff --git a/lapack-netlib/SRC/ssytrf.f b/lapack-netlib/SRC/ssytrf.f index 2c29475df..ae4550f28 100644 --- a/lapack-netlib/SRC/ssytrf.f +++ b/lapack-netlib/SRC/ssytrf.f @@ -39,7 +39,7 @@ *> the Bunch-Kaufman diagonal pivoting method. The form of the *> factorization is *> -*> A = U*D*U**T or A = L*D*L**T +*> A = U**T*D*U or A = L*D*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and D is symmetric and block diagonal with @@ -144,7 +144,7 @@ *> *> \verbatim *> -*> If UPLO = 'U', then A = U*D*U**T, where +*> If UPLO = 'U', then A = U**T*D*U, where *> U = P(n)*U(n)* ...
*P(k)U(k)* ..., *> i.e., U is a product of terms P(k)*U(k), where k decreases from n to *> 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 @@ -262,7 +262,7 @@ * IF( UPPER ) THEN * -* Factorize A as U*D*U**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * * K is the main loop index, decreasing from N to 1 in steps of * KB, where KB is the number of columns factorized by SLASYF; diff --git a/lapack-netlib/SRC/ssytrf_aa.f b/lapack-netlib/SRC/ssytrf_aa.f index 4aaa978ad..7f428561c 100644 --- a/lapack-netlib/SRC/ssytrf_aa.f +++ b/lapack-netlib/SRC/ssytrf_aa.f @@ -37,7 +37,7 @@ *> SSYTRF_AA computes the factorization of a real symmetric matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a symmetric tridiagonal matrix. @@ -223,7 +223,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... * * Copy first row A(1, 1:N) into H(1:n) (stored in WORK(1:N)) @@ -256,7 +256,7 @@ $ A( MAX(1, J), J+1 ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J @@ -375,7 +375,7 @@ $ A( J+1, MAX(1, J) ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J diff --git a/lapack-netlib/SRC/ssytrf_aa_2stage.f b/lapack-netlib/SRC/ssytrf_aa_2stage.f index 0e0f6edb7..03690815b 100644 --- a/lapack-netlib/SRC/ssytrf_aa_2stage.f +++ b/lapack-netlib/SRC/ssytrf_aa_2stage.f @@ -38,7 +38,7 @@ *> SSYTRF_AA_2STAGE computes the factorization of a real symmetric matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a symmetric band matrix with the @@ -275,7 +275,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... 
* DO J = 0, NT-1 @@ -442,12 +442,14 @@ c END IF * > Apply pivots to previous columns of L CALL SSWAP( K-1, A( (J+1)*NB+1, I1 ), 1, $ A( (J+1)*NB+1, I2 ), 1 ) -* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL SSWAP( I2-I1-1, A( I1, I1+1 ), LDA, - $ A( I1+1, I2 ), 1 ) +* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) + IF( I2.GT.(I1+1) ) + $ CALL SSWAP( I2-I1-1, A( I1, I1+1 ), LDA, + $ A( I1+1, I2 ), 1 ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL SSWAP( N-I2, A( I1, I2+1 ), LDA, - $ A( I2, I2+1 ), LDA ) + IF( I2.LT.N ) + $ CALL SSWAP( N-I2, A( I1, I2+1 ), LDA, + $ A( I2, I2+1 ), LDA ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) @@ -616,11 +618,13 @@ c END IF CALL SSWAP( K-1, A( I1, (J+1)*NB+1 ), LDA, $ A( I2, (J+1)*NB+1 ), LDA ) * > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL SSWAP( I2-I1-1, A( I1+1, I1 ), 1, - $ A( I2, I1+1 ), LDA ) + IF( I2.GT.(I1+1) ) + $ CALL SSWAP( I2-I1-1, A( I1+1, I1 ), 1, + $ A( I2, I1+1 ), LDA ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL SSWAP( N-I2, A( I2+1, I1 ), 1, - $ A( I2+1, I2 ), 1 ) + IF( I2.LT.N ) + $ CALL SSWAP( N-I2, A( I2+1, I1 ), 1, + $ A( I2+1, I2 ), 1 ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) diff --git a/lapack-netlib/SRC/ssytri2.f b/lapack-netlib/SRC/ssytri2.f index 4b9ea4e7b..897116c23 100644 --- a/lapack-netlib/SRC/ssytri2.f +++ b/lapack-netlib/SRC/ssytri2.f @@ -62,7 +62,7 @@ *> \param[in,out] A *> \verbatim *> A is REAL array, dimension (LDA,N) -*> On entry, the NB diagonal matrix D and the multipliers +*> On entry, the block diagonal matrix D and the multipliers *> used to obtain the factor U or L as computed by SSYTRF. *> *> On exit, if INFO = 0, the (symmetric) inverse of the original @@ -82,7 +82,7 @@ *> \param[in] IPIV *> \verbatim *> IPIV is INTEGER array, dimension (N) -*> Details of the interchanges and the NB structure of D +*> Details of the interchanges and the block structure of D *> as determined by SSYTRF. *> \endverbatim *> diff --git a/lapack-netlib/SRC/ssytrs_aa.f b/lapack-netlib/SRC/ssytrs_aa.f index b05c9f7e6..ed4377ae7 100644 --- a/lapack-netlib/SRC/ssytrs_aa.f +++ b/lapack-netlib/SRC/ssytrs_aa.f @@ -37,7 +37,7 @@ *> \verbatim *> *> SSYTRS_AA solves a system of linear equations A*X = B with a real -*> symmetric matrix A using the factorization A = U*T*U**T or +*> symmetric matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by SSYTRF_AA. *> \endverbatim * @@ -49,7 +49,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -97,14 +97,16 @@ *> The leading dimension of the array B. LDB >= max(1,N). *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim -*> WORK is DOUBLE array, dimension (MAX(1,LWORK)) +*> WORK is REAL array, dimension (MAX(1,LWORK)) *> \endverbatim *> *> \param[in] LWORK *> \verbatim -*> LWORK is INTEGER, LWORK >= MAX(1,3*N-2). +*> LWORK is INTEGER +*> The dimension of the array WORK. LWORK >= max(1,3*N-2). +*> \endverbatim *> *> \param[out] INFO *> \verbatim @@ -198,24 +200,31 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. 
+* +* 1) Forward substitution with U**T +* + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B * -* Pivot, P**T * B + K = 1 + DO WHILE ( K.LE.N ) + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL SSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + K = K + 1 + END DO * - K = 1 - DO WHILE ( K.LE.N ) - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL SSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - K = K + 1 - END DO +* Compute U**T \ B -> B [ (U**T \P**T * B) ] * -* Compute (U \P**T * B) -> B [ (U \P**T * B) ] + CALL STRSM( 'L', 'U', 'T', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB) + END IF * - CALL STRSM('L', 'U', 'T', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) +* 2) Solve with triangular matrix T * -* Compute T \ B -> B [ T \ (U \P**T * B) ] +* Compute T \ B -> B [ T \ (U**T \P**T * B) ] * CALL SLACPY( 'F', 1, N, A(1, 1), LDA+1, WORK(N), 1) IF( N.GT.1 ) THEN @@ -224,41 +233,53 @@ END IF CALL SGTSV(N, NRHS, WORK(1), WORK(N), WORK(2*N), B, LDB, $ INFO) +* +* 3) Backward substitution with U +* + IF( N.GT.1 ) THEN * * -* Compute (U**T \ B) -> B [ U**T \ (T \ (U \P**T * B) ) ] +* Compute U \ B -> B [ U \ (T \ (U**T \P**T * B) ) ] * - CALL STRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B(2, 1), LDB) + CALL STRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B(2, 1), LDB) * -* Pivot, P * B [ P * (U**T \ (T \ (U \P**T * B) )) ] +* Pivot, P * B -> B [ P * (U \ (T \ (U**T \P**T * B) )) ] * - K = N - DO WHILE ( K.GE.1 ) - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL SSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - K = K - 1 - END DO + K = N + DO WHILE ( K.GE.1 ) + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL SSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + K = K - 1 + END DO + END IF * ELSE * * Solve A*X = B, where A = L*T*L**T. * -* Pivot, P**T * B +* 1) Forward substitution with L * - K = 1 - DO WHILE ( K.LE.N ) - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL SSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - K = K + 1 - END DO + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B +* + K = 1 + DO WHILE ( K.LE.N ) + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL SSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + K = K + 1 + END DO * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute L \ B -> B [ (L \P**T * B) ] +* + CALL STRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1), + $ LDA, B(2, 1), LDB) + END IF * - CALL STRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1), LDA, - $ B(2, 1), LDB) +* 2) Solve with triangular matrix T * * Compute T \ B -> B [ T \ (L \P**T * B) ] * @@ -270,20 +291,25 @@ CALL SGTSV(N, NRHS, WORK(1), WORK(N), WORK(2*N), B, LDB, $ INFO) * -* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] +* 3) Backward substitution with L**T * - CALL STRSM( 'L', 'L', 'T', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) + IF( N.GT.1 ) THEN +* +* Compute L**T \ B -> B [ L**T \ (T \ (L \P**T * B) ) ] * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] + CALL STRSM( 'L', 'L', 'T', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB) * - K = N - DO WHILE ( K.GE.1 ) - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL SSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - K = K - 1 - END DO +* Pivot, P * B -> B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* + K = N + DO WHILE ( K.GE.1 ) + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL SSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + K = K - 1 + END DO + END IF * END IF * diff --git a/lapack-netlib/SRC/ssytrs_aa_2stage.f b/lapack-netlib/SRC/ssytrs_aa_2stage.f index d271b9481..cf2da529d 100644 --- a/lapack-netlib/SRC/ssytrs_aa_2stage.f +++ 
b/lapack-netlib/SRC/ssytrs_aa_2stage.f @@ -36,7 +36,7 @@ *> \verbatim *> *> SSYTRS_AA_2STAGE solves a system of linear equations A*X = B with a real -*> symmetric matrix A using the factorization A = U*T*U**T or +*> symmetric matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by SSYTRF_AA_2STAGE. *> \endverbatim * @@ -48,7 +48,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -208,15 +208,15 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. * IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL SLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (U**T \P**T * B) -> B [ (U**T \P**T * B) ] +* Compute (U**T \ B) -> B [ (U**T \P**T * B) ] * CALL STRSM( 'L', 'U', 'T', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) @@ -234,7 +234,7 @@ CALL STRSM( 'L', 'U', 'N', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (U \ (T \ (U**T \P**T * B) )) ] +* Pivot, P * B -> B [ P * (U \ (T \ (U**T \P**T * B) )) ] * CALL SLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * @@ -246,11 +246,11 @@ * IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL SLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute (L \ B) -> B [ (L \P**T * B) ] * CALL STRSM( 'L', 'L', 'N', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) @@ -268,7 +268,7 @@ CALL STRSM( 'L', 'L', 'T', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* Pivot, P * B -> B [ P * (L**T \ (T \ (L \P**T * B) )) ] * CALL SLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * diff --git a/lapack-netlib/SRC/stgsy2.f b/lapack-netlib/SRC/stgsy2.f index ca9946a7e..2814889fc 100644 --- a/lapack-netlib/SRC/stgsy2.f +++ b/lapack-netlib/SRC/stgsy2.f @@ -71,7 +71,7 @@ *> R * B**T + L * E**T = scale * -F *> *> This case is used to compute an estimate of Dif[(A, D), (B, E)] = -*> sigma_min(Z) using reverse communicaton with SLACON. +*> sigma_min(Z) using reverse communication with SLACON. *> *> STGSY2 also (IJOB >= 1) contributes to the computation in STGSYL *> of an upper bound on the separation between two matrix pairs. Then @@ -85,7 +85,7 @@ *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 -*> = 'N', solve the generalized Sylvester equation (1). +*> = 'N': solve the generalized Sylvester equation (1). *> = 'T': solve the 'transposed' system (3). *> \endverbatim *> diff --git a/lapack-netlib/SRC/stgsyl.f b/lapack-netlib/SRC/stgsyl.f index cd597f37d..ff634b1de 100644 --- a/lapack-netlib/SRC/stgsyl.f +++ b/lapack-netlib/SRC/stgsyl.f @@ -88,20 +88,20 @@ *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 -*> = 'N', solve the generalized Sylvester equation (1). -*> = 'T', solve the 'transposed' system (3). +*> = 'N': solve the generalized Sylvester equation (1). +*> = 'T': solve the 'transposed' system (3). *> \endverbatim *> *> \param[in] IJOB *> \verbatim *> IJOB is INTEGER *> Specifies what kind of functionality to be performed. -*> =0: solve (1) only. -*> =1: The functionality of 0 and 3. -*> =2: The functionality of 0 and 4. -*> =3: Only an estimate of Dif[(A,D), (B,E)] is computed. +*> = 0: solve (1) only. +*> = 1: The functionality of 0 and 3.
+*> = 2: The functionality of 0 and 4. +*> = 3: Only an estimate of Dif[(A,D), (B,E)] is computed. *> (look ahead strategy IJOB = 1 is used). -*> =4: Only an estimate of Dif[(A,D), (B,E)] is computed. +*> = 4: Only an estimate of Dif[(A,D), (B,E)] is computed. *> ( SGECON on sub-systems is used ). *> Not referenced if TRANS = 'T'. *> \endverbatim diff --git a/lapack-netlib/SRC/stpmlqt.f b/lapack-netlib/SRC/stpmlqt.f index 565dadd0c..8fc7823c2 100644 --- a/lapack-netlib/SRC/stpmlqt.f +++ b/lapack-netlib/SRC/stpmlqt.f @@ -94,7 +94,7 @@ *> *> \param[in] V *> \verbatim -*> V is REAL array, dimension (LDA,K) +*> V is REAL array, dimension (LDV,K) *> The i-th row must contain the vector which defines the *> elementary reflector H(i), for i = 1,2,...,k, as returned by *> STPLQT in B. See Further Details. diff --git a/lapack-netlib/SRC/stpmqrt.f b/lapack-netlib/SRC/stpmqrt.f index b1813b7dd..6a5cbb981 100644 --- a/lapack-netlib/SRC/stpmqrt.f +++ b/lapack-netlib/SRC/stpmqrt.f @@ -94,7 +94,7 @@ *> *> \param[in] V *> \verbatim -*> V is REAL array, dimension (LDA,K) +*> V is REAL array, dimension (LDV,K) *> The i-th column must contain the vector which defines the *> elementary reflector H(i), for i = 1,2,...,k, as returned by *> STPQRT in B. See Further Details. diff --git a/lapack-netlib/SRC/stprfb.f b/lapack-netlib/SRC/stprfb.f index 66e67252f..fcd164183 100644 --- a/lapack-netlib/SRC/stprfb.f +++ b/lapack-netlib/SRC/stprfb.f @@ -152,8 +152,8 @@ *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. -*> If SIDE = 'L', LDC >= max(1,K); -*> If SIDE = 'R', LDC >= max(1,M). +*> If SIDE = 'L', LDA >= max(1,K); +*> If SIDE = 'R', LDA >= max(1,M). *> \endverbatim *> *> \param[in,out] B diff --git a/lapack-netlib/SRC/zcgesv.f b/lapack-netlib/SRC/zcgesv.f index bb12d4f3a..b71018638 100644 --- a/lapack-netlib/SRC/zcgesv.f +++ b/lapack-netlib/SRC/zcgesv.f @@ -93,9 +93,9 @@ *> dimension (LDA,N) *> On entry, the N-by-N coefficient matrix A. *> On exit, if iterative refinement has been successfully used -*> (INFO.EQ.0 and ITER.GE.0, see description below), then A is +*> (INFO = 0 and ITER >= 0, see description below), then A is *> unchanged, if double precision factorization has been used -*> (INFO.EQ.0 and ITER.LT.0, see description below), then the +*> (INFO = 0 and ITER < 0, see description below), then the *> array A contains the factors L and U from the factorization *> A = P*L*U; the unit diagonal elements of L are not stored. *> \endverbatim @@ -112,8 +112,8 @@ *> The pivot indices that define the permutation matrix P; *> row i of the matrix was interchanged with row IPIV(i). *> Corresponds either to the single precision factorization -*> (if INFO.EQ.0 and ITER.GE.0) or the double precision -*> factorization (if INFO.EQ.0 and ITER.LT.0). +*> (if INFO = 0 and ITER >= 0) or the double precision +*> factorization (if INFO = 0 and ITER < 0). *> \endverbatim *> *> \param[in] B @@ -421,7 +421,7 @@ 30 CONTINUE * * If we are at this place of the code, this is because we have -* performed ITER=ITERMAX iterations and never satisified the stopping +* performed ITER=ITERMAX iterations and never satisfied the stopping * criterion, set up the ITER flag accordingly and follow up on double * precision routine. * diff --git a/lapack-netlib/SRC/zcposv.f b/lapack-netlib/SRC/zcposv.f index eafcce623..101d25f5d 100644 --- a/lapack-netlib/SRC/zcposv.f +++ b/lapack-netlib/SRC/zcposv.f @@ -111,9 +111,9 @@ *> elements need not be set and are assumed to be zero.
*> *> On exit, if iterative refinement has been successfully used -*> (INFO.EQ.0 and ITER.GE.0, see description below), then A is +*> (INFO = 0 and ITER >= 0, see description below), then A is *> unchanged, if double precision factorization has been used -*> (INFO.EQ.0 and ITER.LT.0, see description below), then the +*> (INFO = 0 and ITER < 0, see description below), then the *> array A contains the factor U or L from the Cholesky *> factorization A = U**H*U or A = L*L**H. *> \endverbatim @@ -431,7 +431,7 @@ 30 CONTINUE * * If we are at this place of the code, this is because we have -* performed ITER=ITERMAX iterations and never satisified the +* performed ITER=ITERMAX iterations and never satisfied the * stopping criterion, set up the ITER flag accordingly and follow * up on double precision routine. * diff --git a/lapack-netlib/SRC/zgbrfsx.f b/lapack-netlib/SRC/zgbrfsx.f index e40d7d23e..872709899 100644 --- a/lapack-netlib/SRC/zgbrfsx.f +++ b/lapack-netlib/SRC/zgbrfsx.f @@ -75,7 +75,7 @@ *> Specifies the form of the system of equations: *> = 'N': A * X = B (No transpose) *> = 'T': A**T * X = B (Transpose) -*> = 'C': A**H * X = B (Conjugate transpose = Transpose) +*> = 'C': A**H * X = B (Conjugate transpose) *> \endverbatim *> *> \param[in] EQUED @@ -308,7 +308,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -344,14 +344,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -359,9 +359,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/zgbsvxx.f b/lapack-netlib/SRC/zgbsvxx.f index 9ba9c2ee3..0d916fd62 100644 --- a/lapack-netlib/SRC/zgbsvxx.f +++ b/lapack-netlib/SRC/zgbsvxx.f @@ -431,7 +431,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. 
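The PARAMS/NPARAMS convention above recurs in all of the *RFSX/*SVXX expert drivers: entries of PARAMS that are negative on entry are overwritten with defaults, and nothing past PARAMS(NPARAMS) is touched. The toy program below only mimics that defaulting rule to make the convention concrete; it is not LAPACK code, and the particular default values (1.0 for the refinement switch, 10.0 for the residual-computation limit, 1.0 for componentwise bounds) are assumptions taken from the surrounding documentation:

      PROGRAM PARAMS_DEFAULTS_SKETCH
*     Illustration only: mimic the documented PARAMS defaulting rule.
      INTEGER            NPARAMS, I
      PARAMETER          ( NPARAMS = 3 )
      DOUBLE PRECISION   PARAMS( NPARAMS ), DFLT( NPARAMS )
      DATA               DFLT / 1.0D+0, 10.0D+0, 1.0D+0 /
*     a negative entry means: use the default for this position
      PARAMS( 1 ) = -1.0D+0
      PARAMS( 2 ) = 25.0D+0
      PARAMS( 3 ) = -1.0D+0
      DO 10 I = 1, NPARAMS
         IF ( PARAMS( I ).LT.0.0D+0 ) PARAMS( I ) = DFLT( I )
   10 CONTINUE
*     PARAMS is now 1.0, 25.0, 1.0, matching the documented behaviour
      WRITE( *, * ) ( PARAMS( I ), I = 1, NPARAMS )
      END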
*> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -467,14 +467,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -482,9 +482,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. *> (other values are reserved for future use) *> *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual diff --git a/lapack-netlib/SRC/zgebak.f b/lapack-netlib/SRC/zgebak.f index a9761fde2..70c265e05 100644 --- a/lapack-netlib/SRC/zgebak.f +++ b/lapack-netlib/SRC/zgebak.f @@ -48,10 +48,10 @@ *> \verbatim *> JOB is CHARACTER*1 *> Specifies the type of backward transformation required: -*> = 'N', do nothing, return immediately; -*> = 'P', do backward transformation for permutation only; -*> = 'S', do backward transformation for scaling only; -*> = 'B', do backward transformations for both permutation and +*> = 'N': do nothing, return immediately; +*> = 'P': do backward transformation for permutation only; +*> = 'S': do backward transformation for scaling only; +*> = 'B': do backward transformations for both permutation and *> scaling. *> JOB must be the same as the argument JOB supplied to ZGEBAL. *> \endverbatim diff --git a/lapack-netlib/SRC/zgeev.f b/lapack-netlib/SRC/zgeev.f index 22b04469f..1ba542587 100644 --- a/lapack-netlib/SRC/zgeev.f +++ b/lapack-netlib/SRC/zgeev.f @@ -157,7 +157,7 @@ *> < 0: if INFO = -i, the i-th argument had an illegal value. *> > 0: if INFO = i, the QR algorithm failed to compute all the *> eigenvalues, and no eigenvectors have been computed; -*> elements and i+1:N of W contain eigenvalues which have +*> elements i+1:N of W contain eigenvalues which have *> converged. *> \endverbatim * diff --git a/lapack-netlib/SRC/zgejsv.f b/lapack-netlib/SRC/zgejsv.f index d553da90b..91a20416e 100644 --- a/lapack-netlib/SRC/zgejsv.f +++ b/lapack-netlib/SRC/zgejsv.f @@ -80,13 +80,13 @@ *> desirable, then this option is advisable. The input matrix A *> is preprocessed with QR factorization with FULL (row and *> column) pivoting. -*> = 'G' Computation as with 'F' with an additional estimate of the +*> = 'G': Computation as with 'F' with an additional estimate of the *> condition number of B, where A=B*D. If A has heavily weighted *> rows, then using this condition number gives too pessimistic *> error bound. *> = 'A': Small singular values are not well determined by the data *> and are considered as noisy; the matrix is treated as -*> numerically rank defficient. The error in the computed +*> numerically rank deficient. The error in the computed *> singular values is bounded by f(m,n)*epsilon*||A||. 
*> The computed SVD A = U * S * V^* restores A up to *> f(m,n)*epsilon*||A||. @@ -117,7 +117,7 @@ *> = 'V': N columns of V are returned in the array V; Jacobi rotations *> are not explicitly accumulated. *> = 'J': N columns of V are returned in the array V, but they are -*> computed as the product of Jacobi rotations, if JOBT .EQ. 'N'. +*> computed as the product of Jacobi rotations, if JOBT = 'N'. *> = 'W': V may be used as workspace of length N*N. See the description *> of V. *> = 'N': V is not computed. @@ -131,7 +131,7 @@ *> specified range. If A .NE. 0 is scaled so that the largest singular *> value of c*A is around SQRT(BIG), BIG=DLAMCH('O'), then JOBR issues *> the licence to kill columns of A whose norm in c*A is less than -*> SQRT(SFMIN) (for JOBR.EQ.'R'), or less than SMALL=SFMIN/EPSLN, +*> SQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, *> where SFMIN=DLAMCH('S'), EPSLN=DLAMCH('E'). *> = 'N': Do not kill small columns of c*A. This option assumes that *> BLAS and QR factorizations and triangular solvers are @@ -229,7 +229,7 @@ *> If JOBU = 'F', then U contains on exit the M-by-M matrix of *> the left singular vectors, including an ONB *> of the orthogonal complement of the Range(A). -*> If JOBU = 'W' .AND. (JOBV.EQ.'V' .AND. JOBT.EQ.'T' .AND. M.EQ.N), +*> If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), *> then U is used as workspace if the procedure *> replaces A with A^*. In that case, [V] is computed *> in U as left singular vectors of A^* and then @@ -251,7 +251,7 @@ *> V is COMPLEX*16 array, dimension ( LDV, N ) *> If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of *> the right singular vectors; -*> If JOBV = 'W', AND (JOBU.EQ.'U' AND JOBT.EQ.'T' AND M.EQ.N), +*> If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), *> then V is used as workspace if the pprocedure *> replaces A with A^*. In that case, [U] is computed *> in V as right singular vectors of A^* and then @@ -282,7 +282,7 @@ *> Length of CWORK to confirm proper allocation of workspace. *> LWORK depends on the job: *> -*> 1. If only SIGMA is needed ( JOBU.EQ.'N', JOBV.EQ.'N' ) and +*> 1. If only SIGMA is needed ( JOBU = 'N', JOBV = 'N' ) and *> 1.1 .. no scaled condition estimate required (JOBA.NE.'E'.AND.JOBA.NE.'G'): *> LWORK >= 2*N+1. This is the minimal requirement. *> ->> For optimal performance (blocked code) the optimal value @@ -298,9 +298,9 @@ *> In general, the optimal length LWORK is computed as *> LWORK >= max(N+LWORK(ZGEQP3),N+LWORK(ZGEQRF), LWORK(ZGESVJ), *> N*N+LWORK(ZPOCON)). -*> 2. If SIGMA and the right singular vectors are needed (JOBV.EQ.'V'), -*> (JOBU.EQ.'N') -*> 2.1 .. no scaled condition estimate requested (JOBE.EQ.'N'): +*> 2. If SIGMA and the right singular vectors are needed (JOBV = 'V'), +*> (JOBU = 'N') +*> 2.1 .. no scaled condition estimate requested (JOBE = 'N'): *> -> the minimal requirement is LWORK >= 3*N. *> -> For optimal performance, *> LWORK >= max(N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, @@ -318,10 +318,10 @@ *> LWORK >= max(N+LWORK(ZGEQP3), LWORK(ZPOCON), N+LWORK(ZGESVJ), *> N+LWORK(ZGELQF), 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMLQ)). *> 3. If SIGMA and the left singular vectors are needed -*> 3.1 .. no scaled condition estimate requested (JOBE.EQ.'N'): +*> 3.1 .. no scaled condition estimate requested (JOBE = 'N'): *> -> the minimal requirement is LWORK >= 3*N. 
*> -> For optimal performance: -*> if JOBU.EQ.'U' :: LWORK >= max(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, +*> if JOBU = 'U' :: LWORK >= max(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, *> where NB is the optimal block size for ZGEQP3, ZGEQRF, ZUNMQR. *> In general, the optimal length LWORK is computed as *> LWORK >= max(N+LWORK(ZGEQP3), 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMQR)). @@ -329,15 +329,15 @@ *> required (JOBA='E', or 'G'). *> -> the minimal requirement is LWORK >= 3*N. *> -> For optimal performance: -*> if JOBU.EQ.'U' :: LWORK >= max(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, +*> if JOBU = 'U' :: LWORK >= max(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, *> where NB is the optimal block size for ZGEQP3, ZGEQRF, ZUNMQR. *> In general, the optimal length LWORK is computed as *> LWORK >= max(N+LWORK(ZGEQP3),N+LWORK(ZPOCON), *> 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMQR)). -*> 4. If the full SVD is needed: (JOBU.EQ.'U' or JOBU.EQ.'F') and -*> 4.1. if JOBV.EQ.'V' +*> 4. If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and +*> 4.1. if JOBV = 'V' *> the minimal requirement is LWORK >= 5*N+2*N*N. -*> 4.2. if JOBV.EQ.'J' the minimal requirement is +*> 4.2. if JOBV = 'J' the minimal requirement is *> LWORK >= 4*N+N*N. *> In both cases, the allocated CWORK can accommodate blocked runs *> of ZGEQP3, ZGEQRF, ZGELQF, SUNMQR, ZUNMLQ. @@ -356,7 +356,7 @@ *> of A. (See the description of SVA().) *> RWORK(2) = See the description of RWORK(1). *> RWORK(3) = SCONDA is an estimate for the condition number of -*> column equilibrated A. (If JOBA .EQ. 'E' or 'G') +*> column equilibrated A. (If JOBA = 'E' or 'G') *> SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1). *> It is computed using SPOCON. It holds *> N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA @@ -375,7 +375,7 @@ *> triangular factor in the first QR factorization. *> RWORK(5) = an estimate of the scaled condition number of the *> triangular factor in the second QR factorization. -*> The following two parameters are computed if JOBT .EQ. 'T'. +*> The following two parameters are computed if JOBT = 'T'. *> They are provided for a developer/implementer who is familiar *> with the details of the method. *> RWORK(6) = the entropy of A^* * A :: this is the Shannon entropy @@ -456,23 +456,23 @@ *> of JOBA and JOBR. *> IWORK(2) = the number of the computed nonzero singular values *> IWORK(3) = if nonzero, a warning message: -*> If IWORK(3).EQ.1 then some of the column norms of A +*> If IWORK(3) = 1 then some of the column norms of A *> were denormalized floats. The requested high accuracy *> is not warranted by the data. -*> IWORK(4) = 1 or -1. If IWORK(4) .EQ. 1, then the procedure used A^* to +*> IWORK(4) = 1 or -1. If IWORK(4) = 1, then the procedure used A^* to *> do the job as specified by the JOB parameters. -*> If the call to ZGEJSV is a workspace query (indicated by LWORK .EQ. -1 or -*> LRWORK .EQ. -1), then on exit IWORK(1) contains the required length of +*> If the call to ZGEJSV is a workspace query (indicated by LWORK = -1 or +*> LRWORK = -1), then on exit IWORK(1) contains the required length of *> IWORK for the job parameters used in the call. *> \endverbatim *> *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> < 0 : if INFO = -i, then the i-th argument had an illegal value. -*> = 0 : successful exit; -*> > 0 : ZGEJSV did not converge in the maximal allowed number -*> of sweeps. The computed values may be inaccurate. +*> < 0: if INFO = -i, then the i-th argument had an illegal value. 
+*> = 0: successful exit; +*> > 0: ZGEJSV did not converge in the maximal allowed number +*> of sweeps. The computed values may be inaccurate. *> \endverbatim * * Authors: @@ -1338,7 +1338,7 @@ IF ( L2ABER ) THEN * Standard absolute error bound suffices. All sigma_i with * sigma_i < N*EPSLN*||A|| are flushed to zero. This is an -* agressive enforcement of lower numerical rank by introducing a +* aggressive enforcement of lower numerical rank by introducing a * backward error of the order of N*EPSLN*||A||. TEMP1 = SQRT(DBLE(N))*EPSLN DO 3001 p = 2, N @@ -1350,7 +1350,7 @@ 3001 CONTINUE 3002 CONTINUE ELSE IF ( L2RANK ) THEN -* .. similarly as above, only slightly more gentle (less agressive). +* .. similarly as above, only slightly more gentle (less aggressive). * Sudden drop on the diagonal of R1 is used as the criterion for * close-to-rank-deficient. TEMP1 = SQRT(SFMIN) @@ -1720,7 +1720,7 @@ CALL ZPOCON('L',NR,CWORK(2*N+1),NR,ONE,TEMP1, $ CWORK(2*N+NR*NR+1),RWORK,IERR) CONDR1 = ONE / SQRT(TEMP1) -* .. here need a second oppinion on the condition number +* .. here need a second opinion on the condition number * .. then assume worst case scenario * R1 is OK for inverse <=> CONDR1 .LT. DBLE(N) * more conservative <=> CONDR1 .LT. SQRT(DBLE(N)) @@ -1765,7 +1765,7 @@ ELSE * * .. ill-conditioned case: second QRF with pivoting -* Note that windowed pivoting would be equaly good +* Note that windowed pivoting would be equally good * numerically, and more run-time efficient. So, in * an optimal implementation, the next call to ZGEQP3 * should be replaced with eg. CALL ZGEQPX (ACM TOMS #782) @@ -1823,7 +1823,7 @@ * IF ( CONDR2 .GE. COND_OK ) THEN * .. save the Householder vectors used for Q3 -* (this overwrittes the copy of R2, as it will not be +* (this overwrites the copy of R2, as it will not be * needed in this branch, but it does not overwritte the * Huseholder vectors of Q2.). CALL ZLACPY( 'U', NR, NR, V, LDV, CWORK(2*N+1), N ) @@ -2079,7 +2079,7 @@ * * This branch deploys a preconditioned Jacobi SVD with explicitly * accumulated rotations. It is included as optional, mainly for -* experimental purposes. It does perfom well, and can also be used. +* experimental purposes. It does perform well, and can also be used. * In this implementation, this branch will be automatically activated * if the condition number sigma_max(A) / sigma_min(A) is predicted * to be greater than the overflow threshold. This is because the diff --git a/lapack-netlib/SRC/zgelq.f b/lapack-netlib/SRC/zgelq.f index 656396536..beb054b87 100644 --- a/lapack-netlib/SRC/zgelq.f +++ b/lapack-netlib/SRC/zgelq.f @@ -1,3 +1,4 @@ +*> \brief \b ZGELQ * * Definition: * =========== @@ -17,7 +18,17 @@ * ============= *> *> \verbatim -*> ZGELQ computes a LQ factorization of an M-by-N matrix A. +*> +*> ZGELQ computes an LQ factorization of a complex M-by-N matrix A: +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is a N-by-N orthogonal matrix; +*> L is a lower-triangular M-by-M matrix; +*> 0 is a M-by-(N-M) zero matrix, if M < N. +*> *> \endverbatim * * Arguments: @@ -138,7 +149,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. 
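Because the preceding paragraph warns that the contents of T are implementation details that may change, the only portable way to size T and WORK for ZGELQ is the workspace query the routine supports (TSIZE = -1, LWORK = -1). A hedged sketch of that idiom, with illustrative dimensions that are not taken from this patch:

      PROGRAM ZGELQ_QUERY_SKETCH
*     Query ZGELQ for TSIZE and LWORK, then factor A = ( L 0 ) * Q.
      INTEGER            M, N
      PARAMETER          ( M = 3, N = 5 )
      COMPLEX*16         A( M, N ), TQ( 5 ), WQ( 5 )
      COMPLEX*16, ALLOCATABLE :: T( : ), WORK( : )
      INTEGER            TSIZE, LWORK, INFO, I, J
      DO 10 J = 1, N
         DO 20 I = 1, M
            A( I, J ) = DCMPLX( I, J )
   20    CONTINUE
   10 CONTINUE
*     workspace query: the required sizes come back in TQ(1), WQ(1)
      CALL ZGELQ( M, N, A, M, TQ, -1, WQ, -1, INFO )
      TSIZE = INT( TQ( 1 ) )
      LWORK = INT( WQ( 1 ) )
      ALLOCATE( T( TSIZE ), WORK( LWORK ) )
      CALL ZGELQ( M, N, A, M, T, TSIZE, WORK, LWORK, INFO )
*     A now holds L and the reflectors; T stays opaque and is simply
*     handed on to ZGEMLQ when Q has to be applied.
      END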
*> @@ -159,10 +170,10 @@ SUBROUTINE ZGELQ( M, N, A, LDA, T, TSIZE, WORK, LWORK, $ INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N, TSIZE, LWORK @@ -176,7 +187,7 @@ * .. * .. Local Scalars .. LOGICAL LQUERY, LMINWS, MINT, MINW - INTEGER MB, NB, MINTSZ, NBLCKS + INTEGER MB, NB, MINTSZ, NBLCKS, LWMIN, LWOPT, LWREQ * .. * .. External Functions .. LOGICAL LSAME @@ -232,20 +243,32 @@ * * Determine if the workspace size satisfies minimal size * + IF( ( N.LE.M ) .OR. ( NB.LE.M ) .OR. ( NB.GE.N ) ) THEN + LWMIN = MAX( 1, N ) + LWOPT = MAX( 1, MB*N ) + ELSE + LWMIN = MAX( 1, M ) + LWOPT = MAX( 1, MB*M ) + END IF LMINWS = .FALSE. - IF( ( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) .OR. LWORK.LT.MB*M ) - $ .AND. ( LWORK.GE.M ) .AND. ( TSIZE.GE.MINTSZ ) + IF( ( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) .OR. LWORK.LT.LWOPT ) + $ .AND. ( LWORK.GE.LWMIN ) .AND. ( TSIZE.GE.MINTSZ ) $ .AND. ( .NOT.LQUERY ) ) THEN IF( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) ) THEN LMINWS = .TRUE. MB = 1 NB = N END IF - IF( LWORK.LT.MB*M ) THEN + IF( LWORK.LT.LWOPT ) THEN LMINWS = .TRUE. MB = 1 END IF END IF + IF( ( N.LE.M ) .OR. ( NB.LE.M ) .OR. ( NB.GE.N ) ) THEN + LWREQ = MAX( 1, MB*N ) + ELSE + LWREQ = MAX( 1, MB*M ) + END IF * IF( M.LT.0 ) THEN INFO = -1 @@ -256,7 +279,7 @@ ELSE IF( TSIZE.LT.MAX( 1, MB*M*NBLCKS + 5 ) $ .AND. ( .NOT.LQUERY ) .AND. ( .NOT.LMINWS ) ) THEN INFO = -6 - ELSE IF( ( LWORK.LT.MAX( 1, M*MB ) ) .AND .( .NOT.LQUERY ) + ELSE IF( ( LWORK.LT.LWREQ ) .AND .( .NOT.LQUERY ) $ .AND. ( .NOT.LMINWS ) ) THEN INFO = -8 END IF @@ -270,9 +293,9 @@ T( 2 ) = MB T( 3 ) = NB IF( MINW ) THEN - WORK( 1 ) = MAX( 1, N ) + WORK( 1 ) = LWMIN ELSE - WORK( 1 ) = MAX( 1, MB*M ) + WORK( 1 ) = LWREQ END IF END IF IF( INFO.NE.0 ) THEN @@ -297,7 +320,7 @@ $ LWORK, INFO ) END IF * - WORK( 1 ) = MAX( 1, MB*M ) + WORK( 1 ) = LWREQ * RETURN * diff --git a/lapack-netlib/SRC/zgelq2.f b/lapack-netlib/SRC/zgelq2.f index 188c8f8c8..a825ac17b 100644 --- a/lapack-netlib/SRC/zgelq2.f +++ b/lapack-netlib/SRC/zgelq2.f @@ -33,8 +33,16 @@ *> *> \verbatim *> -*> ZGELQ2 computes an LQ factorization of a complex m by n matrix A: -*> A = L * Q. +*> ZGELQ2 computes an LQ factorization of a complex m-by-n matrix A: +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is a n-by-n orthogonal matrix; +*> L is an lower-triangular m-by-m matrix; +*> 0 is a m-by-(n-m) zero matrix, if m < n. +*> *> \endverbatim * * Arguments: @@ -96,7 +104,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup complex16GEcomputational * @@ -121,10 +129,10 @@ * ===================================================================== SUBROUTINE ZGELQ2( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. 
INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/zgelqf.f b/lapack-netlib/SRC/zgelqf.f index 8d9341a61..3a5e5fd4a 100644 --- a/lapack-netlib/SRC/zgelqf.f +++ b/lapack-netlib/SRC/zgelqf.f @@ -34,7 +34,15 @@ *> \verbatim *> *> ZGELQF computes an LQ factorization of a complex M-by-N matrix A: -*> A = L * Q. +*> +*> A = ( L 0 ) * Q +*> +*> where: +*> +*> Q is a N-by-N orthogonal matrix; +*> L is an lower-triangular M-by-M matrix; +*> 0 is a M-by-(N-M) zero matrix, if M < N. +*> *> \endverbatim * * Arguments: @@ -110,7 +118,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup complex16GEcomputational * @@ -135,10 +143,10 @@ * ===================================================================== SUBROUTINE ZGELQF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/zgemlq.f b/lapack-netlib/SRC/zgemlq.f index aa07e0feb..6fb2be3d8 100644 --- a/lapack-netlib/SRC/zgemlq.f +++ b/lapack-netlib/SRC/zgemlq.f @@ -1,3 +1,4 @@ +*> \brief \b ZGEMLQ * * Definition: * =========== @@ -142,7 +143,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> diff --git a/lapack-netlib/SRC/zgemqr.f b/lapack-netlib/SRC/zgemqr.f index 32f1bf4d5..aec9321bb 100644 --- a/lapack-netlib/SRC/zgemqr.f +++ b/lapack-netlib/SRC/zgemqr.f @@ -1,3 +1,4 @@ +*> \brief \b ZGEMQR * * Definition: * =========== @@ -144,7 +145,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> diff --git a/lapack-netlib/SRC/zgeqr.f b/lapack-netlib/SRC/zgeqr.f index 1aa457f56..cea686b98 100644 --- a/lapack-netlib/SRC/zgeqr.f +++ b/lapack-netlib/SRC/zgeqr.f @@ -1,3 +1,4 @@ +*> \brief \b ZGEQR * * Definition: * =========== @@ -17,7 +18,18 @@ * ============= *> *> \verbatim -*> ZGEQR computes a QR factorization of an M-by-N matrix A. +*> +*> ZGEQR computes a QR factorization of a complex M-by-N matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix; +*> 0 is a (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -138,7 +150,7 @@ *> \verbatim *> *> These details are particular for this LAPACK implementation. Users should not -*> take them for granted. These details may change in the future, and are unlikely not +*> take them for granted. These details may change in the future, and are not likely *> true for another LAPACK implementation. 
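The zgelqf.f and zgeqr.f hunks above both spell out the same block structure, A = ( L 0 ) * Q for LQ and A = Q * ( R over 0 ) for QR. As a concrete companion to that notation, here is a hedged sketch (dimensions illustrative, not from the patch) of the classic two-step idiom: factor with ZGEQRF, then materialize the leading N columns of Q with ZUNGQR:

      PROGRAM ZGEQRF_SKETCH
*     Factor A = Q * ( R ) with M > N, then form Q(:,1:N) explicitly.
*                    ( 0 )
      INTEGER            M, N
      PARAMETER          ( M = 4, N = 2 )
      COMPLEX*16         A( M, N ), TAU( N ), WQ( 1 )
      COMPLEX*16, ALLOCATABLE :: WORK( : )
      INTEGER            LWORK, INFO, I, J
      DO 10 J = 1, N
         DO 20 I = 1, M
            A( I, J ) = DCMPLX( I+J, I-J )
   20    CONTINUE
   10 CONTINUE
*     workspace query, then the factorization itself
      CALL ZGEQRF( M, N, A, M, TAU, WQ, -1, INFO )
      LWORK = INT( WQ( 1 ) )
      ALLOCATE( WORK( LWORK ) )
      CALL ZGEQRF( M, N, A, M, TAU, WORK, LWORK, INFO )
*     R now occupies the upper triangle of A(1:N,1:N)
      CALL ZUNGQR( M, N, N, A, M, TAU, WORK, LWORK, INFO )
*     A(1:M,1:N) now holds the first N columns of Q
      END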
These details are relevant if one wants *> to try to understand the code. They are not part of the interface. *> @@ -160,10 +172,10 @@ SUBROUTINE ZGEQR( M, N, A, LDA, T, TSIZE, WORK, LWORK, $ INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N, TSIZE, LWORK diff --git a/lapack-netlib/SRC/zgeqr2.f b/lapack-netlib/SRC/zgeqr2.f index d2774d788..0384c1d42 100644 --- a/lapack-netlib/SRC/zgeqr2.f +++ b/lapack-netlib/SRC/zgeqr2.f @@ -33,8 +33,17 @@ *> *> \verbatim *> -*> ZGEQR2 computes a QR factorization of a complex m by n matrix A: -*> A = Q * R. +*> ZGEQR2 computes a QR factorization of a complex m-by-n matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a m-by-m orthogonal matrix; +*> R is an upper-triangular n-by-n matrix; +*> 0 is a (m-n)-by-n zero matrix, if m > n. +*> *> \endverbatim * * Arguments: @@ -96,7 +105,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup complex16GEcomputational * @@ -121,10 +130,10 @@ * ===================================================================== SUBROUTINE ZGEQR2( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/zgeqr2p.f b/lapack-netlib/SRC/zgeqr2p.f index 0e5e55486..7bbd81da9 100644 --- a/lapack-netlib/SRC/zgeqr2p.f +++ b/lapack-netlib/SRC/zgeqr2p.f @@ -33,8 +33,18 @@ *> *> \verbatim *> -*> ZGEQR2P computes a QR factorization of a complex m by n matrix A: -*> A = Q * R. The diagonal entries of R are real and nonnegative. +*> ZGEQR2P computes a QR factorization of a complex m-by-n matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a m-by-m orthogonal matrix; +*> R is an upper-triangular n-by-n matrix with nonnegative diagonal +*> entries; +*> 0 is a (m-n)-by-n zero matrix, if m > n. +*> *> \endverbatim * * Arguments: @@ -97,7 +107,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup complex16GEcomputational * @@ -124,10 +134,10 @@ * ===================================================================== SUBROUTINE ZGEQR2P( M, N, A, LDA, TAU, WORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N diff --git a/lapack-netlib/SRC/zgeqrf.f b/lapack-netlib/SRC/zgeqrf.f index 3ea1e71e1..2c03ebe73 100644 --- a/lapack-netlib/SRC/zgeqrf.f +++ b/lapack-netlib/SRC/zgeqrf.f @@ -34,7 +34,16 @@ *> \verbatim *> *> ZGEQRF computes a QR factorization of a complex M-by-N matrix A: -*> A = Q * R. 
+*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix; +*> 0 is a (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -111,7 +120,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup complex16GEcomputational * @@ -136,10 +145,10 @@ * ===================================================================== SUBROUTINE ZGEQRF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/zgeqrfp.f b/lapack-netlib/SRC/zgeqrfp.f index cdc4bfa94..80ead21ca 100644 --- a/lapack-netlib/SRC/zgeqrfp.f +++ b/lapack-netlib/SRC/zgeqrfp.f @@ -33,8 +33,18 @@ *> *> \verbatim *> -*> ZGEQRFP computes a QR factorization of a complex M-by-N matrix A: -*> A = Q * R. The diagonal entries of R are real and nonnegative. +*> ZGEQRFP computes a QR factorization of a complex M-by-N matrix A: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a M-by-M orthogonal matrix; +*> R is an upper-triangular N-by-N matrix with nonnegative diagonal +*> entries; +*> 0 is a (M-N)-by-N zero matrix, if M > N. +*> *> \endverbatim * * Arguments: @@ -112,7 +122,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date December 2016 +*> \date November 2019 * *> \ingroup complex16GEcomputational * @@ -139,10 +149,10 @@ * ===================================================================== SUBROUTINE ZGEQRFP( M, N, A, LDA, TAU, WORK, LWORK, INFO ) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, LWORK, M, N diff --git a/lapack-netlib/SRC/zgerfsx.f b/lapack-netlib/SRC/zgerfsx.f index 5aabe50ed..3af7f8b6b 100644 --- a/lapack-netlib/SRC/zgerfsx.f +++ b/lapack-netlib/SRC/zgerfsx.f @@ -74,7 +74,7 @@ *> Specifies the form of the system of equations: *> = 'N': A * X = B (No transpose) *> = 'T': A**T * X = B (Transpose) -*> = 'C': A**H * X = B (Conjugate transpose = Transpose) +*> = 'C': A**H * X = B (Conjugate transpose) *> \endverbatim *> *> \param[in] EQUED @@ -283,7 +283,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -319,14 +319,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used.
*> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -334,9 +334,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/zgesc2.f b/lapack-netlib/SRC/zgesc2.f index 72ef99dba..cdf15e4f4 100644 --- a/lapack-netlib/SRC/zgesc2.f +++ b/lapack-netlib/SRC/zgesc2.f @@ -91,7 +91,7 @@ *> \verbatim *> SCALE is DOUBLE PRECISION *> On exit, SCALE contains the scale factor. SCALE is chosen -*> 0 <= SCALE <= 1 to prevent owerflow in the solution. +*> 0 <= SCALE <= 1 to prevent overflow in the solution. *> \endverbatim * * Authors: diff --git a/lapack-netlib/SRC/zgesvdq.f b/lapack-netlib/SRC/zgesvdq.f new file mode 100644 index 000000000..e0fb920bb --- /dev/null +++ b/lapack-netlib/SRC/zgesvdq.f @@ -0,0 +1,1389 @@ +*> \brief ZGESVDQ computes the singular value decomposition (SVD) with a QR-Preconditioned QR SVD Method for GE matrices +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download ZGESVDQ + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE ZGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, +* S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, +* CWORK, LCWORK, RWORK, LRWORK, INFO ) +* +* .. Scalar Arguments .. +* IMPLICIT NONE +* CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV +* INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LCWORK, LRWORK, +* INFO +* .. +* .. Array Arguments .. +* COMPLEX*16 A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( * ) +* DOUBLE PRECISION S( * ), RWORK( * ) +* INTEGER IWORK( * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> ZGESVDQ computes the singular value decomposition (SVD) of a complex +*> M-by-N matrix A, where M >= N. The SVD of A is written as +*> [++] [xx] [x0] [xx] +*> A = U * SIGMA * V^*, [++] = [xx] * [ox] * [xx] +*> [++] [xx] +*> where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal +*> matrix, and V is an N-by-N unitary matrix. The diagonal elements +*> of SIGMA are the singular values of A. The columns of U and V are the +*> left and the right singular vectors of A, respectively. +*> \endverbatim +* +* Arguments +* ========= +* +*> \param[in] JOBA +*> \verbatim +*> JOBA is CHARACTER*1 +*> Specifies the level of accuracy in the computed SVD +*> = 'A' The requested accuracy corresponds to having the backward +*> error bounded by || delta A ||_F <= f(m,n) * EPS * || A ||_F, +*> where EPS = DLAMCH('Epsilon'). This authorises ZGESVDQ to +*> truncate the computed triangular factor in a rank revealing +*> QR factorization whenever the truncated part is below the +*> threshold of the order of EPS * ||A||_F. This is aggressive +*> truncation level.
+*> = 'M' Similarly as with 'A', but the truncation is more gentle: it +*> is allowed only when there is a drop on the diagonal of the +*> triangular factor in the QR factorization. This is medium +*> truncation level. +*> = 'H' High accuracy requested. No numerical rank determination based +*> on the rank revealing QR factorization is attempted. +*> = 'E' Same as 'H', and in addition the condition number of column +*> scaled A is estimated and returned in RWORK(1). +*> N^(-1/4)*RWORK(1) <= ||pinv(A_scaled)||_2 <= N^(1/4)*RWORK(1) +*> \endverbatim +*> +*> \param[in] JOBP +*> \verbatim +*> JOBP is CHARACTER*1 +*> = 'P' The rows of A are ordered in decreasing order with respect to +*> ||A(i,:)||_\infty. This enhances numerical accuracy at the cost +*> of extra data movement. Recommended for numerical robustness. +*> = 'N' No row pivoting. +*> \endverbatim +*> +*> \param[in] JOBR +*> \verbatim +*> JOBR is CHARACTER*1 +*> = 'T' After the initial pivoted QR factorization, ZGESVD is applied to +*> the adjoint R**H of the computed triangular factor R. This involves +*> some extra data movement (matrix transpositions). Useful for +*> experiments, research and development. +*> = 'N' The triangular factor R is given as input to CGESVD. This may be +*> preferred as it involves less data movement. +*> \endverbatim +*> +*> \param[in] JOBU +*> \verbatim +*> JOBU is CHARACTER*1 +*> = 'A' All M left singular vectors are computed and returned in the +*> matrix U. See the description of U. +*> = 'S' or 'U' N = min(M,N) left singular vectors are computed and returned +*> in the matrix U. See the description of U. +*> = 'R' Numerical rank NUMRANK is determined and only NUMRANK left singular +*> vectors are computed and returned in the matrix U. +*> = 'F' The N left singular vectors are returned in factored form as the +*> product of the Q factor from the initial QR factorization and the +*> N left singular vectors of (R**H , 0)**H. If row pivoting is used, +*> then the necessary information on the row pivoting is stored in +*> IWORK(N+1:N+M-1). +*> = 'N' The left singular vectors are not computed. +*> \endverbatim +*> +*> \param[in] JOBV +*> \verbatim +*> JOBV is CHARACTER*1 +*> = 'A', 'V' All N right singular vectors are computed and returned in +*> the matrix V. +*> = 'R' Numerical rank NUMRANK is determined and only NUMRANK right singular +*> vectors are computed and returned in the matrix V. This option is +*> allowed only if JOBU = 'R' or JOBU = 'N'; otherwise it is illegal. +*> = 'N' The right singular vectors are not computed. +*> \endverbatim +*> +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the input matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the input matrix A. M >= N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is COMPLEX*16 array of dimensions LDA x N +*> On entry, the input matrix A. +*> On exit, if JOBU .NE. 'N' or JOBV .NE. 'N', the lower triangle of A contains +*> the Householder vectors as stored by ZGEQP3. If JOBU = 'F', these Householder +*> vectors together with CWORK(1:N) can be used to restore the Q factors from +*> the initial pivoted QR factorization of A. See the description of U. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER. +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] S +*> \verbatim +*> S is DOUBLE PRECISION array of dimension N. 
+*> The singular values of A, ordered so that S(i) >= S(i+1). +*> \endverbatim +*> +*> \param[out] U +*> \verbatim +*> U is COMPLEX*16 array, dimension +*> LDU x M if JOBU = 'A'; see the description of LDU. In this case, +*> on exit, U contains the M left singular vectors. +*> LDU x N if JOBU = 'S', 'U', 'R' ; see the description of LDU. In this +*> case, U contains the leading N or the leading NUMRANK left singular vectors. +*> LDU x N if JOBU = 'F' ; see the description of LDU. In this case U +*> contains N x N unitary matrix that can be used to form the left +*> singular vectors. +*> If JOBU = 'N', U is not referenced. +*> \endverbatim +*> +*> \param[in] LDU +*> \verbatim +*> LDU is INTEGER. +*> The leading dimension of the array U. +*> If JOBU = 'A', 'S', 'U', 'R', LDU >= max(1,M). +*> If JOBU = 'F', LDU >= max(1,N). +*> Otherwise, LDU >= 1. +*> \endverbatim +*> +*> \param[out] V +*> \verbatim +*> V is COMPLEX*16 array, dimension +*> LDV x N if JOBV = 'A', 'V', 'R' or if JOBA = 'E' . +*> If JOBV = 'A', or 'V', V contains the N-by-N unitary matrix V**H; +*> If JOBV = 'R', V contains the first NUMRANK rows of V**H (the right +*> singular vectors, stored rowwise, of the NUMRANK largest singular values). +*> If JOBV = 'N' and JOBA = 'E', V is used as a workspace. +*> If JOBV = 'N', and JOBA.NE.'E', V is not referenced. +*> \endverbatim +*> +*> \param[in] LDV +*> \verbatim +*> LDV is INTEGER +*> The leading dimension of the array V. +*> If JOBV = 'A', 'V', 'R', or JOBA = 'E', LDV >= max(1,N). +*> Otherwise, LDV >= 1. +*> \endverbatim +*> +*> \param[out] NUMRANK +*> \verbatim +*> NUMRANK is INTEGER +*> NUMRANK is the numerical rank first determined after the rank +*> revealing QR factorization, following the strategy specified by the +*> value of JOBA. If JOBV = 'R' and JOBU = 'R', only NUMRANK +*> leading singular values and vectors are then requested in the call +*> of CGESVD. The final value of NUMRANK might be further reduced if +*> some singular values are computed as zeros. +*> \endverbatim +*> +*> \param[out] IWORK +*> \verbatim +*> IWORK is INTEGER array, dimension (max(1, LIWORK)). +*> On exit, IWORK(1:N) contains column pivoting permutation of the +*> rank revealing QR factorization. +*> If JOBP = 'P', IWORK(N+1:N+M-1) contains the indices of the sequence +*> of row swaps used in row pivoting. These can be used to restore the +*> left singular vectors in the case JOBU = 'F'. +* +*> If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> IWORK(1) returns the minimal LIWORK. +*> \endverbatim +*> +*> \param[in] LIWORK +*> \verbatim +*> LIWORK is INTEGER +*> The dimension of the array IWORK. +*> LIWORK >= N + M - 1, if JOBP = 'P'; +*> LIWORK >= N if JOBP = 'N'. +*> +*> If LIWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the CWORK, IWORK, and RWORK arrays, and no error +*> message related to LCWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] CWORK +*> \verbatim +*> CWORK is COMPLEX*16 array, dimension (max(2, LCWORK)), used as a workspace. +*> On exit, if, on entry, LCWORK.NE.-1, CWORK(1:N) contains parameters +*> needed to recover the Q factor from the QR factorization computed by +*> ZGEQP3. +* +*> If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> CWORK(1) returns the optimal LCWORK, and +*> CWORK(2) returns the minimal LCWORK. +*> \endverbatim +*> +*> \param[in,out] LCWORK +*> \verbatim +*> LCWORK is INTEGER +*> The dimension of the array CWORK.
It is determined as follows: +*> Let LWQP3 = N+1, LWCON = 2*N, and let +*> LWUNQ = { MAX( N, 1 ), if JOBU = 'R', 'S', or 'U' +*> { MAX( M, 1 ), if JOBU = 'A' +*> LWSVD = MAX( 3*N, 1 ) +*> LWLQF = MAX( N/2, 1 ), LWSVD2 = MAX( 3*(N/2), 1 ), LWUNLQ = MAX( N, 1 ), +*> LWQRF = MAX( N/2, 1 ), LWUNQ2 = MAX( N, 1 ) +*> Then the minimal value of LCWORK is: +*> = MAX( N + LWQP3, LWSVD ) if only the singular values are needed; +*> = MAX( N + LWQP3, LWCON, LWSVD ) if only the singular values are needed, +*> and a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD, LWUNQ ) if the singular values and the left +*> singular vectors are requested; +*> = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) if the singular values and the left +*> singular vectors are requested, and also +*> a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD ) if the singular values and the right +*> singular vectors are requested; +*> = N + MAX( LWQP3, LWCON, LWSVD ) if the singular values and the right +*> singular vectors are requested, and also +*> a scaled condition estimate requested; +*> +*> = N + MAX( LWQP3, LWSVD, LWUNQ ) if the full SVD is requested with JOBV = 'R'; +*> independent of JOBR; +*> = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) if the full SVD is requested, +*> JOBV = 'R' and, also a scaled condition +*> estimate requested; independent of JOBR; +*> = MAX( N + MAX( LWQP3, LWSVD, LWUNQ ), +*> N + MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, N/2+LWUNLQ, LWUNQ) ) if the +*> full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='N' +*> = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ), +*> N + MAX( LWQP3, LWCON, N/2+LWLQF, N/2+LWSVD2, N/2+LWUNLQ, LWUNQ ) ) +*> if the full SVD is requested with JOBV = 'A' or 'V', and +*> JOBR ='N', and also a scaled condition number estimate +*> requested. +*> = MAX( N + MAX( LWQP3, LWSVD, LWUNQ ), +*> N + MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, N/2+LWUNQ2, LWUNQ ) ) if the +*> full SVD is requested with JOBV = 'A', 'V', and JOBR ='T' +*> = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ), +*> N + MAX( LWQP3, LWCON, N/2+LWQRF, N/2+LWSVD2, N/2+LWUNQ2, LWUNQ ) ) +*> if the full SVD is requested with JOBV = 'A', 'V' and +*> JOBR ='T', and also a scaled condition number estimate +*> requested. +*> Finally, LCWORK must be at least two: LCWORK = MAX( 2, LCWORK ). +*> +*> If LCWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the CWORK, IWORK, and RWORK arrays, and no error +*> message related to LCWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] RWORK +*> \verbatim +*> RWORK is DOUBLE PRECISION array, dimension (max(1, LRWORK)). +*> On exit, +*> 1. If JOBA = 'E', RWORK(1) contains an estimate of the condition +*> number of column scaled A. If A = C * D where D is diagonal and C +*> has unit columns in the Euclidean norm, then, assuming full column rank, +*> N^(-1/4) * RWORK(1) <= ||pinv(C)||_2 <= N^(1/4) * RWORK(1). +*> Otherwise, RWORK(1) = -1. +*> 2. RWORK(2) contains the number of singular values computed as +*> exact zeros in ZGESVD applied to the upper triangular or trapezoidal +*> R (from the initial QR factorization). In case of early exit (no call to +*> ZGESVD, such as in the case of zero matrix) RWORK(2) = -1. +* +*> If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, +*> RWORK(1) returns the minimal LRWORK. +*> \endverbatim +*> +*> \param[in] LRWORK +*> \verbatim +*> LRWORK is INTEGER. +*> The dimension of the array RWORK.
+*> If JOBP ='P', then LRWORK >= MAX(2, M, 5*N); +*> Otherwise, LRWORK >= MAX(2, 5*N). +* +*> If LRWORK = -1, then a workspace query is assumed; the routine +*> only calculates and returns the optimal and minimal sizes +*> for the CWORK, IWORK, and RWORK arrays, and no error +*> message related to LCWORK is issued by XERBLA. +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit. +*> < 0: if INFO = -i, the i-th argument had an illegal value. +*> > 0: if ZBDSQR did not converge, INFO specifies how many superdiagonals +*> of an intermediate bidiagonal form B (computed in ZGESVD) did not +*> converge to zero. +*> \endverbatim +* +*> \par Further Details: +* ======================== +*> +*> \verbatim +*> +*> 1. The data movement (matrix transpose) is coded using simple nested +*> DO-loops because BLAS and LAPACK do not provide corresponding subroutines. +*> Those DO-loops are easily identified in this source code - by the CONTINUE +*> statements labeled with 11**. In an optimized version of this code, the +*> nested DO loops should be replaced with calls to an optimized subroutine. +*> 2. This code scales A by 1/SQRT(M) if the largest ABS(A(i,j)) could cause +*> column norm overflow. This is the minimal precaution and it is left to the +*> SVD routine (CGESVD) to do its own preemptive scaling if potential over- +*> or underflows are detected. To avoid repeated scanning of the array A, +*> an optimal implementation would do all necessary scaling before calling +*> CGESVD and the scaling in CGESVD can be switched off. +*> 3. Other comments related to code optimization are given in comments in the +*> code, enclosed in [[double brackets]]. +*> \endverbatim +* +*> \par Bugs, examples and comments +* =========================== +* +*> \verbatim +*> Please report all bugs and send interesting examples and/or comments to +*> drmac@math.hr. Thank you. +*> \endverbatim +* +*> \par References +* =============== +* +*> \verbatim +*> [1] Zlatko Drmac, Algorithm 977: A QR-Preconditioned QR SVD Method for +*> Computing the SVD with High Accuracy. ACM Trans. Math. Softw. +*> 44(1): 11:1-11:30 (2017) +*> +*> SIGMA library, xGESVDQ section updated February 2016. +*> Developed and coded by Zlatko Drmac, Department of Mathematics +*> University of Zagreb, Croatia, drmac@math.hr +*> \endverbatim +* +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> Developed and coded by Zlatko Drmac, Department of Mathematics +*> University of Zagreb, Croatia, drmac@math.hr +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2018 +* +*> \ingroup complex16GEsing +* +* ===================================================================== + SUBROUTINE ZGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, + $ S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, + $ CWORK, LCWORK, RWORK, LRWORK, INFO ) +* .. Scalar Arguments .. + IMPLICIT NONE + CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV + INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LCWORK, LRWORK, + $ INFO +* .. +* .. Array Arguments .. + COMPLEX*16 A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( * ) + DOUBLE PRECISION S( * ), RWORK( * ) + INTEGER IWORK( * ) +* +* ===================================================================== +* +* .. Parameters ..
+ DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + COMPLEX*16 CZERO, CONE + PARAMETER ( CZERO = (0.0D0,0.0D0), CONE = (1.0D0,0.0D0) ) +* .. +* .. Local Scalars .. + INTEGER IERR, NR, N1, OPTRATIO, p, q + INTEGER LWCON, LWQP3, LWRK_ZGELQF, LWRK_ZGESVD, LWRK_ZGESVD2, + $ LWRK_ZGEQP3, LWRK_ZGEQRF, LWRK_ZUNMLQ, LWRK_ZUNMQR, + $ LWRK_ZUNMQR2, LWLQF, LWQRF, LWSVD, LWSVD2, LWUNQ, + $ LWUNQ2, LWUNLQ, MINWRK, MINWRK2, OPTWRK, OPTWRK2, + $ IMINWRK, RMINWRK + LOGICAL ACCLA, ACCLM, ACCLH, ASCALED, CONDA, DNTWU, DNTWV, + $ LQUERY, LSVC0, LSVEC, ROWPRM, RSVEC, RTRANS, WNTUA, + $ WNTUF, WNTUR, WNTUS, WNTVA, WNTVR + DOUBLE PRECISION BIG, EPSLN, RTMP, SCONDA, SFMIN + COMPLEX*16 CTMP +* .. +* .. Local Arrays + COMPLEX*16 CDUMMY(1) + DOUBLE PRECISION RDUMMY(1) +* .. +* .. External Subroutines (BLAS, LAPACK) + EXTERNAL ZGELQF, ZGEQP3, ZGEQRF, ZGESVD, ZLACPY, ZLAPMT, + $ ZLASCL, ZLASET, ZLASWP, ZDSCAL, DLASET, DLASCL, + $ ZPOCON, ZUNMLQ, ZUNMQR, XERBLA +* .. +* .. External Functions (BLAS, LAPACK) + LOGICAL LSAME + INTEGER IDAMAX + DOUBLE PRECISION ZLANGE, DZNRM2, DLAMCH + EXTERNAL LSAME, ZLANGE, IDAMAX, DZNRM2, DLAMCH +* .. +* .. Intrinsic Functions .. + INTRINSIC ABS, CONJG, MAX, MIN, DBLE, SQRT +* .. +* .. Executable Statements .. +* +* Test the input arguments +* + WNTUS = LSAME( JOBU, 'S' ) .OR. LSAME( JOBU, 'U' ) + WNTUR = LSAME( JOBU, 'R' ) + WNTUA = LSAME( JOBU, 'A' ) + WNTUF = LSAME( JOBU, 'F' ) + LSVC0 = WNTUS .OR. WNTUR .OR. WNTUA + LSVEC = LSVC0 .OR. WNTUF + DNTWU = LSAME( JOBU, 'N' ) +* + WNTVR = LSAME( JOBV, 'R' ) + WNTVA = LSAME( JOBV, 'A' ) .OR. LSAME( JOBV, 'V' ) + RSVEC = WNTVR .OR. WNTVA + DNTWV = LSAME( JOBV, 'N' ) +* + ACCLA = LSAME( JOBA, 'A' ) + ACCLM = LSAME( JOBA, 'M' ) + CONDA = LSAME( JOBA, 'E' ) + ACCLH = LSAME( JOBA, 'H' ) .OR. CONDA +* + ROWPRM = LSAME( JOBP, 'P' ) + RTRANS = LSAME( JOBR, 'T' ) +* + IF ( ROWPRM ) THEN + IMINWRK = MAX( 1, N + M - 1 ) + RMINWRK = MAX( 2, M, 5*N ) + ELSE + IMINWRK = MAX( 1, N ) + RMINWRK = MAX( 2, 5*N ) + END IF + LQUERY = (LIWORK .EQ. -1 .OR. LCWORK .EQ. -1 .OR. LRWORK .EQ. -1) + INFO = 0 + IF ( .NOT. ( ACCLA .OR. ACCLM .OR. ACCLH ) ) THEN + INFO = -1 + ELSE IF ( .NOT.( ROWPRM .OR. LSAME( JOBP, 'N' ) ) ) THEN + INFO = -2 + ELSE IF ( .NOT.( RTRANS .OR. LSAME( JOBR, 'N' ) ) ) THEN + INFO = -3 + ELSE IF ( .NOT.( LSVEC .OR. DNTWU ) ) THEN + INFO = -4 + ELSE IF ( WNTUR .AND. WNTVA ) THEN + INFO = -5 + ELSE IF ( .NOT.( RSVEC .OR. DNTWV )) THEN + INFO = -5 + ELSE IF ( M.LT.0 ) THEN + INFO = -6 + ELSE IF ( ( N.LT.0 ) .OR. ( N.GT.M ) ) THEN + INFO = -7 + ELSE IF ( LDA.LT.MAX( 1, M ) ) THEN + INFO = -9 + ELSE IF ( LDU.LT.1 .OR. ( LSVC0 .AND. LDU.LT.M ) .OR. + $ ( WNTUF .AND. LDU.LT.N ) ) THEN + INFO = -12 + ELSE IF ( LDV.LT.1 .OR. ( RSVEC .AND. LDV.LT.N ) .OR. + $ ( CONDA .AND. LDV.LT.N ) ) THEN + INFO = -14 + ELSE IF ( LIWORK .LT. IMINWRK .AND. .NOT. LQUERY ) THEN + INFO = -17 + END IF +* +* + IF ( INFO .EQ. 0 ) THEN +* .. compute the minimal and the optimal workspace lengths +* [[The expressions for computing the minimal and the optimal +* values of LCWORK are written with a lot of redundancy and +* can be simplified. However, this detailed form is easier for +* maintenance and modifications of the code.]] +* +* .. minimal workspace length for ZGEQP3 of an M x N matrix + LWQP3 = N+1 +* .. minimal workspace length for ZUNMQR to build left singular vectors + IF ( WNTUS .OR. WNTUR ) THEN + LWUNQ = MAX( N , 1 ) + ELSE IF ( WNTUA ) THEN + LWUNQ = MAX( M , 1 ) + END IF +* .. 
minimal workspace length for ZPOCON of an N x N matrix + LWCON = 2 * N +* .. ZGESVD of an N x N matrix + LWSVD = MAX( 3 * N, 1 ) + IF ( LQUERY ) THEN + CALL ZGEQP3( M, N, A, LDA, IWORK, CDUMMY, CDUMMY, -1, + $ RDUMMY, IERR ) + LWRK_ZGEQP3 = INT( CDUMMY(1) ) + IF ( WNTUS .OR. WNTUR ) THEN + CALL ZUNMQR( 'L', 'N', M, N, N, A, LDA, CDUMMY, U, + $ LDU, CDUMMY, -1, IERR ) + LWRK_ZUNMQR = INT( CDUMMY(1) ) + ELSE IF ( WNTUA ) THEN + CALL ZUNMQR( 'L', 'N', M, M, N, A, LDA, CDUMMY, U, + $ LDU, CDUMMY, -1, IERR ) + LWRK_ZUNMQR = INT( CDUMMY(1) ) + ELSE + LWRK_ZUNMQR = 0 + END IF + END IF + MINWRK = 2 + OPTWRK = 2 + IF ( .NOT. (LSVEC .OR. RSVEC ) ) THEN +* .. minimal and optimal sizes of the complex workspace if +* only the singular values are requested + IF ( CONDA ) THEN + MINWRK = MAX( N+LWQP3, LWCON, LWSVD ) + ELSE + MINWRK = MAX( N+LWQP3, LWSVD ) + END IF + IF ( LQUERY ) THEN + CALL ZGESVD( 'N', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_ZGESVD = INT( CDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = MAX( N+LWRK_ZGEQP3, N+LWCON, LWRK_ZGESVD ) + ELSE + OPTWRK = MAX( N+LWRK_ZGEQP3, LWRK_ZGESVD ) + END IF + END IF + ELSE IF ( LSVEC .AND. (.NOT.RSVEC) ) THEN +* .. minimal and optimal sizes of the complex workspace if the +* singular values and the left singular vectors are requested + IF ( CONDA ) THEN + MINWRK = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) + ELSE + MINWRK = N + MAX( LWQP3, LWSVD, LWUNQ ) + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL ZGESVD( 'N', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + ELSE + CALL ZGESVD( 'O', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + END IF + LWRK_ZGESVD = INT( CDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = N + MAX( LWRK_ZGEQP3, LWCON, LWRK_ZGESVD, + $ LWRK_ZUNMQR ) + ELSE + OPTWRK = N + MAX( LWRK_ZGEQP3, LWRK_ZGESVD, + $ LWRK_ZUNMQR ) + END IF + END IF + ELSE IF ( RSVEC .AND. (.NOT.LSVEC) ) THEN +* .. minimal and optimal sizes of the complex workspace if the +* singular values and the right singular vectors are requested + IF ( CONDA ) THEN + MINWRK = N + MAX( LWQP3, LWCON, LWSVD ) + ELSE + MINWRK = N + MAX( LWQP3, LWSVD ) + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL ZGESVD( 'O', 'N', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + ELSE + CALL ZGESVD( 'N', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + END IF + LWRK_ZGESVD = INT( CDUMMY(1) ) + IF ( CONDA ) THEN + OPTWRK = N + MAX( LWRK_ZGEQP3, LWCON, LWRK_ZGESVD ) + ELSE + OPTWRK = N + MAX( LWRK_ZGEQP3, LWRK_ZGESVD ) + END IF + END IF + ELSE +* .. minimal and optimal sizes of the complex workspace if the +* full SVD is requested + IF ( RTRANS ) THEN + MINWRK = MAX( LWQP3, LWSVD, LWUNQ ) + IF ( CONDA ) MINWRK = MAX( MINWRK, LWCON ) + MINWRK = MINWRK + N + IF ( WNTVA ) THEN +* .. minimal workspace length for N x N/2 ZGEQRF + LWQRF = MAX( N/2, 1 ) +* .. minimal workspace length for N/2 x N/2 ZGESVD + LWSVD2 = MAX( 3 * (N/2), 1 ) + LWUNQ2 = MAX( N, 1 ) + MINWRK2 = MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, + $ N/2+LWUNQ2, LWUNQ ) + IF ( CONDA ) MINWRK2 = MAX( MINWRK2, LWCON ) + MINWRK2 = N + MINWRK2 + MINWRK = MAX( MINWRK, MINWRK2 ) + END IF + ELSE + MINWRK = MAX( LWQP3, LWSVD, LWUNQ ) + IF ( CONDA ) MINWRK = MAX( MINWRK, LWCON ) + MINWRK = MINWRK + N + IF ( WNTVA ) THEN +* ..
minimal workspace length for N/2 x N ZGELQF + LWLQF = MAX( N/2, 1 ) + LWSVD2 = MAX( 3 * (N/2), 1 ) + LWUNLQ = MAX( N , 1 ) + MINWRK2 = MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, + $ N/2+LWUNLQ, LWUNQ ) + IF ( CONDA ) MINWRK2 = MAX( MINWRK2, LWCON ) + MINWRK2 = N + MINWRK2 + MINWRK = MAX( MINWRK, MINWRK2 ) + END IF + END IF + IF ( LQUERY ) THEN + IF ( RTRANS ) THEN + CALL ZGESVD( 'O', 'A', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_ZGESVD = INT( CDUMMY(1) ) + OPTWRK = MAX(LWRK_ZGEQP3,LWRK_ZGESVD,LWRK_ZUNMQR) + IF ( CONDA ) OPTWRK = MAX( OPTWRK, LWCON ) + OPTWRK = N + OPTWRK + IF ( WNTVA ) THEN + CALL ZGEQRF(N,N/2,U,LDU,CDUMMY,CDUMMY,-1,IERR) + LWRK_ZGEQRF = INT( CDUMMY(1) ) + CALL ZGESVD( 'S', 'O', N/2,N/2, V,LDV, S, U,LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_ZGESVD2 = INT( CDUMMY(1) ) + CALL ZUNMQR( 'R', 'C', N, N, N/2, U, LDU, CDUMMY, + $ V, LDV, CDUMMY, -1, IERR ) + LWRK_ZUNMQR2 = INT( CDUMMY(1) ) + OPTWRK2 = MAX( LWRK_ZGEQP3, N/2+LWRK_ZGEQRF, + $ N/2+LWRK_ZGESVD2, N/2+LWRK_ZUNMQR2 ) + IF ( CONDA ) OPTWRK2 = MAX( OPTWRK2, LWCON ) + OPTWRK2 = N + OPTWRK2 + OPTWRK = MAX( OPTWRK, OPTWRK2 ) + END IF + ELSE + CALL ZGESVD( 'S', 'O', N, N, A, LDA, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_ZGESVD = INT( CDUMMY(1) ) + OPTWRK = MAX(LWRK_ZGEQP3,LWRK_ZGESVD,LWRK_ZUNMQR) + IF ( CONDA ) OPTWRK = MAX( OPTWRK, LWCON ) + OPTWRK = N + OPTWRK + IF ( WNTVA ) THEN + CALL ZGELQF(N/2,N,U,LDU,CDUMMY,CDUMMY,-1,IERR) + LWRK_ZGELQF = INT( CDUMMY(1) ) + CALL ZGESVD( 'S','O', N/2,N/2, V, LDV, S, U, LDU, + $ V, LDV, CDUMMY, -1, RDUMMY, IERR ) + LWRK_ZGESVD2 = INT( CDUMMY(1) ) + CALL ZUNMLQ( 'R', 'N', N, N, N/2, U, LDU, CDUMMY, + $ V, LDV, CDUMMY,-1,IERR ) + LWRK_ZUNMLQ = INT( CDUMMY(1) ) + OPTWRK2 = MAX( LWRK_ZGEQP3, N/2+LWRK_ZGELQF, + $ N/2+LWRK_ZGESVD2, N/2+LWRK_ZUNMLQ ) + IF ( CONDA ) OPTWRK2 = MAX( OPTWRK2, LWCON ) + OPTWRK2 = N + OPTWRK2 + OPTWRK = MAX( OPTWRK, OPTWRK2 ) + END IF + END IF + END IF + END IF +* + MINWRK = MAX( 2, MINWRK ) + OPTWRK = MAX( 2, OPTWRK ) + IF ( LCWORK .LT. MINWRK .AND. (.NOT.LQUERY) ) INFO = -19 +* + END IF +* + IF (INFO .EQ. 0 .AND. LRWORK .LT. RMINWRK .AND. .NOT. LQUERY) THEN + INFO = -21 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGESVDQ', -INFO ) + RETURN + ELSE IF ( LQUERY ) THEN +* +* Return optimal workspace +* + IWORK(1) = IMINWRK + CWORK(1) = OPTWRK + CWORK(2) = MINWRK + RWORK(1) = RMINWRK + RETURN + END IF +* +* Quick return if the matrix is void. +* + IF( ( M.EQ.0 ) .OR. ( N.EQ.0 ) ) THEN +* .. all output is void. + RETURN + END IF +* + BIG = DLAMCH('O') + ASCALED = .FALSE. + IF ( ROWPRM ) THEN +* .. reordering the rows in decreasing sequence in the +* ell-infinity norm - this enhances numerical robustness in +* the case of differently scaled rows. + DO 1904 p = 1, M +* RWORK(p) = ABS( A(p,IZAMAX(N,A(p,1),LDA)) ) +* [[ZLANGE will return NaN if an entry of the p-th row is Nan]] + RWORK(p) = ZLANGE( 'M', 1, N, A(p,1), LDA, RDUMMY ) +* .. check for NaN's and Inf's + IF ( ( RWORK(p) .NE. RWORK(p) ) .OR. + $ ( (RWORK(p)*ZERO) .NE. ZERO ) ) THEN + INFO = -8 + CALL XERBLA( 'ZGESVDQ', -INFO ) + RETURN + END IF + 1904 CONTINUE + DO 1952 p = 1, M - 1 + q = IDAMAX( M-p+1, RWORK(p), 1 ) + p - 1 + IWORK(N+p) = q + IF ( p .NE. q ) THEN + RTMP = RWORK(p) + RWORK(p) = RWORK(q) + RWORK(q) = RTMP + END IF + 1952 CONTINUE +* + IF ( RWORK(1) .EQ. ZERO ) THEN +* Quick return: A is the M x N zero matrix. 
+ NUMRANK = 0
+ CALL DLASET( 'G', N, 1, ZERO, ZERO, S, N )
+ IF ( WNTUS ) CALL ZLASET('G', M, N, CZERO, CONE, U, LDU)
+ IF ( WNTUA ) CALL ZLASET('G', M, M, CZERO, CONE, U, LDU)
+ IF ( WNTVA ) CALL ZLASET('G', N, N, CZERO, CONE, V, LDV)
+ IF ( WNTUF ) THEN
+ CALL ZLASET( 'G', N, 1, CZERO, CZERO, CWORK, N )
+ CALL ZLASET( 'G', M, N, CZERO, CONE, U, LDU )
+ END IF
+ DO 5001 p = 1, N
+ IWORK(p) = p
+ 5001 CONTINUE
+ IF ( ROWPRM ) THEN
+ DO 5002 p = N + 1, N + M - 1
+ IWORK(p) = p - N
+ 5002 CONTINUE
+ END IF
+ IF ( CONDA ) RWORK(1) = -1
+ RWORK(2) = -1
+ RETURN
+ END IF
+*
+ IF ( RWORK(1) .GT. BIG / SQRT(DBLE(M)) ) THEN
+* .. to prevent overflow in the QR factorization, scale the
+* matrix by 1/sqrt(M) if a too large entry is detected
+ CALL ZLASCL('G',0,0,SQRT(DBLE(M)),ONE, M,N, A,LDA, IERR)
+ ASCALED = .TRUE.
+ END IF
+ CALL ZLASWP( N, A, LDA, 1, M-1, IWORK(N+1), 1 )
+ END IF
+*
+* .. At this stage, preemptive scaling is done only to avoid column
+* norm overflows during the QR factorization. The SVD procedure should
+* have its own scaling to save the singular values from overflows and
+* underflows. That depends on the SVD procedure.
+*
+ IF ( .NOT.ROWPRM ) THEN
+ RTMP = ZLANGE( 'M', M, N, A, LDA, RWORK )
+ IF ( ( RTMP .NE. RTMP ) .OR.
+ $ ( (RTMP*ZERO) .NE. ZERO ) ) THEN
+ INFO = -8
+ CALL XERBLA( 'ZGESVDQ', -INFO )
+ RETURN
+ END IF
+ IF ( RTMP .GT. BIG / SQRT(DBLE(M)) ) THEN
+* .. to prevent overflow in the QR factorization, scale the
+* matrix by 1/sqrt(M) if a too large entry is detected
+ CALL ZLASCL('G',0,0, SQRT(DBLE(M)),ONE, M,N, A,LDA, IERR)
+ ASCALED = .TRUE.
+ END IF
+ END IF
+*
+* .. QR factorization with column pivoting
+*
+* A * P = Q * [ R ]
+*             [ 0 ]
+*
+ DO 1963 p = 1, N
+* .. all columns are free columns
+ IWORK(p) = 0
+ 1963 CONTINUE
+ CALL ZGEQP3( M, N, A, LDA, IWORK, CWORK, CWORK(N+1), LCWORK-N,
+ $ RWORK, IERR )
+*
+* If the user requested accuracy level allows truncation in the
+* computed upper triangular factor, the matrix R is examined and,
+* if possible, replaced with its leading upper trapezoidal part.
+*
+ EPSLN = DLAMCH('E')
+ SFMIN = DLAMCH('S')
+* SMALL = SFMIN / EPSLN
+ NR = N
+*
+ IF ( ACCLA ) THEN
+*
+* Standard absolute error bound suffices. All sigma_i with
+* sigma_i < N*EPS*||A||_F are flushed to zero. This is an
+* aggressive enforcement of lower numerical rank by introducing a
+* backward error of the order of N*EPS*||A||_F.
+ NR = 1
+ RTMP = SQRT(DBLE(N))*EPSLN
+ DO 3001 p = 2, N
+ IF ( ABS(A(p,p)) .LT. (RTMP*ABS(A(1,1))) ) GO TO 3002
+ NR = NR + 1
+ 3001 CONTINUE
+ 3002 CONTINUE
+*
+ ELSEIF ( ACCLM ) THEN
+* .. similar to the above, only slightly more gentle (less aggressive).
+* A sudden drop on the diagonal of R is used as the criterion for being
+* close-to-rank-deficient. The threshold is set to EPSLN=DLAMCH('E').
+* [[This can be made more flexible by replacing this hard-coded value
+* with a user-specified threshold.]] Also, the values that underflow
+* will be truncated.
+ NR = 1
+ DO 3401 p = 2, N
+ IF ( ( ABS(A(p,p)) .LT. (EPSLN*ABS(A(p-1,p-1))) ) .OR.
+ $ ( ABS(A(p,p)) .LT. SFMIN ) ) GO TO 3402
+ NR = NR + 1
+ 3401 CONTINUE
+ 3402 CONTINUE
+*
+ ELSE
+* .. RRQR not authorized to determine numerical rank except in the
+* obvious case of zero pivots.
+* .. inspect R for exact zeros on the diagonal;
+* R(i,i)=0 => R(i:N,i:N)=0.
+ NR = 1
+ DO 3501 p = 2, N
+ IF ( ABS(A(p,p)) .EQ. ZERO ) GO TO 3502
+ NR = NR + 1
+ 3501 CONTINUE
+ 3502 CONTINUE
+*
+ IF ( CONDA ) THEN
+* Estimate the scaled condition number of A. Use the fact that it is
+* the same as the scaled condition number of R. (A*P = Q*[R; 0]
+* with Q unitary, so A and R have the same singular values and,
+* up to the column permutation P, the same column norms.)
+* .. V is used as workspace
+ CALL ZLACPY( 'U', N, N, A, LDA, V, LDV )
+* Only the leading NR x NR submatrix of the triangular factor
+* is considered. Only if NR=N will this give a reliable error
+* bound. However, even for NR < N, this can be used at an
+* expert level to obtain useful information in the sense of
+* perturbation theory.
+ DO 3053 p = 1, NR
+ RTMP = DZNRM2( p, V(1,p), 1 )
+ CALL ZDSCAL( p, ONE/RTMP, V(1,p), 1 )
+ 3053 CONTINUE
+ IF ( .NOT. ( LSVEC .OR. RSVEC ) ) THEN
+ CALL ZPOCON( 'U', NR, V, LDV, ONE, RTMP,
+ $ CWORK, RWORK, IERR )
+ ELSE
+ CALL ZPOCON( 'U', NR, V, LDV, ONE, RTMP,
+ $ CWORK(N+1), RWORK, IERR )
+ END IF
+ SCONDA = ONE / SQRT(RTMP)
+* For NR=N, SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1),
+* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA
+* See the reference [1] for more details.
+ END IF
+*
+ ENDIF
+*
+ IF ( WNTUR ) THEN
+ N1 = NR
+ ELSE IF ( WNTUS .OR. WNTUF) THEN
+ N1 = N
+ ELSE IF ( WNTUA ) THEN
+ N1 = M
+ END IF
+*
+ IF ( .NOT. ( RSVEC .OR. LSVEC ) ) THEN
+*.......................................................................
+* .. only the singular values are requested
+*.......................................................................
+ IF ( RTRANS ) THEN
+*
+* .. compute the singular values of R**H = [A](1:NR,1:N)**H
+* .. set the lower triangle of [A] to [A](1:NR,1:N)**H and
+* the upper triangle of [A] to zero.
+ DO 1146 p = 1, MIN( N, NR )
+ A(p,p) = CONJG(A(p,p))
+ DO 1147 q = p + 1, N
+ A(q,p) = CONJG(A(p,q))
+ IF ( q .LE. NR ) A(p,q) = CZERO
+ 1147 CONTINUE
+ 1146 CONTINUE
+*
+ CALL ZGESVD( 'N', 'N', N, NR, A, LDA, S, U, LDU,
+ $ V, LDV, CWORK, LCWORK, RWORK, INFO )
+*
+ ELSE
+*
+* .. compute the singular values of R = [A](1:NR,1:N)
+*
+ IF ( NR .GT. 1 )
+ $ CALL ZLASET( 'L', NR-1,NR-1, CZERO,CZERO, A(2,1), LDA )
+ CALL ZGESVD( 'N', 'N', NR, N, A, LDA, S, U, LDU,
+ $ V, LDV, CWORK, LCWORK, RWORK, INFO )
+*
+ END IF
+*
+ ELSE IF ( LSVEC .AND. ( .NOT. RSVEC) ) THEN
+*.......................................................................
+* .. the singular values and the left singular vectors requested
+*.......................................................................
+ IF ( RTRANS ) THEN
+* .. apply ZGESVD to R**H
+* .. copy R**H into [U] and overwrite [U] with the right singular
+* vectors of R
+ DO 1192 p = 1, NR
+ DO 1193 q = p, N
+ U(q,p) = CONJG(A(p,q))
+ 1193 CONTINUE
+ 1192 CONTINUE
+ IF ( NR .GT. 1 )
+ $ CALL ZLASET( 'U', NR-1,NR-1, CZERO,CZERO, U(1,2), LDU )
+* .. the left singular vectors not computed, the NR right singular
+* vectors overwrite [U](1:NR,1:NR) as conjugate transposed. These
+* will be pre-multiplied by Q to build the left singular vectors of A.
+ CALL ZGESVD( 'N', 'O', N, NR, U, LDU, S, U, LDU,
+ $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO )
+*
+ DO 1119 p = 1, NR
+ U(p,p) = CONJG(U(p,p))
+ DO 1120 q = p + 1, NR
+ CTMP = CONJG(U(q,p))
+ U(q,p) = CONJG(U(p,q))
+ U(p,q) = CTMP
+ 1120 CONTINUE
+ 1119 CONTINUE
+*
+ ELSE
+* .. apply ZGESVD to R
+* .. copy R into [U] and overwrite [U] with the left singular vectors
+ CALL ZLACPY( 'U', NR, N, A, LDA, U, LDU )
+ IF ( NR .GT. 1 )
+ $ CALL ZLASET( 'L', NR-1, NR-1, CZERO, CZERO, U(2,1), LDU )
+* .. the right singular vectors not computed, the NR left singular
+* vectors overwrite [U](1:NR,1:NR)
+ CALL ZGESVD( 'O', 'N', NR, N, U, LDU, S, U, LDU,
+ $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO )
+* .. now [U](1:NR,1:NR) contains the NR left singular vectors of
+* R. These will be pre-multiplied by Q to build the left singular
+* vectors of A.
+ END IF
+*
+* .. assemble the left singular vector matrix U of dimensions
+* (M x NR) or (M x N) or (M x M).
+ IF ( ( NR .LT. M ) .AND. ( .NOT.WNTUF ) ) THEN
+ CALL ZLASET('A', M-NR, NR, CZERO, CZERO, U(NR+1,1), LDU)
+ IF ( NR .LT. N1 ) THEN
+ CALL ZLASET( 'A',NR,N1-NR,CZERO,CZERO,U(1,NR+1), LDU )
+ CALL ZLASET( 'A',M-NR,N1-NR,CZERO,CONE,
+ $ U(NR+1,NR+1), LDU )
+ END IF
+ END IF
+*
+* The Q matrix from the first QRF is built into the left singular
+* vectors matrix U.
+*
+ IF ( .NOT.WNTUF )
+ $ CALL ZUNMQR( 'L', 'N', M, N1, N, A, LDA, CWORK, U,
+ $ LDU, CWORK(N+1), LCWORK-N, IERR )
+ IF ( ROWPRM .AND. .NOT.WNTUF )
+ $ CALL ZLASWP( N1, U, LDU, 1, M-1, IWORK(N+1), -1 )
+*
+ ELSE IF ( RSVEC .AND. ( .NOT. LSVEC ) ) THEN
+*.......................................................................
+* .. the singular values and the right singular vectors requested
+*.......................................................................
+ IF ( RTRANS ) THEN
+* .. apply ZGESVD to R**H
+* .. copy R**H into V and overwrite V with the left singular vectors
+ DO 1165 p = 1, NR
+ DO 1166 q = p, N
+ V(q,p) = CONJG(A(p,q))
+ 1166 CONTINUE
+ 1165 CONTINUE
+ IF ( NR .GT. 1 )
+ $ CALL ZLASET( 'U', NR-1,NR-1, CZERO,CZERO, V(1,2), LDV )
+* .. the left singular vectors of R**H overwrite V, the right singular
+* vectors not computed
+ IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN
+ CALL ZGESVD( 'O', 'N', N, NR, V, LDV, S, U, LDU,
+ $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO )
+*
+ DO 1121 p = 1, NR
+ V(p,p) = CONJG(V(p,p))
+ DO 1122 q = p + 1, NR
+ CTMP = CONJG(V(q,p))
+ V(q,p) = CONJG(V(p,q))
+ V(p,q) = CTMP
+ 1122 CONTINUE
+ 1121 CONTINUE
+*
+ IF ( NR .LT. N ) THEN
+ DO 1103 p = 1, NR
+ DO 1104 q = NR + 1, N
+ V(p,q) = CONJG(V(q,p))
+ 1104 CONTINUE
+ 1103 CONTINUE
+ END IF
+ CALL ZLAPMT( .FALSE., NR, N, V, LDV, IWORK )
+ ELSE
+* .. need all N right singular vectors and NR < N
+* [!] This is a simple implementation that augments [V](1:N,1:NR)
+* by padding a zero block. In the case NR << N, a more efficient
+* way is to first use the QR factorization. For more details on
+* how to implement this, see the " FULL SVD " branch.
+ CALL ZLASET('G', N, N-NR, CZERO, CZERO, V(1,NR+1), LDV)
+ CALL ZGESVD( 'O', 'N', N, N, V, LDV, S, U, LDU,
+ $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO )
+*
+ DO 1123 p = 1, N
+ V(p,p) = CONJG(V(p,p))
+ DO 1124 q = p + 1, N
+ CTMP = CONJG(V(q,p))
+ V(q,p) = CONJG(V(p,q))
+ V(p,q) = CTMP
+ 1124 CONTINUE
+ 1123 CONTINUE
+ CALL ZLAPMT( .FALSE., N, N, V, LDV, IWORK )
+ END IF
+*
+ ELSE
+* .. apply ZGESVD to R
+* .. copy R into V and overwrite V with the right singular vectors
+ CALL ZLACPY( 'U', NR, N, A, LDA, V, LDV )
+ IF ( NR .GT. 1 )
+ $ CALL ZLASET( 'L', NR-1, NR-1, CZERO, CZERO, V(2,1), LDV )
+* .. the right singular vectors overwrite V, the NR left singular
+* vectors stored in U(1:NR,1:NR)
+ IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN
+ CALL ZGESVD( 'N', 'O', NR, N, V, LDV, S, U, LDU,
+ $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO )
+ CALL ZLAPMT( .FALSE., NR, N, V, LDV, IWORK )
+* .. now [V](1:NR,1:N) contains V(1:N,1:NR)**H
+ ELSE
+* .. need all N right singular vectors and NR < N
+* [!] This is a simple implementation that augments [V](1:NR,1:N)
+* by padding a zero block. In the case NR << N, a more efficient
+* way is to first use the LQ factorization. For more details on
+* how to implement this, see the " FULL SVD " branch.
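+* [[Padding keeps this branch simple, but the ZGESVD call below then
+* works on the full N x N array, roughly O(N**3) flops regardless of
+* NR; the LQ based scheme of the FULL SVD branch reduces the dense
+* SVD to an NR x NR core and is preferable when NR << N.]]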
+ CALL ZLASET('G', N-NR, N, CZERO,CZERO, V(NR+1,1), LDV) + CALL ZGESVD( 'N', 'O', N, N, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO ) + CALL ZLAPMT( .FALSE., N, N, V, LDV, IWORK ) + END IF +* .. now [V] contains the adjoint of the matrix of the right singular +* vectors of A. + END IF +* + ELSE +*....................................................................... +* .. FULL SVD requested +*....................................................................... + IF ( RTRANS ) THEN +* +* .. apply ZGESVD to R**H [[this option is left for R&D&T]] +* + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN +* .. copy R**H into [V] and overwrite [V] with the left singular +* vectors of R**H + DO 1168 p = 1, NR + DO 1169 q = p, N + V(q,p) = CONJG(A(p,q)) + 1169 CONTINUE + 1168 CONTINUE + IF ( NR .GT. 1 ) + $ CALL ZLASET( 'U', NR-1,NR-1, CZERO,CZERO, V(1,2), LDV ) +* +* .. the left singular vectors of R**H overwrite [V], the NR right +* singular vectors of R**H stored in [U](1:NR,1:NR) as conjugate +* transposed + CALL ZGESVD( 'O', 'A', N, NR, V, LDV, S, V, LDV, + $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO ) +* .. assemble V + DO 1115 p = 1, NR + V(p,p) = CONJG(V(p,p)) + DO 1116 q = p + 1, NR + CTMP = CONJG(V(q,p)) + V(q,p) = CONJG(V(p,q)) + V(p,q) = CTMP + 1116 CONTINUE + 1115 CONTINUE + IF ( NR .LT. N ) THEN + DO 1101 p = 1, NR + DO 1102 q = NR+1, N + V(p,q) = CONJG(V(q,p)) + 1102 CONTINUE + 1101 CONTINUE + END IF + CALL ZLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* + DO 1117 p = 1, NR + U(p,p) = CONJG(U(p,p)) + DO 1118 q = p + 1, NR + CTMP = CONJG(U(q,p)) + U(q,p) = CONJG(U(p,q)) + U(p,q) = CTMP + 1118 CONTINUE + 1117 CONTINUE +* + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL ZLASET('A', M-NR,NR, CZERO,CZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL ZLASET('A',NR,N1-NR,CZERO,CZERO,U(1,NR+1),LDU) + CALL ZLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* + ELSE +* .. need all N right singular vectors and NR < N +* .. copy R**H into [V] and overwrite [V] with the left singular +* vectors of R**H +* [[The optimal ratio N/NR for using QRF instead of padding +* with zeros. Here hard coded to 2; it must be at least +* two due to work space constraints.]] +* OPTRATIO = ILAENV(6, 'ZGESVD', 'S' // 'O', NR,N,0,0) +* OPTRATIO = MAX( OPTRATIO, 2 ) + OPTRATIO = 2 + IF ( OPTRATIO*NR .GT. N ) THEN + DO 1198 p = 1, NR + DO 1199 q = p, N + V(q,p) = CONJG(A(p,q)) + 1199 CONTINUE + 1198 CONTINUE + IF ( NR .GT. 1 ) + $ CALL ZLASET('U',NR-1,NR-1, CZERO,CZERO, V(1,2),LDV) +* + CALL ZLASET('A',N,N-NR,CZERO,CZERO,V(1,NR+1),LDV) + CALL ZGESVD( 'O', 'A', N, N, V, LDV, S, V, LDV, + $ U, LDU, CWORK(N+1), LCWORK-N, RWORK, INFO ) +* + DO 1113 p = 1, N + V(p,p) = CONJG(V(p,p)) + DO 1114 q = p + 1, N + CTMP = CONJG(V(q,p)) + V(q,p) = CONJG(V(p,q)) + V(p,q) = CTMP + 1114 CONTINUE + 1113 CONTINUE + CALL ZLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x N1), i.e. (M x N) or (M x M). +* + DO 1111 p = 1, N + U(p,p) = CONJG(U(p,p)) + DO 1112 q = p + 1, N + CTMP = CONJG(U(q,p)) + U(q,p) = CONJG(U(p,q)) + U(p,q) = CTMP + 1112 CONTINUE + 1111 CONTINUE +* + IF ( ( N .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL ZLASET('A',M-N,N,CZERO,CZERO,U(N+1,1),LDU) + IF ( N .LT. N1 ) THEN + CALL ZLASET('A',N,N1-N,CZERO,CZERO,U(1,N+1),LDU) + CALL ZLASET('A',M-N,N1-N,CZERO,CONE, + $ U(N+1,N+1), LDU ) + END IF + END IF + ELSE +* .. 
copy R**H into [U] and overwrite [U] with the right +* singular vectors of R + DO 1196 p = 1, NR + DO 1197 q = p, N + U(q,NR+p) = CONJG(A(p,q)) + 1197 CONTINUE + 1196 CONTINUE + IF ( NR .GT. 1 ) + $ CALL ZLASET('U',NR-1,NR-1,CZERO,CZERO,U(1,NR+2),LDU) + CALL ZGEQRF( N, NR, U(1,NR+1), LDU, CWORK(N+1), + $ CWORK(N+NR+1), LCWORK-N-NR, IERR ) + DO 1143 p = 1, NR + DO 1144 q = 1, N + V(q,p) = CONJG(U(p,NR+q)) + 1144 CONTINUE + 1143 CONTINUE + CALL ZLASET('U',NR-1,NR-1,CZERO,CZERO,V(1,2),LDV) + CALL ZGESVD( 'S', 'O', NR, NR, V, LDV, S, U, LDU, + $ V,LDV, CWORK(N+NR+1),LCWORK-N-NR,RWORK, INFO ) + CALL ZLASET('A',N-NR,NR,CZERO,CZERO,V(NR+1,1),LDV) + CALL ZLASET('A',NR,N-NR,CZERO,CZERO,V(1,NR+1),LDV) + CALL ZLASET('A',N-NR,N-NR,CZERO,CONE,V(NR+1,NR+1),LDV) + CALL ZUNMQR('R','C', N, N, NR, U(1,NR+1), LDU, + $ CWORK(N+1),V,LDV,CWORK(N+NR+1),LCWORK-N-NR,IERR) + CALL ZLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL ZLASET('A',M-NR,NR,CZERO,CZERO,U(NR+1,1),LDU) + IF ( NR .LT. N1 ) THEN + CALL ZLASET('A',NR,N1-NR,CZERO,CZERO,U(1,NR+1),LDU) + CALL ZLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1),LDU) + END IF + END IF + END IF + END IF +* + ELSE +* +* .. apply ZGESVD to R [[this is the recommended option]] +* + IF ( WNTVR .OR. ( NR .EQ. N ) ) THEN +* .. copy R into [V] and overwrite V with the right singular vectors + CALL ZLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL ZLASET( 'L', NR-1,NR-1, CZERO,CZERO, V(2,1), LDV ) +* .. the right singular vectors of R overwrite [V], the NR left +* singular vectors of R stored in [U](1:NR,1:NR) + CALL ZGESVD( 'S', 'O', NR, N, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO ) + CALL ZLAPMT( .FALSE., NR, N, V, LDV, IWORK ) +* .. now [V](1:NR,1:N) contains V(1:N,1:NR)**H +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL ZLASET('A', M-NR,NR, CZERO,CZERO, U(NR+1,1), LDU) + IF ( NR .LT. N1 ) THEN + CALL ZLASET('A',NR,N1-NR,CZERO,CZERO,U(1,NR+1),LDU) + CALL ZLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF +* + ELSE +* .. need all N right singular vectors and NR < N +* .. the requested number of the left singular vectors +* is then N1 (N or M) +* [[The optimal ratio N/NR for using LQ instead of padding +* with zeros. Here hard coded to 2; it must be at least +* two due to work space constraints.]] +* OPTRATIO = ILAENV(6, 'ZGESVD', 'S' // 'O', NR,N,0,0) +* OPTRATIO = MAX( OPTRATIO, 2 ) + OPTRATIO = 2 + IF ( OPTRATIO * NR .GT. N ) THEN + CALL ZLACPY( 'U', NR, N, A, LDA, V, LDV ) + IF ( NR .GT. 1 ) + $ CALL ZLASET('L', NR-1,NR-1, CZERO,CZERO, V(2,1),LDV) +* .. the right singular vectors of R overwrite [V], the NR left +* singular vectors of R stored in [U](1:NR,1:NR) + CALL ZLASET('A', N-NR,N, CZERO,CZERO, V(NR+1,1),LDV) + CALL ZGESVD( 'S', 'O', N, N, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+1), LCWORK-N, RWORK, INFO ) + CALL ZLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. now [V] contains the adjoint of the matrix of the right +* singular vectors of A. The leading N left singular vectors +* are in [U](1:N,1:N) +* .. assemble the left singular vector matrix U of dimensions +* (M x N1), i.e. (M x N) or (M x M). + IF ( ( N .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL ZLASET('A',M-N,N,CZERO,CZERO,U(N+1,1),LDU) + IF ( N .LT. 
N1 ) THEN + CALL ZLASET('A',N,N1-N,CZERO,CZERO,U(1,N+1),LDU) + CALL ZLASET( 'A',M-N,N1-N,CZERO,CONE, + $ U(N+1,N+1), LDU ) + END IF + END IF + ELSE + CALL ZLACPY( 'U', NR, N, A, LDA, U(NR+1,1), LDU ) + IF ( NR .GT. 1 ) + $ CALL ZLASET('L',NR-1,NR-1,CZERO,CZERO,U(NR+2,1),LDU) + CALL ZGELQF( NR, N, U(NR+1,1), LDU, CWORK(N+1), + $ CWORK(N+NR+1), LCWORK-N-NR, IERR ) + CALL ZLACPY('L',NR,NR,U(NR+1,1),LDU,V,LDV) + IF ( NR .GT. 1 ) + $ CALL ZLASET('U',NR-1,NR-1,CZERO,CZERO,V(1,2),LDV) + CALL ZGESVD( 'S', 'O', NR, NR, V, LDV, S, U, LDU, + $ V, LDV, CWORK(N+NR+1), LCWORK-N-NR, RWORK, INFO ) + CALL ZLASET('A',N-NR,NR,CZERO,CZERO,V(NR+1,1),LDV) + CALL ZLASET('A',NR,N-NR,CZERO,CZERO,V(1,NR+1),LDV) + CALL ZLASET('A',N-NR,N-NR,CZERO,CONE,V(NR+1,NR+1),LDV) + CALL ZUNMLQ('R','N',N,N,NR,U(NR+1,1),LDU,CWORK(N+1), + $ V, LDV, CWORK(N+NR+1),LCWORK-N-NR,IERR) + CALL ZLAPMT( .FALSE., N, N, V, LDV, IWORK ) +* .. assemble the left singular vector matrix U of dimensions +* (M x NR) or (M x N) or (M x M). + IF ( ( NR .LT. M ) .AND. .NOT.(WNTUF)) THEN + CALL ZLASET('A',M-NR,NR,CZERO,CZERO,U(NR+1,1),LDU) + IF ( NR .LT. N1 ) THEN + CALL ZLASET('A',NR,N1-NR,CZERO,CZERO,U(1,NR+1),LDU) + CALL ZLASET( 'A',M-NR,N1-NR,CZERO,CONE, + $ U(NR+1,NR+1), LDU ) + END IF + END IF + END IF + END IF +* .. end of the "R**H or R" branch + END IF +* +* The Q matrix from the first QRF is built into the left singular +* vectors matrix U. +* + IF ( .NOT. WNTUF ) + $ CALL ZUNMQR( 'L', 'N', M, N1, N, A, LDA, CWORK, U, + $ LDU, CWORK(N+1), LCWORK-N, IERR ) + IF ( ROWPRM .AND. .NOT.WNTUF ) + $ CALL ZLASWP( N1, U, LDU, 1, M-1, IWORK(N+1), -1 ) +* +* ... end of the "full SVD" branch + END IF +* +* Check whether some singular values are returned as zeros, e.g. +* due to underflow, and update the numerical rank. + p = NR + DO 4001 q = p, 1, -1 + IF ( S(q) .GT. ZERO ) GO TO 4002 + NR = NR - 1 + 4001 CONTINUE + 4002 CONTINUE +* +* .. if numerical rank deficiency is detected, the truncated +* singular values are set to zero. + IF ( NR .LT. N ) CALL DLASET( 'G', N-NR,1, ZERO,ZERO, S(NR+1), N ) +* .. undo scaling; this may cause overflow in the largest singular +* values. + IF ( ASCALED ) + $ CALL DLASCL( 'G',0,0, ONE,SQRT(DBLE(M)), NR,1, S, N, IERR ) + IF ( CONDA ) RWORK(1) = SCONDA + RWORK(2) = p - NR +* .. p-NR is the number of singular values that are computed as +* exact zeros in ZGESVD() applied to the (possibly truncated) +* full row rank triangular (trapezoidal) factor of A. + NUMRANK = NR +* + RETURN +* +* End of ZGESVDQ +* + END diff --git a/lapack-netlib/SRC/zgesvdx.f b/lapack-netlib/SRC/zgesvdx.f index 56b5cd4f2..12b20c0ba 100644 --- a/lapack-netlib/SRC/zgesvdx.f +++ b/lapack-netlib/SRC/zgesvdx.f @@ -18,7 +18,7 @@ * Definition: * =========== * -* SUBROUTINE CGESVDX( JOBU, JOBVT, RANGE, M, N, A, LDA, VL, VU, +* SUBROUTINE ZGESVDX( JOBU, JOBVT, RANGE, M, N, A, LDA, VL, VU, * $ IL, IU, NS, S, U, LDU, VT, LDVT, WORK, * $ LWORK, RWORK, IWORK, INFO ) * diff --git a/lapack-netlib/SRC/zgesvj.f b/lapack-netlib/SRC/zgesvj.f index fd32f92d8..7c25a3495 100644 --- a/lapack-netlib/SRC/zgesvj.f +++ b/lapack-netlib/SRC/zgesvj.f @@ -89,12 +89,12 @@ *> Specifies whether to compute the right singular vectors, that *> is, the matrix V: *> = 'V' or 'J': the matrix V is computed and returned in the array V -*> = 'A' : the Jacobi rotations are applied to the MV-by-N +*> = 'A': the Jacobi rotations are applied to the MV-by-N *> array V. 
In other words, the right singular vector *> matrix V is not computed explicitly; instead it is *> applied to an MV-by-N matrix initially stored in the *> first MV rows of V. -*> = 'N' : the matrix V is not computed and the array V is not +*> = 'N': the matrix V is not computed and the array V is not *> referenced *> \endverbatim *> @@ -116,8 +116,8 @@ *> A is COMPLEX*16 array, dimension (LDA,N) *> On entry, the M-by-N matrix A. *> On exit, -*> If JOBU .EQ. 'U' .OR. JOBU .EQ. 'C': -*> If INFO .EQ. 0 : +*> If JOBU = 'U' .OR. JOBU = 'C': +*> If INFO = 0 : *> RANKA orthonormal columns of U are returned in the *> leading RANKA columns of the array A. Here RANKA <= N *> is the number of computed singular values of A that are @@ -127,9 +127,9 @@ *> in the array RWORK as RANKA=NINT(RWORK(2)). Also see the *> descriptions of SVA and RWORK. The computed columns of U *> are mutually numerically orthogonal up to approximately -*> TOL=SQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU.EQ.'C'), +*> TOL=SQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU = 'C'), *> see the description of JOBU. -*> If INFO .GT. 0, +*> If INFO > 0, *> the procedure ZGESVJ did not converge in the given number *> of iterations (sweeps). In that case, the computed *> columns of U may not be orthogonal up to TOL. The output @@ -137,8 +137,8 @@ *> values in SVA(1:N)) and V is still a decomposition of the *> input matrix A in the sense that the residual *> || A - SCALE * U * SIGMA * V^* ||_2 / ||A||_2 is small. -*> If JOBU .EQ. 'N': -*> If INFO .EQ. 0 : +*> If JOBU = 'N': +*> If INFO = 0 : *> Note that the left singular vectors are 'for free' in the *> one-sided Jacobi SVD algorithm. However, if only the *> singular values are needed, the level of numerical @@ -147,7 +147,7 @@ *> numerically orthogonal up to approximately M*EPS. Thus, *> on exit, A contains the columns of U scaled with the *> corresponding singular values. -*> If INFO .GT. 0 : +*> If INFO > 0: *> the procedure ZGESVJ did not converge in the given number *> of iterations (sweeps). *> \endverbatim @@ -162,9 +162,9 @@ *> \verbatim *> SVA is DOUBLE PRECISION array, dimension (N) *> On exit, -*> If INFO .EQ. 0 : +*> If INFO = 0 : *> depending on the value SCALE = RWORK(1), we have: -*> If SCALE .EQ. ONE: +*> If SCALE = ONE: *> SVA(1:N) contains the computed singular values of A. *> During the computation SVA contains the Euclidean column *> norms of the iterated matrices in the array A. @@ -173,7 +173,7 @@ *> factored representation is due to the fact that some of the *> singular values of A might underflow or overflow. *> -*> If INFO .GT. 0 : +*> If INFO > 0: *> the procedure ZGESVJ did not converge in the given number of *> iterations (sweeps) and SCALE*SVA(1:N) may not be accurate. *> \endverbatim @@ -181,7 +181,7 @@ *> \param[in] MV *> \verbatim *> MV is INTEGER -*> If JOBV .EQ. 'A', then the product of Jacobi rotations in ZGESVJ +*> If JOBV = 'A', then the product of Jacobi rotations in ZGESVJ *> is applied to the first MV rows of V. See the description of JOBV. *> \endverbatim *> @@ -199,16 +199,16 @@ *> \param[in] LDV *> \verbatim *> LDV is INTEGER -*> The leading dimension of the array V, LDV .GE. 1. -*> If JOBV .EQ. 'V', then LDV .GE. max(1,N). -*> If JOBV .EQ. 'A', then LDV .GE. max(1,MV) . +*> The leading dimension of the array V, LDV >= 1. +*> If JOBV = 'V', then LDV >= max(1,N). +*> If JOBV = 'A', then LDV >= max(1,MV) . *> \endverbatim *> *> \param[in,out] CWORK *> \verbatim *> CWORK is COMPLEX*16 array, dimension (max(1,LWORK)) *> Used as workspace. 
-*> If on entry LWORK .EQ. -1, then a workspace query is assumed and +*> If on entry LWORK = -1, then a workspace query is assumed and *> no computation is done; CWORK(1) is set to the minial (and optimal) *> length of CWORK. *> \endverbatim @@ -223,7 +223,7 @@ *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (max(6,LRWORK)) *> On entry, -*> If JOBU .EQ. 'C' : +*> If JOBU = 'C' : *> RWORK(1) = CTOL, where CTOL defines the threshold for convergence. *> The process stops if all columns of A are mutually *> orthogonal up to CTOL*EPS, EPS=DLAMCH('E'). @@ -243,11 +243,11 @@ *> RWORK(5) = max_{i.NE.j} |COS(A(:,i),A(:,j))| in the last sweep. *> This is useful information in cases when ZGESVJ did *> not converge, as it can be used to estimate whether -*> the output is stil useful and for post festum analysis. +*> the output is still useful and for post festum analysis. *> RWORK(6) = the largest absolute value over all sines of the *> Jacobi rotation angles in the last sweep. It can be *> useful for a post festum analysis. -*> If on entry LRWORK .EQ. -1, then a workspace query is assumed and +*> If on entry LRWORK = -1, then a workspace query is assumed and *> no computation is done; RWORK(1) is set to the minial (and optimal) *> length of RWORK. *> \endverbatim @@ -261,9 +261,9 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0 : successful exit. -*> < 0 : if INFO = -i, then the i-th argument had an illegal value -*> > 0 : ZGESVJ did not converge in the maximal allowed number +*> = 0: successful exit. +*> < 0: if INFO = -i, then the i-th argument had an illegal value +*> > 0: ZGESVJ did not converge in the maximal allowed number *> (NSWEEP=30) of sweeps. The output may still be useful. *> See the description of RWORK. *> \endverbatim diff --git a/lapack-netlib/SRC/zgesvxx.f b/lapack-netlib/SRC/zgesvxx.f index c3727b70e..60bb71cd3 100644 --- a/lapack-netlib/SRC/zgesvxx.f +++ b/lapack-netlib/SRC/zgesvxx.f @@ -411,7 +411,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -447,14 +447,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -462,9 +462,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. 
 *> (other values are reserved for future use)
 *>
 *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual
diff --git a/lapack-netlib/SRC/zgetsls.f b/lapack-netlib/SRC/zgetsls.f
index 5ce11efef..11233785b 100644
--- a/lapack-netlib/SRC/zgetsls.f
+++ b/lapack-netlib/SRC/zgetsls.f
@@ -1,3 +1,5 @@
+*> \brief \b ZGETSLS
+*
 * Definition:
 * ===========
 *
@@ -259,7 +261,7 @@
 TSZM = INT( TQ( 1 ) )
 LWM = INT( WORKQ( 1 ) )
 CALL ZGEMLQ( 'L', TRANS, N, NRHS, M, A, LDA, TQ,
- $ TSZO, B, LDB, WORKQ, -1, INFO2 )
+ $ TSZM, B, LDB, WORKQ, -1, INFO2 )
 LWM = MAX( LWM, INT( WORKQ( 1 ) ) )
 WSIZEO = TSZO + LWO
 WSIZEM = TSZM + LWM
diff --git a/lapack-netlib/SRC/zggesx.f b/lapack-netlib/SRC/zggesx.f
index 661523465..c546e61f1 100644
--- a/lapack-netlib/SRC/zggesx.f
+++ b/lapack-netlib/SRC/zggesx.f
@@ -120,10 +120,10 @@
 *> \verbatim
 *> SENSE is CHARACTER*1
 *> Determines which reciprocal condition numbers are computed.
-*> = 'N' : None are computed;
-*> = 'E' : Computed for average of selected eigenvalues only;
-*> = 'V' : Computed for selected deflating subspaces only;
-*> = 'B' : Computed for both.
+*> = 'N': None are computed;
+*> = 'E': Computed for average of selected eigenvalues only;
+*> = 'V': Computed for selected deflating subspaces only;
+*> = 'B': Computed for both.
 *> If SENSE = 'E', 'V', or 'B', SORT must equal 'S'.
 *> \endverbatim
 *>
diff --git a/lapack-netlib/SRC/zgsvj0.f b/lapack-netlib/SRC/zgsvj0.f
index c4a6bd38a..ab7e31725 100644
--- a/lapack-netlib/SRC/zgsvj0.f
+++ b/lapack-netlib/SRC/zgsvj0.f
@@ -117,7 +117,7 @@
 *> \param[in] MV
 *> \verbatim
 *> MV is INTEGER
-*> If JOBV .EQ. 'A', then MV rows of V are post-multipled by a
+*> If JOBV = 'A', then MV rows of V are post-multiplied by a
 *> sequence of Jacobi rotations.
 *> If JOBV = 'N', then MV is not referenced.
 *> \endverbatim
@@ -125,9 +125,9 @@
 *> \param[in,out] V
 *> \verbatim
 *> V is COMPLEX*16 array, dimension (LDV,N)
-*> If JOBV .EQ. 'V' then N rows of V are post-multipled by a
+*> If JOBV = 'V' then N rows of V are post-multiplied by a
 *> sequence of Jacobi rotations.
-*> If JOBV .EQ. 'A' then MV rows of V are post-multipled by a
+*> If JOBV = 'A' then MV rows of V are post-multiplied by a
 *> sequence of Jacobi rotations.
 *> If JOBV = 'N', then V is not referenced.
 *> \endverbatim
@@ -136,8 +136,8 @@
 *> \verbatim
 *> LDV is INTEGER
 *> The leading dimension of the array V, LDV >= 1.
-*> If JOBV = 'V', LDV .GE. N.
-*> If JOBV = 'A', LDV .GE. MV.
+*> If JOBV = 'V', LDV >= N.
+*> If JOBV = 'A', LDV >= MV.
 *> \endverbatim
 *>
 *> \param[in] EPS
@@ -157,7 +157,7 @@
 *> TOL is DOUBLE PRECISION
 *> TOL is the threshold for Jacobi rotations. For a pair
 *> A(:,p), A(:,q) of pivot columns, the Jacobi rotation is
-*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL.
+*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) > TOL.
 *> \endverbatim
 *>
 *> \param[in] NSWEEP
@@ -175,14 +175,14 @@
 *> \param[in] LWORK
 *> \verbatim
 *> LWORK is INTEGER
-*> LWORK is the dimension of WORK. LWORK .GE. M.
+*> LWORK is the dimension of WORK. LWORK >= M.
 *> \endverbatim
 *>
 *> \param[out] INFO
 *> \verbatim
 *> INFO is INTEGER
-*> = 0 : successful exit.
-*> < 0 : if INFO = -i, then the i-th argument had an illegal value
+*> = 0: successful exit.
+*> < 0: if INFO = -i, then the i-th argument had an illegal value
 *> \endverbatim
 *
 * Authors:
diff --git a/lapack-netlib/SRC/zgsvj1.f b/lapack-netlib/SRC/zgsvj1.f
index 91e39ca8a..f0a23034b 100644
--- a/lapack-netlib/SRC/zgsvj1.f
+++ b/lapack-netlib/SRC/zgsvj1.f
@@ -61,7 +61,7 @@
 *> In terms of the columns of A, the first N1 columns are rotated 'against'
 *> the remaining N-N1 columns, trying to increase the angle between the
 *> corresponding subspaces. The off-diagonal block is N1-by(N-N1) and it is
-*> tiled using quadratic tiles of side KBL. Here, KBL is a tunning parmeter.
+*> tiled using quadratic tiles of side KBL. Here, KBL is a tuning parameter.
 *> The number of sweeps is given in NSWEEP and the orthogonality threshold
 *> is given in TOL.
 *> \endverbatim
@@ -147,7 +147,7 @@
 *> \param[in] MV
 *> \verbatim
 *> MV is INTEGER
-*> If JOBV .EQ. 'A', then MV rows of V are post-multipled by a
+*> If JOBV = 'A', then MV rows of V are post-multiplied by a
 *> sequence of Jacobi rotations.
 *> If JOBV = 'N', then MV is not referenced.
 *> \endverbatim
@@ -155,9 +155,9 @@
 *> \param[in,out] V
 *> \verbatim
 *> V is COMPLEX*16 array, dimension (LDV,N)
-*> If JOBV .EQ. 'V' then N rows of V are post-multipled by a
+*> If JOBV = 'V' then N rows of V are post-multiplied by a
 *> sequence of Jacobi rotations.
-*> If JOBV .EQ. 'A' then MV rows of V are post-multipled by a
+*> If JOBV = 'A' then MV rows of V are post-multiplied by a
 *> sequence of Jacobi rotations.
 *> If JOBV = 'N', then V is not referenced.
 *> \endverbatim
@@ -166,8 +166,8 @@
 *> \verbatim
 *> LDV is INTEGER
 *> The leading dimension of the array V, LDV >= 1.
-*> If JOBV = 'V', LDV .GE. N.
-*> If JOBV = 'A', LDV .GE. MV.
+*> If JOBV = 'V', LDV >= N.
+*> If JOBV = 'A', LDV >= MV.
 *> \endverbatim
 *>
 *> \param[in] EPS
@@ -187,7 +187,7 @@
 *> TOL is DOUBLE PRECISION
 *> TOL is the threshold for Jacobi rotations. For a pair
 *> A(:,p), A(:,q) of pivot columns, the Jacobi rotation is
-*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) .GT. TOL.
+*> applied only if ABS(COS(angle(A(:,p),A(:,q)))) > TOL.
 *> \endverbatim
 *>
 *> \param[in] NSWEEP
@@ -205,14 +205,14 @@
 *> \param[in] LWORK
 *> \verbatim
 *> LWORK is INTEGER
-*> LWORK is the dimension of WORK. LWORK .GE. M.
+*> LWORK is the dimension of WORK. LWORK >= M.
 *> \endverbatim
 *>
 *> \param[out] INFO
 *> \verbatim
 *> INFO is INTEGER
-*> = 0 : successful exit.
-*> < 0 : if INFO = -i, then the i-th argument had an illegal value
+*> = 0: successful exit.
+*> < 0: if INFO = -i, then the i-th argument had an illegal value
 *> \endverbatim
 *
 * Authors:
diff --git a/lapack-netlib/SRC/zhb2st_kernels.f b/lapack-netlib/SRC/zhb2st_kernels.f
index a440b5c0d..2c0cb6870 100644
--- a/lapack-netlib/SRC/zhb2st_kernels.f
+++ b/lapack-netlib/SRC/zhb2st_kernels.f
@@ -1,26 +1,26 @@
 *> \brief \b ZHB2ST_KERNELS
 *
 * @precisions fortran z -> s d c
-*
+*
 * =========== DOCUMENTATION ===========
 *
-* Online html documentation available at
-* http://www.netlib.org/lapack/explore-html/
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
 *
 *> \htmlonly
-*> Download ZHB2ST_KERNELS + dependencies
-*>
-*> [TGZ]
-*>
-*> [ZIP]
-*>
+*> Download ZHB2ST_KERNELS + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
 *> [TXT]
-*> \endhtmlonly
+*> \endhtmlonly
 *
 * Definition:
 * ===========
 *
-* SUBROUTINE ZHB2ST_KERNELS( UPLO, WANTZ, TTYPE,
+* SUBROUTINE ZHB2ST_KERNELS( UPLO, WANTZ, TTYPE,
 * ST, ED, SWEEP, N, NB, IB,
 * A, LDA, V, TAU, LDVT, WORK)
 *
@@ -32,9 +32,9 @@
 * INTEGER TTYPE, ST, ED, SWEEP, N, NB, IB, LDA, LDVT
 * ..
* .. Array Arguments .. -* COMPLEX*16 A( LDA, * ), V( * ), +* COMPLEX*16 A( LDA, * ), V( * ), * TAU( * ), WORK( * ) -* +* *> \par Purpose: * ============= *> @@ -124,7 +124,7 @@ *> LDVT is INTEGER. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array. Workspace of size nb. *> \endverbatim @@ -147,7 +147,7 @@ *> http://doi.acm.org/10.1145/2063384.2063394 *> *> A. Haidar, J. Kurzak, P. Luszczek, 2013. -*> An improved parallel singular value algorithm and its implementation +*> An improved parallel singular value algorithm and its implementation *> for multicore hardware, In Proceedings of 2013 International Conference *> for High Performance Computing, Networking, Storage and Analysis (SC '13). *> Denver, Colorado, USA, 2013. @@ -155,16 +155,16 @@ *> http://doi.acm.org/10.1145/2503210.2503292 *> *> A. Haidar, R. Solca, S. Tomov, T. Schulthess and J. Dongarra. -*> A novel hybrid CPU-GPU generalized eigensolver for electronic structure +*> A novel hybrid CPU-GPU generalized eigensolver for electronic structure *> calculations based on fine-grained memory aware tasks. *> International Journal of High Performance Computing Applications. *> Volume 28 Issue 2, Pages 196-209, May 2014. -*> http://hpc.sagepub.com/content/28/2/196 +*> http://hpc.sagepub.com/content/28/2/196 *> *> \endverbatim *> * ===================================================================== - SUBROUTINE ZHB2ST_KERNELS( UPLO, WANTZ, TTYPE, + SUBROUTINE ZHB2ST_KERNELS( UPLO, WANTZ, TTYPE, $ ST, ED, SWEEP, N, NB, IB, $ A, LDA, V, TAU, LDVT, WORK) * @@ -181,7 +181,7 @@ INTEGER TTYPE, ST, ED, SWEEP, N, NB, IB, LDA, LDVT * .. * .. Array Arguments .. - COMPLEX*16 A( LDA, * ), V( * ), + COMPLEX*16 A( LDA, * ), V( * ), $ TAU( * ), WORK( * ) * .. * @@ -195,8 +195,8 @@ * .. Local Scalars .. LOGICAL UPPER INTEGER I, J1, J2, LM, LN, VPOS, TAUPOS, - $ DPOS, OFDPOS, AJETER - COMPLEX*16 CTMP + $ DPOS, OFDPOS, AJETER + COMPLEX*16 CTMP * .. * .. External Subroutines .. EXTERNAL ZLARFG, ZLARFX, ZLARFY @@ -209,7 +209,7 @@ * .. * .. * .. Executable Statements .. 
-* +* AJETER = IB + LDVT UPPER = LSAME( UPLO, 'U' ) @@ -240,10 +240,10 @@ V( VPOS ) = ONE DO 10 I = 1, LM-1 V( VPOS+I ) = DCONJG( A( OFDPOS-I, ST+I ) ) - A( OFDPOS-I, ST+I ) = ZERO + A( OFDPOS-I, ST+I ) = ZERO 10 CONTINUE CTMP = DCONJG( A( OFDPOS, ST ) ) - CALL ZLARFG( LM, CTMP, V( VPOS+1 ), 1, + CALL ZLARFG( LM, CTMP, V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) A( OFDPOS, ST ) = CTMP * @@ -281,14 +281,14 @@ * V( VPOS ) = ONE DO 30 I = 1, LM-1 - V( VPOS+I ) = + V( VPOS+I ) = $ DCONJG( A( DPOS-NB-I, J1+I ) ) A( DPOS-NB-I, J1+I ) = ZERO 30 CONTINUE CTMP = DCONJG( A( DPOS-NB, J1 ) ) CALL ZLARFG( LM, CTMP, V( VPOS+1 ), 1, TAU( TAUPOS ) ) A( DPOS-NB, J1 ) = CTMP -* +* CALL ZLARFX( 'Right', LN-1, LM, V( VPOS ), $ TAU( TAUPOS ), $ A( DPOS-NB+1, J1 ), LDA-1, WORK) @@ -296,9 +296,9 @@ ENDIF * * Lower case -* +* ELSE -* +* IF( WANTZ ) THEN VPOS = MOD( SWEEP-1, 2 ) * N + ST TAUPOS = MOD( SWEEP-1, 2 ) * N + ST @@ -313,9 +313,9 @@ V( VPOS ) = ONE DO 20 I = 1, LM-1 V( VPOS+I ) = A( OFDPOS+I, ST-1 ) - A( OFDPOS+I, ST-1 ) = ZERO + A( OFDPOS+I, ST-1 ) = ZERO 20 CONTINUE - CALL ZLARFG( LM, A( OFDPOS, ST-1 ), V( VPOS+1 ), 1, + CALL ZLARFG( LM, A( OFDPOS, ST-1 ), V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) * LM = ED - ST + 1 @@ -342,7 +342,7 @@ LM = J2-J1+1 * IF( LM.GT.0) THEN - CALL ZLARFX( 'Right', LM, LN, V( VPOS ), + CALL ZLARFX( 'Right', LM, LN, V( VPOS ), $ TAU( TAUPOS ), A( DPOS+NB, ST ), $ LDA-1, WORK) * @@ -359,13 +359,13 @@ V( VPOS+I ) = A( DPOS+NB+I, ST ) A( DPOS+NB+I, ST ) = ZERO 40 CONTINUE - CALL ZLARFG( LM, A( DPOS+NB, ST ), V( VPOS+1 ), 1, + CALL ZLARFG( LM, A( DPOS+NB, ST ), V( VPOS+1 ), 1, $ TAU( TAUPOS ) ) * - CALL ZLARFX( 'Left', LM, LN-1, V( VPOS ), + CALL ZLARFX( 'Left', LM, LN-1, V( VPOS ), $ DCONJG( TAU( TAUPOS ) ), $ A( DPOS+NB-1, ST+1 ), LDA-1, WORK) - + ENDIF ENDIF ENDIF @@ -374,4 +374,4 @@ * * END OF ZHB2ST_KERNELS * - END + END diff --git a/lapack-netlib/SRC/zhecon_3.f b/lapack-netlib/SRC/zhecon_3.f index 8c3a9f32b..9d2a240b6 100644 --- a/lapack-netlib/SRC/zhecon_3.f +++ b/lapack-netlib/SRC/zhecon_3.f @@ -19,7 +19,7 @@ * =========== * * SUBROUTINE ZHECON_3( UPLO, N, A, LDA, E, IPIV, ANORM, RCOND, -* WORK, IWORK, INFO ) +* WORK, INFO ) * * .. Scalar Arguments .. * CHARACTER UPLO @@ -27,7 +27,7 @@ * DOUBLE PRECISION ANORM, RCOND * .. * .. Array Arguments .. -* INTEGER IPIV( * ), IWORK( * ) +* INTEGER IPIV( * ) * COMPLEX*16 A( LDA, * ), E ( * ), WORK( * ) * .. * @@ -129,11 +129,6 @@ *> WORK is COMPLEX*16 array, dimension (2*N) *> \endverbatim *> -*> \param[out] IWORK -*> \verbatim -*> IWORK is INTEGER array, dimension (N) -*> \endverbatim -*> *> \param[out] INFO *> \verbatim *> INFO is INTEGER diff --git a/lapack-netlib/SRC/zheequb.f b/lapack-netlib/SRC/zheequb.f index d698232e8..7d719f41e 100644 --- a/lapack-netlib/SRC/zheequb.f +++ b/lapack-netlib/SRC/zheequb.f @@ -271,7 +271,7 @@ AVG = AVG / N STD = 0.0D0 - DO I = N+1, N + DO I = N+1, 2*N WORK( I ) = S( I-N ) * WORK( I-N ) - AVG END DO CALL ZLASSQ( N, WORK( N+1 ), 1, SCALE, SUMSQ ) diff --git a/lapack-netlib/SRC/zheevr.f b/lapack-netlib/SRC/zheevr.f index 810373c83..def2d1f9d 100644 --- a/lapack-netlib/SRC/zheevr.f +++ b/lapack-netlib/SRC/zheevr.f @@ -210,7 +210,7 @@ *> eigenvalues are computed to high relative accuracy when *> possible in future releases. The current code does not *> make any guarantees about high relative accuracy, but -*> furutre releases will. See J. Barlow and J. Demmel, +*> future releases will. See J. Barlow and J. 
Demmel, *> "Computing Accurate Eigensystems of Scaled Diagonally *> Dominant Matrices", LAPACK Working Note #7, for a discussion *> of which matrices define their eigenvalues to high relative diff --git a/lapack-netlib/SRC/zheevr_2stage.f b/lapack-netlib/SRC/zheevr_2stage.f index ab7f3374e..fe4a72160 100644 --- a/lapack-netlib/SRC/zheevr_2stage.f +++ b/lapack-netlib/SRC/zheevr_2stage.f @@ -217,7 +217,7 @@ *> eigenvalues are computed to high relative accuracy when *> possible in future releases. The current code does not *> make any guarantees about high relative accuracy, but -*> furutre releases will. See J. Barlow and J. Demmel, +*> future releases will. See J. Barlow and J. Demmel, *> "Computing Accurate Eigensystems of Scaled Diagonally *> Dominant Matrices", LAPACK Working Note #7, for a discussion *> of which matrices define their eigenvalues to high relative diff --git a/lapack-netlib/SRC/zhegs2.f b/lapack-netlib/SRC/zhegs2.f index 0bdc653b9..aec526353 100644 --- a/lapack-netlib/SRC/zhegs2.f +++ b/lapack-netlib/SRC/zhegs2.f @@ -97,6 +97,7 @@ *> B is COMPLEX*16 array, dimension (LDB,N) *> The triangular factor from the Cholesky factorization of B, *> as returned by ZPOTRF. +*> B is modified by the routine but restored on exit. *> \endverbatim *> *> \param[in] LDB diff --git a/lapack-netlib/SRC/zhegst.f b/lapack-netlib/SRC/zhegst.f index d0c08a8f6..dcf5fe8b5 100644 --- a/lapack-netlib/SRC/zhegst.f +++ b/lapack-netlib/SRC/zhegst.f @@ -97,6 +97,7 @@ *> B is COMPLEX*16 array, dimension (LDB,N) *> The triangular factor from the Cholesky factorization of B, *> as returned by ZPOTRF. +*> B is modified by the routine but restored on exit. *> \endverbatim *> *> \param[in] LDB diff --git a/lapack-netlib/SRC/zherfsx.f b/lapack-netlib/SRC/zherfsx.f index d176b102c..fa11702a8 100644 --- a/lapack-netlib/SRC/zherfsx.f +++ b/lapack-netlib/SRC/zherfsx.f @@ -102,7 +102,7 @@ *> \param[in] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) -*> The symmetric matrix A. If UPLO = 'U', the leading N-by-N +*> The Hermitian matrix A. If UPLO = 'U', the leading N-by-N *> upper triangular part of A contains the upper triangular *> part of the matrix A, and the strictly lower triangular *> part of A is not referenced. If UPLO = 'L', the leading @@ -270,7 +270,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -306,14 +306,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -321,9 +321,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. 
*> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/zhesv_aa.f b/lapack-netlib/SRC/zhesv_aa.f index 8511f0e7d..5f1a9f4b3 100644 --- a/lapack-netlib/SRC/zhesv_aa.f +++ b/lapack-netlib/SRC/zhesv_aa.f @@ -42,7 +42,7 @@ *> matrices. *> *> Aasen's algorithm is used to factor A as -*> A = U * T * U**H, if UPLO = 'U', or +*> A = U**H * T * U, if UPLO = 'U', or *> A = L * T * L**H, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is Hermitian and tridiagonal. The factored form @@ -86,7 +86,7 @@ *> *> On exit, if INFO = 0, the tridiagonal matrix T and the *> multipliers used to obtain the factor U or L from the -*> factorization A = U*T*U**H or A = L*T*L**H as computed by +*> factorization A = U**H*T*U or A = L*T*L**H as computed by *> ZHETRF_AA. *> \endverbatim *> @@ -230,7 +230,7 @@ RETURN END IF * -* Compute the factorization A = U*T*U**H or A = L*T*L**H. +* Compute the factorization A = U**H*T*U or A = L*T*L**H. * CALL ZHETRF_AA( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) IF( INFO.EQ.0 ) THEN diff --git a/lapack-netlib/SRC/zhesv_aa_2stage.f b/lapack-netlib/SRC/zhesv_aa_2stage.f index ed221dc69..7a4e35f45 100644 --- a/lapack-netlib/SRC/zhesv_aa_2stage.f +++ b/lapack-netlib/SRC/zhesv_aa_2stage.f @@ -44,7 +44,7 @@ *> matrices. *> *> Aasen's 2-stage algorithm is used to factor A as -*> A = U * T * U**H, if UPLO = 'U', or +*> A = U**H * T * U, if UPLO = 'U', or *> A = L * T * L**H, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is Hermitian and band. The matrix T is @@ -211,9 +211,7 @@ * * .. Local Scalars .. LOGICAL UPPER, TQUERY, WQUERY - INTEGER I, J, K, I1, I2, TD - INTEGER LDTB, LWKOPT, NB, KB, NT, IINFO - COMPLEX PIV + INTEGER LWKOPT * .. * .. External Functions .. LOGICAL LSAME @@ -263,7 +261,7 @@ RETURN END IF * -* Compute the factorization A = U*T*U**H or A = L*T*L**H. +* Compute the factorization A = U**H*T*U or A = L*T*L**H. * CALL ZHETRF_AA_2STAGE( UPLO, N, A, LDA, TB, LTB, IPIV, IPIV2, $ WORK, LWORK, INFO ) diff --git a/lapack-netlib/SRC/zhesvxx.f b/lapack-netlib/SRC/zhesvxx.f index 375fc072d..20168185c 100644 --- a/lapack-netlib/SRC/zhesvxx.f +++ b/lapack-netlib/SRC/zhesvxx.f @@ -46,7 +46,7 @@ *> *> ZHESVXX uses the diagonal pivoting factorization to compute the *> solution to a complex*16 system of linear equations A * X = B, where -*> A is an N-by-N symmetric matrix and X and B are N-by-NRHS +*> A is an N-by-N Hermitian matrix and X and B are N-by-NRHS *> matrices. *> *> If requested, both normwise and maximum componentwise error bounds @@ -88,7 +88,7 @@ *> A = L * D * L**T, if UPLO = 'L', *> *> where U (or L) is a product of permutation and unit upper (lower) -*> triangular matrices, and D is symmetric and block diagonal with +*> triangular matrices, and D is Hermitian and block diagonal with *> 1-by-1 and 2-by-2 diagonal blocks. *> *> 3. If some D(i,i)=0, so that D is exactly singular, then the @@ -161,7 +161,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) -*> The symmetric matrix A. If UPLO = 'U', the leading N-by-N +*> The Hermitian matrix A. 
If UPLO = 'U', the leading N-by-N *> upper triangular part of A contains the upper triangular *> part of the matrix A, and the strictly lower triangular *> part of A is not referenced. If UPLO = 'L', the leading @@ -378,7 +378,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -414,14 +414,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -429,9 +429,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. *> (other values are reserved for future use) *> *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual diff --git a/lapack-netlib/SRC/zhetf2_rk.f b/lapack-netlib/SRC/zhetf2_rk.f index 84d3a0248..6578214df 100644 --- a/lapack-netlib/SRC/zhetf2_rk.f +++ b/lapack-netlib/SRC/zhetf2_rk.f @@ -322,7 +322,7 @@ * * Factorize A as U*D*U**H using the upper triangle of A * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = CZERO @@ -676,7 +676,7 @@ * * Factorize A as L*D*L**H using the lower triangle of A * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = CZERO * diff --git a/lapack-netlib/SRC/zhetrd_2stage.f b/lapack-netlib/SRC/zhetrd_2stage.f index 9d6a426a3..1a2c00a2f 100644 --- a/lapack-netlib/SRC/zhetrd_2stage.f +++ b/lapack-netlib/SRC/zhetrd_2stage.f @@ -123,23 +123,22 @@ *> *> \param[out] HOUS2 *> \verbatim -*> HOUS2 is COMPLEX*16 array, dimension LHOUS2, that -*> store the Householder representation of the stage2 +*> HOUS2 is COMPLEX*16 array, dimension (LHOUS2) +*> Stores the Householder representation of the stage2 *> band to tridiagonal. *> \endverbatim *> *> \param[in] LHOUS2 *> \verbatim *> LHOUS2 is INTEGER -*> The dimension of the array HOUS2. LHOUS2 = MAX(1, dimension) -*> If LWORK = -1, or LHOUS2=-1, +*> The dimension of the array HOUS2. +*> If LWORK = -1, or LHOUS2 = -1, *> then a query is assumed; the routine *> only calculates the optimal size of the HOUS2 array, returns *> this value as the first entry of the HOUS2 array, and no error *> message related to LHOUS2 is issued by XERBLA. 
-*> LHOUS2 = MAX(1, dimension) where -*> dimension = 4*N if VECT='N' -*> not available now if VECT='H' +*> If VECT='N', LHOUS2 = max(1, 4*n); +*> if VECT='V', option not yet available. *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/zhetrd_hb2st.F b/lapack-netlib/SRC/zhetrd_hb2st.F index 86122cccc..4ba7bfc21 100644 --- a/lapack-netlib/SRC/zhetrd_hb2st.F +++ b/lapack-netlib/SRC/zhetrd_hb2st.F @@ -50,9 +50,9 @@ * Arguments: * ========== * -*> \param[in] STAGE +*> \param[in] STAGE1 *> \verbatim -*> STAGE is CHARACTER*1 +*> STAGE1 is CHARACTER*1 *> = 'N': "No": to mention that the stage 1 of the reduction *> from dense to band using the zhetrd_he2hb routine *> was not called before this routine to reproduce AB. diff --git a/lapack-netlib/SRC/zhetrd_he2hb.f b/lapack-netlib/SRC/zhetrd_he2hb.f index e33bf4b2b..b85b3889a 100644 --- a/lapack-netlib/SRC/zhetrd_he2hb.f +++ b/lapack-netlib/SRC/zhetrd_he2hb.f @@ -363,7 +363,7 @@ * * * Set the workspace of the triangular matrix T to zero once such a -* way everytime T is generated the upper/lower portion will be always zero +* way every time T is generated the upper/lower portion will be always zero * CALL ZLASET( "A", LDT, KD, ZERO, ZERO, WORK( TPOS ), LDT ) * diff --git a/lapack-netlib/SRC/zhetrf_aa.f b/lapack-netlib/SRC/zhetrf_aa.f index e355aed14..b80a84118 100644 --- a/lapack-netlib/SRC/zhetrf_aa.f +++ b/lapack-netlib/SRC/zhetrf_aa.f @@ -37,7 +37,7 @@ *> ZHETRF_AA computes the factorization of a complex hermitian matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**H or A = L*T*L**H +*> A = U**H*T*U or A = L*T*L**H *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a hermitian tridiagonal matrix. @@ -223,7 +223,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**H using the upper triangle of A +* Factorize A as U**H*D*U using the upper triangle of A * ..................................................... * * copy first row A(1, 1:N) into H(1:n) (stored in WORK(1:N)) @@ -256,7 +256,7 @@ $ A( MAX(1, J), J+1 ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J @@ -376,7 +376,7 @@ $ A( J+1, MAX(1, J) ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J diff --git a/lapack-netlib/SRC/zhetrf_aa_2stage.f b/lapack-netlib/SRC/zhetrf_aa_2stage.f index 73c0ebe9a..f63713664 100644 --- a/lapack-netlib/SRC/zhetrf_aa_2stage.f +++ b/lapack-netlib/SRC/zhetrf_aa_2stage.f @@ -38,7 +38,7 @@ *> ZHETRF_AA_2STAGE computes the factorization of a double hermitian matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**H*T*U or A = L*T*L**H *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a hermitian band matrix with the @@ -66,7 +66,7 @@ *> *> \param[in,out] A *> \verbatim -*> A is COMPLEX array, dimension (LDA,N) +*> A is COMPLEX*16 array, dimension (LDA,N) *> On entry, the hermitian matrix A. 
If UPLO = 'U', the leading *> N-by-N upper triangular part of A contains the upper *> triangular part of the matrix A, and the strictly lower @@ -87,7 +87,7 @@ *> *> \param[out] TB *> \verbatim -*> TB is COMPLEX array, dimension (LTB) +*> TB is COMPLEX*16 array, dimension (LTB) *> On exit, details of the LU factorization of the band matrix. *> \endverbatim *> @@ -121,7 +121,7 @@ *> *> \param[out] WORK *> \verbatim -*> WORK is COMPLEX workspace of size LWORK +*> WORK is COMPLEX*16 workspace of size LWORK *> \endverbatim *> *> \param[in] LWORK @@ -276,7 +276,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**H*D*U using the upper triangle of A * ..................................................... * DO J = 0, NT-1 @@ -452,14 +452,17 @@ c END IF * > Apply pivots to previous columns of L CALL ZSWAP( K-1, A( (J+1)*NB+1, I1 ), 1, $ A( (J+1)*NB+1, I2 ), 1 ) -* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL ZSWAP( I2-I1-1, A( I1, I1+1 ), LDA, - $ A( I1+1, I2 ), 1 ) +* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) + IF( I2.GT.(I1+1) ) THEN + CALL ZSWAP( I2-I1-1, A( I1, I1+1 ), LDA, + $ A( I1+1, I2 ), 1 ) + CALL ZLACGV( I2-I1-1, A( I1+1, I2 ), 1 ) + END IF CALL ZLACGV( I2-I1, A( I1, I1+1 ), LDA ) - CALL ZLACGV( I2-I1-1, A( I1+1, I2 ), 1 ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL ZSWAP( N-I2, A( I1, I2+1 ), LDA, - $ A( I2, I2+1 ), LDA ) + IF( I2.LT.N ) + $ CALL ZSWAP( N-I2, A( I1, I2+1 ), LDA, + $ A( I2, I2+1 ), LDA ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) @@ -476,7 +479,7 @@ c END IF ELSE * * ..................................................... -* Factorize A as L*D*L**T using the lower triangle of A +* Factorize A as L*D*L**H using the lower triangle of A * ..................................................... * DO J = 0, NT-1 @@ -629,14 +632,17 @@ c END IF * > Apply pivots to previous columns of L CALL ZSWAP( K-1, A( I1, (J+1)*NB+1 ), LDA, $ A( I2, (J+1)*NB+1 ), LDA ) -* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL ZSWAP( I2-I1-1, A( I1+1, I1 ), 1, - $ A( I2, I1+1 ), LDA ) +* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) + IF( I2.GT.(I1+1) ) THEN + CALL ZSWAP( I2-I1-1, A( I1+1, I1 ), 1, + $ A( I2, I1+1 ), LDA ) + CALL ZLACGV( I2-I1-1, A( I2, I1+1 ), LDA ) + END IF CALL ZLACGV( I2-I1, A( I1+1, I1 ), 1 ) - CALL ZLACGV( I2-I1-1, A( I2, I1+1 ), LDA ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL ZSWAP( N-I2, A( I2+1, I1 ), 1, - $ A( I2+1, I2 ), 1 ) + IF( I2.LT.N ) + $ CALL ZSWAP( N-I2, A( I2+1, I1 ), 1, + $ A( I2+1, I2 ), 1 ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) diff --git a/lapack-netlib/SRC/zhetri2.f b/lapack-netlib/SRC/zhetri2.f index a7acff49f..ae43b14fe 100644 --- a/lapack-netlib/SRC/zhetri2.f +++ b/lapack-netlib/SRC/zhetri2.f @@ -62,7 +62,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) -*> On entry, the NB diagonal matrix D and the multipliers +*> On entry, the block diagonal matrix D and the multipliers *> used to obtain the factor U or L as computed by ZHETRF. *> *> On exit, if INFO = 0, the (symmetric) inverse of the original @@ -82,7 +82,7 @@ *> \param[in] IPIV *> \verbatim *> IPIV is INTEGER array, dimension (N) -*> Details of the interchanges and the NB structure of D +*> Details of the interchanges and the block structure of D *> as determined by ZHETRF. 
*> \endverbatim *> diff --git a/lapack-netlib/SRC/zhetrs_aa.f b/lapack-netlib/SRC/zhetrs_aa.f index 9d302b9cd..4b3253abc 100644 --- a/lapack-netlib/SRC/zhetrs_aa.f +++ b/lapack-netlib/SRC/zhetrs_aa.f @@ -38,8 +38,8 @@ *> \verbatim *> *> ZHETRS_AA solves a system of linear equations A*X = B with a complex -*> hermitian matrix A using the factorization A = U*T*U**H or -*> A = L*T*L**T computed by ZHETRF_AA. +*> hermitian matrix A using the factorization A = U**H*T*U or +*> A = L*T*L**H computed by ZHETRF_AA. *> \endverbatim * * Arguments: @@ -50,7 +50,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**H; +*> = 'U': Upper triangular, form is A = U**H*T*U; *> = 'L': Lower triangular, form is A = L*T*L**H. *> \endverbatim *> @@ -98,14 +98,16 @@ *> The leading dimension of the array B. LDB >= max(1,N). *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim -*> WORK is DOUBLE array, dimension (MAX(1,LWORK)) +*> WORK is COMPLEX*16 array, dimension (MAX(1,LWORK)) *> \endverbatim *> *> \param[in] LWORK *> \verbatim -*> LWORK is INTEGER, LWORK >= MAX(1,3*N-2). +*> LWORK is INTEGER +*> The dimension of the array WORK. LWORK >= max(1,3*N-2). +*> \endverbatim *> *> \param[out] INFO *> \verbatim @@ -199,61 +201,80 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**H*T*U. * -* Pivot, P**T * B +* 1) Forward substitution with U**H * - DO K = 1, N - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO + IF( N.GT.1 ) THEN * -* Compute (U \P**T * B) -> B [ (U \P**T * B) ] +* Pivot, P**T * B -> B * - CALL ZTRSM('L', 'U', 'C', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) + DO K = 1, N + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO * -* Compute T \ B -> B [ T \ (U \P**T * B) ] +* Compute U**H \ B -> B [ (U**H \P**T * B) ] * - CALL ZLACPY( 'F', 1, N, A(1, 1), LDA+1, WORK(N), 1) + CALL ZTRSM( 'L', 'U', 'C', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB ) + END IF +* +* 2) Solve with triangular matrix T +* +* Compute T \ B -> B [ T \ (U**H \P**T * B) ] +* + CALL ZLACPY( 'F', 1, N, A(1, 1), LDA+1, WORK(N), 1 ) IF( N.GT.1 ) THEN CALL ZLACPY( 'F', 1, N-1, A( 1, 2 ), LDA+1, WORK( 2*N ), 1) - CALL ZLACPY( 'F', 1, N-1, A( 1, 2 ), LDA+1, WORK( 1 ), 1) + CALL ZLACPY( 'F', 1, N-1, A( 1, 2 ), LDA+1, WORK( 1 ), 1 ) CALL ZLACGV( N-1, WORK( 1 ), 1 ) END IF - CALL ZGTSV(N, NRHS, WORK(1), WORK(N), WORK(2*N), B, LDB, - $ INFO) + CALL ZGTSV( N, NRHS, WORK(1), WORK(N), WORK(2*N), B, LDB, + $ INFO ) +* +* 3) Backward substitution with U +* + IF( N.GT.1 ) THEN * -* Compute (U**T \ B) -> B [ U**T \ (T \ (U \P**T * B) ) ] +* Compute U \ B -> B [ U \ (T \ (U**H \P**T * B) ) ] * - CALL ZTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B(2, 1), LDB) + CALL ZTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B(2, 1), LDB) * -* Pivot, P * B [ P * (U**T \ (T \ (U \P**T * B) )) ] +* Pivot, P * B [ P * (U**H \ (T \ (U \P**T * B) )) ] * - DO K = N, 1, -1 - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO + DO K = N, 1, -1 + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO + END IF * ELSE * -* Solve A*X = B, where A = L*T*L**T. +* Solve A*X = B, where A = L*T*L**H. 
+* +* 1) Forward substitution with L * -* Pivot, P**T * B + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B * - DO K = 1, N - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO + DO K = 1, N + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute L \ B -> B [ (L \P**T * B) ] * - CALL ZTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B(2, 1), LDB) + CALL ZTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B(2, 1), LDB) + END IF +* +* 2) Solve with triangular matrix T * * Compute T \ B -> B [ T \ (L \P**T * B) ] * @@ -266,18 +287,23 @@ CALL ZGTSV(N, NRHS, WORK(1), WORK(N), WORK(2*N), B, LDB, $ INFO) * -* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] +* 3) Backward substitution with L**H +* + IF( N.GT.1 ) THEN * - CALL ZTRSM( 'L', 'L', 'C', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) +* Compute L**H \ B -> B [ L**H \ (T \ (L \P**T * B) ) ] * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] + CALL ZTRSM( 'L', 'L', 'C', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB) * - DO K = N, 1, -1 - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO +* Pivot, P * B [ P * (L**H \ (T \ (L \P**T * B) )) ] +* + DO K = N, 1, -1 + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO + END IF * END IF * diff --git a/lapack-netlib/SRC/zhetrs_aa_2stage.f b/lapack-netlib/SRC/zhetrs_aa_2stage.f index 7fcee1118..c621bd571 100644 --- a/lapack-netlib/SRC/zhetrs_aa_2stage.f +++ b/lapack-netlib/SRC/zhetrs_aa_2stage.f @@ -38,8 +38,8 @@ *> \verbatim *> *> ZHETRS_AA_2STAGE solves a system of linear equations A*X = B with a -*> hermitian matrix A using the factorization A = U*T*U**T or -*> A = L*T*L**T computed by ZHETRF_AA_2STAGE. +*> hermitian matrix A using the factorization A = U**H*T*U or +*> A = L*T*L**H computed by ZHETRF_AA_2STAGE. *> \endverbatim * * Arguments: @@ -50,8 +50,8 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; -*> = 'L': Lower triangular, form is A = L*T*L**T. +*> = 'U': Upper triangular, form is A = U**H*T*U; +*> = 'L': Lower triangular, form is A = L*T*L**H. *> \endverbatim *> *> \param[in] N @@ -210,33 +210,33 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**H*T*U. * IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL ZLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (U**T \P**T * B) -> B [ (U**T \P**T * B) ] +* Compute (U**H \ B) -> B [ (U**H \P**T * B) ] * CALL ZTRSM( 'L', 'U', 'C', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) * END IF * -* Compute T \ B -> B [ T \ (U**T \P**T * B) ] +* Compute T \ B -> B [ T \ (U**H \P**T * B) ] * CALL ZGBTRS( 'N', N, NB, NB, NRHS, TB, LDTB, IPIV2, B, LDB, $ INFO) IF( N.GT.NB ) THEN * -* Compute (U \ B) -> B [ U \ (T \ (U**T \P**T * B) ) ] +* Compute (U \ B) -> B [ U \ (T \ (U**H \P**T * B) ) ] * CALL ZTRSM( 'L', 'U', 'N', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (U \ (T \ (U**T \P**T * B) )) ] +* Pivot, P * B -> B [ P * (U \ (T \ (U**H \P**T * B) )) ] * CALL ZLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * @@ -244,15 +244,15 @@ * ELSE * -* Solve A*X = B, where A = L*T*L**T. +* Solve A*X = B, where A = L*T*L**H. 
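The factor/solve pairing these comment and workspace fixes describe can be exercised end to end. A minimal, illustrative Fortran driver (hypothetical sizes; LWORK is simply taken large enough to satisfy both routines' documented minimums, and error handling is omitted):

      PROGRAM AASEN_DEMO
*     Factor a Hermitian matrix with Aasen's algorithm and solve
*     A*X = B using the U**H*T*U form documented above.
      INTEGER            NMAX, LWORK
      PARAMETER          ( NMAX = 4, LWORK = 64*NMAX )
      INTEGER            I, J, N, NRHS, INFO
      INTEGER            IPIV( NMAX )
      COMPLEX*16         A( NMAX, NMAX ), B( NMAX, 1 ), WORK( LWORK )
      N = NMAX
      NRHS = 1
*     Simple Hermitian test matrix: 2 on the diagonal, 1 elsewhere.
      DO 20 J = 1, N
         DO 10 I = 1, N
            A( I, J ) = ( 1.0D+0, 0.0D+0 )
   10    CONTINUE
         A( J, J ) = ( 2.0D+0, 0.0D+0 )
         B( J, 1 ) = ( 1.0D+0, 0.0D+0 )
   20 CONTINUE
      CALL ZHETRF_AA( 'U', N, A, NMAX, IPIV, WORK, LWORK, INFO )
      IF( INFO.EQ.0 )
     $   CALL ZHETRS_AA( 'U', N, NRHS, A, NMAX, IPIV, B, NMAX,
     $                   WORK, LWORK, INFO )
      WRITE( *, * ) 'INFO =', INFO
      END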
* IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL ZLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute (L \ B) -> B [ (L \P**T * B) ] * CALL ZTRSM( 'L', 'L', 'N', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) @@ -265,12 +265,12 @@ $ INFO) IF( N.GT.NB ) THEN * -* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] +* Compute (L**H \ B) -> B [ L**H \ (T \ (L \P**T * B) ) ] * CALL ZTRSM( 'L', 'L', 'C', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* Pivot, P * B -> B [ P * (L**H \ (T \ (L \P**T * B) )) ] * CALL ZLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * diff --git a/lapack-netlib/SRC/zhgeqz.f b/lapack-netlib/SRC/zhgeqz.f index b51cba4f7..b21199e9e 100644 --- a/lapack-netlib/SRC/zhgeqz.f +++ b/lapack-netlib/SRC/zhgeqz.f @@ -744,8 +744,14 @@ * * Exceptional shift. Chosen for no particularly good reason. * - ESHIFT = ESHIFT + (ASCALE*H(ILAST,ILAST-1))/ - $ (BSCALE*T(ILAST-1,ILAST-1)) + IF( ( IITER / 20 )*20.EQ.IITER .AND. + $ BSCALE*ABS1(T( ILAST, ILAST )).GT.SAFMIN ) THEN + ESHIFT = ESHIFT + ( ASCALE*H( ILAST, + $ ILAST ) )/( BSCALE*T( ILAST, ILAST ) ) + ELSE + ESHIFT = ESHIFT + ( ASCALE*H( ILAST, + $ ILAST-1 ) )/( BSCALE*T( ILAST-1, ILAST-1 ) ) + END IF SHIFT = ESHIFT END IF * diff --git a/lapack-netlib/SRC/zhseqr.f b/lapack-netlib/SRC/zhseqr.f index 1e8134c39..2ee874dfd 100644 --- a/lapack-netlib/SRC/zhseqr.f +++ b/lapack-netlib/SRC/zhseqr.f @@ -69,7 +69,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -86,7 +86,7 @@ *> set by a previous call to ZGEBAL, and then passed to ZGEHRD *> when the matrix output by ZGEBAL is reduced to Hessenberg *> form. Otherwise ILO and IHI should be set to 1 and N -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -98,17 +98,17 @@ *> triangular matrix T from the Schur decomposition (the *> Schur form). If INFO = 0 and JOB = 'E', the contents of *> H are unspecified on exit. (The output value of H when -*> INFO.GT.0 is given under the description of INFO below.) +*> INFO > 0 is given under the description of INFO below.) *> *> Unlike earlier versions of ZHSEQR, this subroutine may -*> explicitly H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 +*> explicitly H(i,j) = 0 for i > j and j = 1, 2, ... ILO-1 *> or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). *> \endverbatim *> *> \param[out] W @@ -131,7 +131,7 @@ *> if INFO = 0, Z contains Q*Z. *> Normally Q is the unitary matrix generated by ZUNGHR *> after the call to ZGEHRD which formed the Hessenberg matrix -*> H. (The output value of Z when INFO.GT.0 is given under +*> H. (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -139,7 +139,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if COMPZ = 'I' or -*> COMPZ = 'V', then LDZ.GE.MAX(1,N). Otherwize, LDZ.GE.1. +*> COMPZ = 'V', then LDZ >= MAX(1,N). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -152,7 +152,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. 
max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient and delivers very good and sometimes *> optimal performance. However, LWORK as large as 11*N *> may be required for optimal performance. A workspace @@ -170,21 +170,21 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .LT. 0: if INFO = -i, the i-th argument had an illegal +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal *> value -*> .GT. 0: if INFO = i, ZHSEQR failed to compute all of -*> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR -*> and WI contain those eigenvalues which have been +*> > 0: if INFO = i, ZHSEQR failed to compute all of +*> the eigenvalues. Elements 1:ilo-1 and i+1:n of W +*> contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 0 and JOB = 'E', then on exit, the +*> If INFO > 0 and JOB = 'E', then on exit, the *> remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and JOB = 'S', then on exit +*> If INFO > 0 and JOB = 'S', then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -192,19 +192,19 @@ *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and COMPZ = 'V', then on exit +*> If INFO > 0 and COMPZ = 'V', then on exit *> *> (final value of Z) = (initial value of Z)*U *> *> where U is the unitary matrix in (*) (regard- *> less of the value of JOB.) *> -*> If INFO .GT. 0 and COMPZ = 'I', then on exit +*> If INFO > 0 and COMPZ = 'I', then on exit *> (final value of Z) = U *> where U is the unitary matrix in (*) (regard- *> less of the value of JOB.) *> -*> If INFO .GT. 0 and COMPZ = 'N', then Z is not +*> If INFO > 0 and COMPZ = 'N', then Z is not *> accessed. *> \endverbatim * @@ -244,8 +244,8 @@ *> This depends on ILO, IHI and NS. NS is the *> number of simultaneous shifts returned *> by ILAENV(ISPEC=15). (See ISPEC=15 below.) -*> The default for (IHI-ILO+1).LE.500 is NS. -*> The default for (IHI-ILO+1).GT.500 is 3*NS/2. +*> The default for (IHI-ILO+1) <= 500 is NS. +*> The default for (IHI-ILO+1) > 500 is 3*NS/2. *> *> ISPEC=14: Nibble crossover point. (See IPARMQ for *> details.) Default: 14% of deflation window @@ -323,8 +323,8 @@ PARAMETER ( NTINY = 11 ) * * ==== NL allocates some local workspace to help small matrices -* . through a rare ZLAHQR failure. NL .GT. NTINY = 11 is -* . required and NL .LE. NMIN = ILAENV(ISPEC=12,...) is recom- +* . through a rare ZLAHQR failure. NL > NTINY = 11 is +* . required and NL <= NMIN = ILAENV(ISPEC=12,...) is recom- * . mended. (The default value of NMIN is 75.) Using NL = 49 * . allows up to six simultaneous shifts and a 16-by-16 * . deflation window. ==== diff --git a/lapack-netlib/SRC/zla_gbrcond_c.f b/lapack-netlib/SRC/zla_gbrcond_c.f index 20109124b..5b2dc46fc 100644 --- a/lapack-netlib/SRC/zla_gbrcond_c.f +++ b/lapack-netlib/SRC/zla_gbrcond_c.f @@ -133,13 +133,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. 
diff --git a/lapack-netlib/SRC/zla_gbrcond_x.f b/lapack-netlib/SRC/zla_gbrcond_x.f index 7e6c12ea5..17e9eede7 100644 --- a/lapack-netlib/SRC/zla_gbrcond_x.f +++ b/lapack-netlib/SRC/zla_gbrcond_x.f @@ -126,13 +126,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/zla_gbrfsx_extended.f b/lapack-netlib/SRC/zla_gbrfsx_extended.f index 7a850f1aa..a22b5592e 100644 --- a/lapack-netlib/SRC/zla_gbrfsx_extended.f +++ b/lapack-netlib/SRC/zla_gbrfsx_extended.f @@ -65,19 +65,19 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] TRANS_TYPE *> \verbatim *> TRANS_TYPE is INTEGER *> Specifies the transposition operation on A. -*> The value is defined by ILATRANS(T) where T is a CHARACTER and -*> T = 'N': No transpose +*> The value is defined by ILATRANS(T) where T is a CHARACTER and T +*> = 'N': No transpose *> = 'T': Transpose *> = 'C': Conjugate transpose *> \endverbatim @@ -269,7 +269,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/zla_gercond_c.f b/lapack-netlib/SRC/zla_gercond_c.f index e629f90e8..a1c0df588 100644 --- a/lapack-netlib/SRC/zla_gercond_c.f +++ b/lapack-netlib/SRC/zla_gercond_c.f @@ -22,7 +22,7 @@ * LDAF, IPIV, C, CAPPLY, * INFO, WORK, RWORK ) * -* .. Scalar Aguments .. +* .. Scalar Arguments .. * CHARACTER TRANS * LOGICAL CAPPLY * INTEGER N, LDA, LDAF, INFO @@ -114,13 +114,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. @@ -148,7 +148,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * -* .. Scalar Aguments .. +* .. Scalar Arguments .. CHARACTER TRANS LOGICAL CAPPLY INTEGER N, LDA, LDAF, INFO diff --git a/lapack-netlib/SRC/zla_gercond_x.f b/lapack-netlib/SRC/zla_gercond_x.f index 244bf58a3..3aa63ea84 100644 --- a/lapack-netlib/SRC/zla_gercond_x.f +++ b/lapack-netlib/SRC/zla_gercond_x.f @@ -107,13 +107,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. 
diff --git a/lapack-netlib/SRC/zla_gerfsx_extended.f b/lapack-netlib/SRC/zla_gerfsx_extended.f index 2e93e265e..e42ffa8e2 100644 --- a/lapack-netlib/SRC/zla_gerfsx_extended.f +++ b/lapack-netlib/SRC/zla_gerfsx_extended.f @@ -64,19 +64,19 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] TRANS_TYPE *> \verbatim *> TRANS_TYPE is INTEGER *> Specifies the transposition operation on A. -*> The value is defined by ILATRANS(T) where T is a CHARACTER and -*> T = 'N': No transpose +*> The value is defined by ILATRANS(T) where T is a CHARACTER and T +*> = 'N': No transpose *> = 'T': Transpose *> = 'C': Conjugate transpose *> \endverbatim @@ -256,7 +256,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERRS_C is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERRS_C is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERRS_C(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/zla_hercond_c.f b/lapack-netlib/SRC/zla_hercond_c.f index 61cfe95f1..7c933cc3c 100644 --- a/lapack-netlib/SRC/zla_hercond_c.f +++ b/lapack-netlib/SRC/zla_hercond_c.f @@ -111,13 +111,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/zla_hercond_x.f b/lapack-netlib/SRC/zla_hercond_x.f index 9c19b487d..ee283c0b5 100644 --- a/lapack-netlib/SRC/zla_hercond_x.f +++ b/lapack-netlib/SRC/zla_hercond_x.f @@ -104,13 +104,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/zla_herfsx_extended.f b/lapack-netlib/SRC/zla_herfsx_extended.f index 5b43a58b9..8329080ef 100644 --- a/lapack-netlib/SRC/zla_herfsx_extended.f +++ b/lapack-netlib/SRC/zla_herfsx_extended.f @@ -66,11 +66,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -254,7 +254,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. 
*> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/zla_herpvgrw.f b/lapack-netlib/SRC/zla_herpvgrw.f index 557d6e830..d414c371f 100644 --- a/lapack-netlib/SRC/zla_herpvgrw.f +++ b/lapack-netlib/SRC/zla_herpvgrw.f @@ -102,7 +102,7 @@ *> as determined by ZHETRF. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (2*N) *> \endverbatim diff --git a/lapack-netlib/SRC/zla_porcond_c.f b/lapack-netlib/SRC/zla_porcond_c.f index a74295b41..2e591dd09 100644 --- a/lapack-netlib/SRC/zla_porcond_c.f +++ b/lapack-netlib/SRC/zla_porcond_c.f @@ -103,13 +103,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/zla_porcond_x.f b/lapack-netlib/SRC/zla_porcond_x.f index 0b2c84f42..4f409544f 100644 --- a/lapack-netlib/SRC/zla_porcond_x.f +++ b/lapack-netlib/SRC/zla_porcond_x.f @@ -96,13 +96,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/zla_porfsx_extended.f b/lapack-netlib/SRC/zla_porfsx_extended.f index 85dd42780..169a9a5d4 100644 --- a/lapack-netlib/SRC/zla_porfsx_extended.f +++ b/lapack-netlib/SRC/zla_porfsx_extended.f @@ -65,11 +65,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -246,7 +246,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/zla_porpvgrw.f b/lapack-netlib/SRC/zla_porpvgrw.f index cd71635ec..f669b2864 100644 --- a/lapack-netlib/SRC/zla_porpvgrw.f +++ b/lapack-netlib/SRC/zla_porpvgrw.f @@ -86,7 +86,7 @@ *> The leading dimension of the array AF. LDAF >= max(1,N). *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (2*N) *> \endverbatim diff --git a/lapack-netlib/SRC/zla_syrcond_c.f b/lapack-netlib/SRC/zla_syrcond_c.f index be9d14bd0..ff44d6c3b 100644 --- a/lapack-netlib/SRC/zla_syrcond_c.f +++ b/lapack-netlib/SRC/zla_syrcond_c.f @@ -111,13 +111,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). 
*> Workspace. diff --git a/lapack-netlib/SRC/zla_syrcond_x.f b/lapack-netlib/SRC/zla_syrcond_x.f index 2d0269092..53022bbfb 100644 --- a/lapack-netlib/SRC/zla_syrcond_x.f +++ b/lapack-netlib/SRC/zla_syrcond_x.f @@ -104,13 +104,13 @@ *> i > 0: The ith argument is invalid. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (2*N). *> Workspace. *> \endverbatim *> -*> \param[in] RWORK +*> \param[out] RWORK *> \verbatim *> RWORK is DOUBLE PRECISION array, dimension (N). *> Workspace. diff --git a/lapack-netlib/SRC/zla_syrfsx_extended.f b/lapack-netlib/SRC/zla_syrfsx_extended.f index a9716fd23..69844c94b 100644 --- a/lapack-netlib/SRC/zla_syrfsx_extended.f +++ b/lapack-netlib/SRC/zla_syrfsx_extended.f @@ -66,11 +66,11 @@ *> \verbatim *> PREC_TYPE is INTEGER *> Specifies the intermediate precision to be used in refinement. -*> The value is defined by ILAPREC(P) where P is a CHARACTER and -*> P = 'S': Single +*> The value is defined by ILAPREC(P) where P is a CHARACTER and P +*> = 'S': Single *> = 'D': Double *> = 'I': Indigenous -*> = 'X', 'E': Extra +*> = 'X' or 'E': Extra *> \endverbatim *> *> \param[in] UPLO @@ -254,7 +254,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith diff --git a/lapack-netlib/SRC/zla_syrpvgrw.f b/lapack-netlib/SRC/zla_syrpvgrw.f index ccf4fc2d6..82c9f52f8 100644 --- a/lapack-netlib/SRC/zla_syrpvgrw.f +++ b/lapack-netlib/SRC/zla_syrpvgrw.f @@ -102,7 +102,7 @@ *> as determined by ZSYTRF. *> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (2*N) *> \endverbatim diff --git a/lapack-netlib/SRC/zla_wwaddw.f b/lapack-netlib/SRC/zla_wwaddw.f index b4f9df332..f06113a95 100644 --- a/lapack-netlib/SRC/zla_wwaddw.f +++ b/lapack-netlib/SRC/zla_wwaddw.f @@ -36,7 +36,7 @@ *> ZLA_WWADDW adds a vector W into a doubled-single vector (X, Y). *> *> This works for all extant IBM's hex and binary floating point -*> arithmetics, but not for decimal. +*> arithmetic, but not for decimal. 
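For context on the doubled-single representation mentioned above: (X, Y) holds an unevaluated sum with X the high-order part, and adding W uses a two-sum style update. A sketch of the update loop, mirroring the algorithm ZLA_WWADDW documents (an illustration, not the shipped source; the (S+S)-S step forces S to working precision on hardware with extended registers):

      SUBROUTINE WWADDW_SKETCH( N, X, Y, W )
*     Add W(i) into the doubled-single pair (X(i), Y(i)).
      INTEGER            N
      COMPLEX*16         X( * ), Y( * ), W( * )
      INTEGER            I
      COMPLEX*16         S
      DO 10 I = 1, N
         S = X( I ) + W( I )
         S = ( S + S ) - S
         Y( I ) = ( ( X( I ) - S ) + W( I ) ) + Y( I )
         X( I ) = S
   10 CONTINUE
      RETURN
      END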
*> \endverbatim * * Arguments: diff --git a/lapack-netlib/SRC/zlahef_aa.f b/lapack-netlib/SRC/zlahef_aa.f index 8bad4aba9..ddd1e9493 100644 --- a/lapack-netlib/SRC/zlahef_aa.f +++ b/lapack-netlib/SRC/zlahef_aa.f @@ -288,8 +288,9 @@ * * Swap A(I1, I2+1:N) with A(I2, I2+1:N) * - CALL ZSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, - $ A( J1+I2-1, I2+1 ), LDA ) + IF( I2.LT.M ) + $ CALL ZSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, + $ A( J1+I2-1, I2+1 ), LDA ) * * Swap A(I1, I1) with A(I2,I2) * @@ -329,13 +330,15 @@ * Compute L(J+2, J+1) = WORK( 3:N ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:N, J) = L(J+2:N, J+1) * - IF( A( K, J+1 ).NE.ZERO ) THEN - ALPHA = ONE / A( K, J+1 ) - CALL ZCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) - CALL ZSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) - ELSE - CALL ZLASET( 'Full', 1, M-J-1, ZERO, ZERO, - $ A( K, J+2 ), LDA) + IF( J.LT.(M-1) ) THEN + IF( A( K, J+1 ).NE.ZERO ) THEN + ALPHA = ONE / A( K, J+1 ) + CALL ZCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) + CALL ZSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) + ELSE + CALL ZLASET( 'Full', 1, M-J-1, ZERO, ZERO, + $ A( K, J+2 ), LDA) + END IF END IF END IF J = J + 1 @@ -440,8 +443,9 @@ * * Swap A(I2+1:N, I1) with A(I2+1:N, I2) * - CALL ZSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, - $ A( I2+1, J1+I2-1 ), 1 ) + IF( I2.LT.M ) + $ CALL ZSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, + $ A( I2+1, J1+I2-1 ), 1 ) * * Swap A(I1, I1) with A(I2, I2) * @@ -481,13 +485,15 @@ * Compute L(J+2, J+1) = WORK( 3:N ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:N, J) = L(J+2:N, J+1) * - IF( A( J+1, K ).NE.ZERO ) THEN - ALPHA = ONE / A( J+1, K ) - CALL ZCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) - CALL ZSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) - ELSE - CALL ZLASET( 'Full', M-J-1, 1, ZERO, ZERO, - $ A( J+2, K ), LDA ) + IF( J.LT.(M-1) ) THEN + IF( A( J+1, K ).NE.ZERO ) THEN + ALPHA = ONE / A( J+1, K ) + CALL ZCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) + CALL ZSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) + ELSE + CALL ZLASET( 'Full', M-J-1, 1, ZERO, ZERO, + $ A( J+2, K ), LDA ) + END IF END IF END IF J = J + 1 diff --git a/lapack-netlib/SRC/zlahef_rk.f b/lapack-netlib/SRC/zlahef_rk.f index d8d54f4ce..6a8549cf5 100644 --- a/lapack-netlib/SRC/zlahef_rk.f +++ b/lapack-netlib/SRC/zlahef_rk.f @@ -331,7 +331,7 @@ * Factorize the trailing columns of A using the upper triangle * of A and working backwards, and compute the matrix W = U12*D * for use in updating A11 (note that conjg(W) is actually stored) -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = CZERO @@ -789,7 +789,7 @@ * of A and working forwards, and compute the matrix W = L21*D * for use in updating A22 (note that conjg(W) is actually stored) * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = CZERO * diff --git a/lapack-netlib/SRC/zlahqr.f b/lapack-netlib/SRC/zlahqr.f index 19015b3fa..0a8318874 100644 --- a/lapack-netlib/SRC/zlahqr.f +++ b/lapack-netlib/SRC/zlahqr.f @@ -138,26 +138,26 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: if INFO = i, ZLAHQR failed to compute all the +*> = 0: successful exit +*> > 0: if INFO = i, ZLAHQR failed to compute all the *> eigenvalues ILO to IHI in a total of 30 iterations *> per eigenvalue; elements i+1:ihi of W contain *> those eigenvalues which have been successfully *> computed. *> -*> If INFO .GT. 
0 and WANTT is .FALSE., then on exit, +*> If INFO > 0 and WANTT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the *> eigenvalues of the upper Hessenberg matrix -*> rows and columns ILO thorugh INFO of the final, +*> rows and columns ILO through INFO of the final, *> output value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> (*) (initial value of H)*U = U*(final value of H) -*> where U is an orthognal matrix. The final +*> where U is an orthogonal matrix. The final *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> (final value of Z) = (initial value of Z)*U *> where U is the orthogonal matrix in (*) *> (regardless of the value of WANTT.) diff --git a/lapack-netlib/SRC/zlamswlq.f b/lapack-netlib/SRC/zlamswlq.f index 0e0b0a1da..f32f5667c 100644 --- a/lapack-netlib/SRC/zlamswlq.f +++ b/lapack-netlib/SRC/zlamswlq.f @@ -1,3 +1,4 @@ +*> \brief \b ZLAMSWLQ * * Definition: * =========== diff --git a/lapack-netlib/SRC/zlamtsqr.f b/lapack-netlib/SRC/zlamtsqr.f index 1ee732425..034c45505 100644 --- a/lapack-netlib/SRC/zlamtsqr.f +++ b/lapack-netlib/SRC/zlamtsqr.f @@ -1,3 +1,4 @@ +*> \brief \b ZLAMTSQR * * Definition: * =========== diff --git a/lapack-netlib/SRC/zlangb.f b/lapack-netlib/SRC/zlangb.f index 949bb2c01..e40a470fd 100644 --- a/lapack-netlib/SRC/zlangb.f +++ b/lapack-netlib/SRC/zlangb.f @@ -130,6 +130,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER KL, KU, LDAB, N @@ -147,14 +148,17 @@ * .. * .. Local Scalars .. INTEGER I, J, K, L - DOUBLE PRECISION SCALE, SUM, VALUE, TEMP + DOUBLE PRECISION SUM, VALUE, TEMP +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT @@ -207,15 +211,22 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N L = MAX( 1, J-KU ) K = KU + 1 - J + L - CALL ZLASSQ( MIN( N, J+KL )-L+1, AB( K, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( MIN( N, J+KL )-L+1, AB( K, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANGB = VALUE diff --git a/lapack-netlib/SRC/zlange.f b/lapack-netlib/SRC/zlange.f index 5407decef..8162786fb 100644 --- a/lapack-netlib/SRC/zlange.f +++ b/lapack-netlib/SRC/zlange.f @@ -120,6 +120,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER LDA, M, N @@ -137,14 +138,17 @@ * .. * .. Local Scalars .. INTEGER I, J - DOUBLE PRECISION SCALE, SUM, VALUE, TEMP + DOUBLE PRECISION SUM, VALUE, TEMP +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. 
Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT @@ -196,13 +200,19 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N - CALL ZLASSQ( M, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( M, A( 1, J ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANGE = VALUE diff --git a/lapack-netlib/SRC/zlanhb.f b/lapack-netlib/SRC/zlanhb.f index b3717804f..16b5c117c 100644 --- a/lapack-netlib/SRC/zlanhb.f +++ b/lapack-netlib/SRC/zlanhb.f @@ -137,6 +137,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER K, LDAB, N @@ -154,14 +155,17 @@ * .. * .. Local Scalars .. INTEGER I, J, L - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, DBLE, MAX, MIN, SQRT @@ -233,39 +237,57 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( K.GT.0 ) THEN IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL ZLASSQ( MIN( J-1, K ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE L = K + 1 ELSE DO 120 J = 1, N - 1 - CALL ZLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE L = 1 END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) ELSE L = 1 END IF +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE DO 130 J = 1, N IF( DBLE( AB( L, J ) ).NE.ZERO ) THEN ABSA = ABS( DBLE( AB( L, J ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + CALL DCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANHB = VALUE diff --git a/lapack-netlib/SRC/zlanhe.f b/lapack-netlib/SRC/zlanhe.f index 7c7f7f3be..5aef9a756 100644 --- a/lapack-netlib/SRC/zlanhe.f +++ b/lapack-netlib/SRC/zlanhe.f @@ -129,6 +129,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER LDA, N @@ -146,14 +147,17 @@ * .. * .. Local Scalars .. INTEGER I, J - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. 
External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, DBLE, SQRT @@ -223,31 +227,48 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL ZLASSQ( J-1, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( J-1, A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL ZLASSQ( N-J, A( J+1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( N-J, A( J+1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* DO 130 I = 1, N IF( DBLE( A( I, I ) ).NE.ZERO ) THEN ABSA = ABS( DBLE( A( I, I ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( SSQ( 1 ).LT.ABSA ) THEN + SSQ( 2 ) = ONE + SSQ( 2 )*( SSQ( 1 ) / ABSA )**2 + SSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + SSQ( 2 ) = SSQ( 2 ) + ( ABSA / SSQ( 1 ) )**2 END IF END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANHE = VALUE diff --git a/lapack-netlib/SRC/zlanhp.f b/lapack-netlib/SRC/zlanhp.f index 9ded60746..d795aeca9 100644 --- a/lapack-netlib/SRC/zlanhp.f +++ b/lapack-netlib/SRC/zlanhp.f @@ -122,6 +122,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER N @@ -139,14 +140,17 @@ * .. * .. Local Scalars .. INTEGER I, J, K - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, DBLE, SQRT @@ -225,31 +229,48 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
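All of these normF rewrites use the same device: each column's sum of squares is held as a pair (scale, ssq) representing scale**2 * ssq, and the per-column pairs are merged into a running pair. A sketch of the merge, paraphrasing what the newly referenced DCOMBSSQ does (names follow LAPACK; consult the shipped source for the authoritative version):

      SUBROUTINE COMBSSQ_SKETCH( V1, V2 )
*     V1 := merge of V1 and V2, so that on exit
*     V1(1)**2 * V1(2) = V1in(1)**2 * V1in(2) + V2(1)**2 * V2(2).
      DOUBLE PRECISION   V1( 2 ), V2( 2 )
      DOUBLE PRECISION   ZERO
      PARAMETER          ( ZERO = 0.0D+0 )
      IF( V1( 1 ).GE.V2( 1 ) ) THEN
         IF( V1( 1 ).NE.ZERO ) THEN
            V1( 2 ) = V1( 2 ) + ( V2( 1 ) / V1( 1 ) )**2 * V2( 2 )
         ELSE
            V1( 2 ) = V1( 2 ) + V2( 2 )
         END IF
      ELSE
         V1( 2 ) = V2( 2 ) + ( V1( 1 ) / V2( 1 ) )**2 * V1( 2 )
         V1( 1 ) = V2( 1 )
      END IF
      RETURN
      END

Rescaling by the larger of the two scales keeps both contributions representable, which is why summing each column separately and then merging improves accuracy over threading a single (scale, ssq) pair through every ZLASSQ call.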
+* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE K = 2 IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL ZLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( J-1, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + J 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL ZLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( N-J, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 120 CONTINUE END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* K = 1 + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE DO 130 I = 1, N IF( DBLE( AP( K ) ).NE.ZERO ) THEN ABSA = ABS( DBLE( AP( K ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF IF( LSAME( UPLO, 'U' ) ) THEN @@ -258,7 +279,8 @@ K = K + N - I + 1 END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + CALL DCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANHP = VALUE diff --git a/lapack-netlib/SRC/zlanhs.f b/lapack-netlib/SRC/zlanhs.f index f2d36b304..bd8e86be9 100644 --- a/lapack-netlib/SRC/zlanhs.f +++ b/lapack-netlib/SRC/zlanhs.f @@ -114,6 +114,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM INTEGER LDA, N @@ -131,14 +132,17 @@ * .. * .. Local Scalars .. INTEGER I, J - DOUBLE PRECISION SCALE, SUM, VALUE + DOUBLE PRECISION SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT @@ -190,13 +194,20 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 90 J = 1, N - CALL ZLASSQ( MIN( N, J+1 ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( MIN( N, J+1 ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 90 CONTINUE - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANHS = VALUE diff --git a/lapack-netlib/SRC/zlansb.f b/lapack-netlib/SRC/zlansb.f index 3468c49b3..245dcaf4b 100644 --- a/lapack-netlib/SRC/zlansb.f +++ b/lapack-netlib/SRC/zlansb.f @@ -135,6 +135,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER K, LDAB, N @@ -152,14 +153,17 @@ * .. * .. Local Scalars .. INTEGER I, J, L - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT @@ -227,29 +231,47 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). 
+* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. +* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( K.GT.0 ) THEN IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL ZLASSQ( MIN( J-1, K ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE L = K + 1 ELSE DO 120 J = 1, N - 1 - CALL ZLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE L = 1 END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) ELSE L = 1 END IF - CALL ZLASSQ( N, AB( L, 1 ), LDAB, SCALE, SUM ) - VALUE = SCALE*SQRT( SUM ) +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( N, AB( L, 1 ), LDAB, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANSB = VALUE diff --git a/lapack-netlib/SRC/zlansp.f b/lapack-netlib/SRC/zlansp.f index 84fb972bb..fa9220487 100644 --- a/lapack-netlib/SRC/zlansp.f +++ b/lapack-netlib/SRC/zlansp.f @@ -120,6 +120,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER N @@ -137,14 +138,17 @@ * .. * .. Local Scalars .. INTEGER I, J, K - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, DBLE, DIMAG, SQRT @@ -219,40 +223,57 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
+* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE K = 2 IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL ZLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( J-1, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + J 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL ZLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( N-J, AP( K ), 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 120 CONTINUE END IF - SUM = 2*SUM + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* K = 1 + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE DO 130 I = 1, N IF( DBLE( AP( K ) ).NE.ZERO ) THEN ABSA = ABS( DBLE( AP( K ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF IF( DIMAG( AP( K ) ).NE.ZERO ) THEN ABSA = ABS( DIMAG( AP( K ) ) ) - IF( SCALE.LT.ABSA ) THEN - SUM = ONE + SUM*( SCALE / ABSA )**2 - SCALE = ABSA + IF( COLSSQ( 1 ).LT.ABSA ) THEN + COLSSQ( 2 ) = ONE + COLSSQ(2)*( COLSSQ(1) / ABSA )**2 + COLSSQ( 1 ) = ABSA ELSE - SUM = SUM + ( ABSA / SCALE )**2 + COLSSQ( 2 ) = COLSSQ( 2 ) + ( ABSA / COLSSQ( 1 ) )**2 END IF END IF IF( LSAME( UPLO, 'U' ) ) THEN @@ -261,7 +282,8 @@ K = K + N - I + 1 END IF 130 CONTINUE - VALUE = SCALE*SQRT( SUM ) + CALL DCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANSP = VALUE diff --git a/lapack-netlib/SRC/zlansy.f b/lapack-netlib/SRC/zlansy.f index 58269a911..e022f85e1 100644 --- a/lapack-netlib/SRC/zlansy.f +++ b/lapack-netlib/SRC/zlansy.f @@ -128,6 +128,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER NORM, UPLO INTEGER LDA, N @@ -145,14 +146,17 @@ * .. * .. Local Scalars .. INTEGER I, J - DOUBLE PRECISION ABSA, SCALE, SUM, VALUE + DOUBLE PRECISION ABSA, SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT @@ -218,21 +222,39 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
+* + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE +* +* Sum off-diagonals * - SCALE = ZERO - SUM = ONE IF( LSAME( UPLO, 'U' ) ) THEN DO 110 J = 2, N - CALL ZLASSQ( J-1, A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( J-1, A( 1, J ), 1, COLSSQ(1), COLSSQ(2) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 110 CONTINUE ELSE DO 120 J = 1, N - 1 - CALL ZLASSQ( N-J, A( J+1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( N-J, A( J+1, J ), 1, COLSSQ(1), COLSSQ(2) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 120 CONTINUE END IF - SUM = 2*SUM - CALL ZLASSQ( N, A, LDA+1, SCALE, SUM ) - VALUE = SCALE*SQRT( SUM ) + SSQ( 2 ) = 2*SSQ( 2 ) +* +* Sum diagonal +* + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( N, A, LDA+1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANSY = VALUE diff --git a/lapack-netlib/SRC/zlantb.f b/lapack-netlib/SRC/zlantb.f index 3077ba151..f02509223 100644 --- a/lapack-netlib/SRC/zlantb.f +++ b/lapack-netlib/SRC/zlantb.f @@ -146,6 +146,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER K, LDAB, N @@ -164,14 +165,17 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J, L - DOUBLE PRECISION SCALE, SUM, VALUE + DOUBLE PRECISION SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT @@ -313,46 +317,61 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N IF( K.GT.0 ) THEN DO 280 J = 2, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL ZLASSQ( MIN( J-1, K ), - $ AB( MAX( K+2-J, 1 ), J ), 1, SCALE, - $ SUM ) + $ AB( MAX( K+2-J, 1 ), J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 280 CONTINUE END IF ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 290 J = 1, N + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE CALL ZLASSQ( MIN( J, K+1 ), AB( MAX( K+2-J, 1 ), J ), - $ 1, SCALE, SUM ) + $ 1, COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 290 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N IF( K.GT.0 ) THEN DO 300 J = 1, N - 1 - CALL ZLASSQ( MIN( N-J, K ), AB( 2, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( MIN( N-J, K ), AB( 2, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 300 CONTINUE END IF ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 310 J = 1, N - CALL ZLASSQ( MIN( N-J+1, K+1 ), AB( 1, J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( MIN( N-J+1, K+1 ), AB( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 310 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANTB = VALUE diff --git a/lapack-netlib/SRC/zlantp.f b/lapack-netlib/SRC/zlantp.f index 69dbaa5bc..d32a00f13 100644 --- a/lapack-netlib/SRC/zlantp.f +++ b/lapack-netlib/SRC/zlantp.f @@ -130,6 +130,7 @@ * -- Univ. of California Berkeley, Univ. 
of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER N @@ -148,14 +149,17 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J, K - DOUBLE PRECISION SCALE, SUM, VALUE + DOUBLE PRECISION SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, SQRT @@ -308,45 +312,64 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. * IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N K = 2 DO 280 J = 2, N - CALL ZLASSQ( J-1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( J-1, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + J 280 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE K = 1 DO 290 J = 1, N - CALL ZLASSQ( J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( J, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + J 290 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = N + SSQ( 1 ) = ONE + SSQ( 2 ) = N K = 2 DO 300 J = 1, N - 1 - CALL ZLASSQ( N-J, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( N-J, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 300 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE K = 1 DO 310 J = 1, N - CALL ZLASSQ( N-J+1, AP( K ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( N-J+1, AP( K ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) K = K + N - J + 1 310 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANTP = VALUE diff --git a/lapack-netlib/SRC/zlantr.f b/lapack-netlib/SRC/zlantr.f index 04ee482f7..7d63c972e 100644 --- a/lapack-netlib/SRC/zlantr.f +++ b/lapack-netlib/SRC/zlantr.f @@ -147,6 +147,7 @@ * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * December 2016 * + IMPLICIT NONE * .. Scalar Arguments .. CHARACTER DIAG, NORM, UPLO INTEGER LDA, M, N @@ -165,14 +166,17 @@ * .. Local Scalars .. LOGICAL UDIAG INTEGER I, J - DOUBLE PRECISION SCALE, SUM, VALUE + DOUBLE PRECISION SUM, VALUE +* .. +* .. Local Arrays .. + DOUBLE PRECISION SSQ( 2 ), COLSSQ( 2 ) * .. * .. External Functions .. LOGICAL LSAME, DISNAN EXTERNAL LSAME, DISNAN * .. * .. External Subroutines .. - EXTERNAL ZLASSQ + EXTERNAL ZLASSQ, DCOMBSSQ * .. * .. Intrinsic Functions .. INTRINSIC ABS, MIN, SQRT @@ -283,7 +287,7 @@ END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - DO 210 I = 1, N + DO 210 I = 1, MIN( M, N ) WORK( I ) = ONE 210 CONTINUE DO 220 I = N + 1, M @@ -313,38 +317,56 @@ ELSE IF( ( LSAME( NORM, 'F' ) ) .OR. ( LSAME( NORM, 'E' ) ) ) THEN * * Find normF(A). +* SSQ(1) is scale +* SSQ(2) is sum-of-squares +* For better accuracy, sum each column separately. 
* IF( LSAME( UPLO, 'U' ) ) THEN IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = MIN( M, N ) + SSQ( 1 ) = ONE + SSQ( 2 ) = MIN( M, N ) DO 290 J = 2, N - CALL ZLASSQ( MIN( M, J-1 ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( MIN( M, J-1 ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 290 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 300 J = 1, N - CALL ZLASSQ( MIN( M, J ), A( 1, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( MIN( M, J ), A( 1, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 300 CONTINUE END IF ELSE IF( LSAME( DIAG, 'U' ) ) THEN - SCALE = ONE - SUM = MIN( M, N ) + SSQ( 1 ) = ONE + SSQ( 2 ) = MIN( M, N ) DO 310 J = 1, N - CALL ZLASSQ( M-J, A( MIN( M, J+1 ), J ), 1, SCALE, - $ SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( M-J, A( MIN( M, J+1 ), J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 310 CONTINUE ELSE - SCALE = ZERO - SUM = ONE + SSQ( 1 ) = ZERO + SSQ( 2 ) = ONE DO 320 J = 1, N - CALL ZLASSQ( M-J+1, A( J, J ), 1, SCALE, SUM ) + COLSSQ( 1 ) = ZERO + COLSSQ( 2 ) = ONE + CALL ZLASSQ( M-J+1, A( J, J ), 1, + $ COLSSQ( 1 ), COLSSQ( 2 ) ) + CALL DCOMBSSQ( SSQ, COLSSQ ) 320 CONTINUE END IF END IF - VALUE = SCALE*SQRT( SUM ) + VALUE = SSQ( 1 )*SQRT( SSQ( 2 ) ) END IF * ZLANTR = VALUE diff --git a/lapack-netlib/SRC/zlaqps.f b/lapack-netlib/SRC/zlaqps.f index c142e8c69..66c721517 100644 --- a/lapack-netlib/SRC/zlaqps.f +++ b/lapack-netlib/SRC/zlaqps.f @@ -127,7 +127,7 @@ *> \param[in,out] AUXV *> \verbatim *> AUXV is COMPLEX*16 array, dimension (NB) -*> Auxiliar vector. +*> Auxiliary vector. *> \endverbatim *> *> \param[in,out] F diff --git a/lapack-netlib/SRC/zlaqr0.f b/lapack-netlib/SRC/zlaqr0.f index 59b8ed7a6..feffe9782 100644 --- a/lapack-netlib/SRC/zlaqr0.f +++ b/lapack-netlib/SRC/zlaqr0.f @@ -66,7 +66,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -79,12 +79,12 @@ *> IHI is INTEGER *> *> It is assumed that H is already upper triangular in rows -*> and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, +*> and columns 1:ILO-1 and IHI+1:N and, if ILO > 1, *> H(ILO,ILO-1) is zero. ILO and IHI are normally set by a *> previous call to ZGEBAL, and then passed to ZGEHRD when the *> matrix output by ZGEBAL is reduced to Hessenberg form. *> Otherwise, ILO and IHI should be set to 1 and N, -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -96,17 +96,17 @@ *> contains the upper triangular matrix T from the Schur *> decomposition (the Schur form). If INFO = 0 and WANT is *> .FALSE., then the contents of H are unspecified on exit. -*> (The output value of H when INFO.GT.0 is given under the +*> (The output value of H when INFO > 0 is given under the *> description of INFO below.) *> -*> This subroutine may explicitly set H(i,j) = 0 for i.GT.j and +*> This subroutine may explicitly set H(i,j) = 0 for i > j and *> j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). 
*> \endverbatim *> *> \param[out] W @@ -128,7 +128,7 @@ *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be *> applied if WANTZ is .TRUE.. -*> 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. +*> 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -138,7 +138,7 @@ *> If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is *> replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the *> orthogonal Schur factor of H(ILO:IHI,ILO:IHI). -*> (The output value of Z when INFO.GT.0 is given under +*> (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -146,7 +146,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if WANTZ is .TRUE. -*> then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. +*> then LDZ >= MAX(1,IHIZ). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -159,7 +159,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient, but LWORK typically as large as 6*N may *> be required for optimal performance. A workspace query *> to determine the optimal workspace size is recommended. @@ -175,19 +175,19 @@ *> \param[out] INFO *> \verbatim *> INFO is INTEGER -*> = 0: successful exit -*> .GT. 0: if INFO = i, ZLAQR0 failed to compute all of +*> = 0: successful exit +*> > 0: if INFO = i, ZLAQR0 failed to compute all of *> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR *> and WI contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 0 and WANT is .FALSE., then on exit, +*> If INFO > 0 and WANT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -195,7 +195,7 @@ *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> *> (final value of Z(ILO:IHI,ILOZ:IHIZ) *> = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U @@ -203,7 +203,7 @@ *> where U is the unitary matrix in (*) (regard- *> less of the value of WANTT.) *> -*> If INFO .GT. 0 and WANTZ is .FALSE., then Z is not +*> If INFO > 0 and WANTZ is .FALSE., then Z is not *> accessed. *> \endverbatim * @@ -641,7 +641,7 @@ END IF END IF * -* ==== Use up to NS of the the smallest magnatiude +* ==== Use up to NS of the the smallest magnitude * . shifts. If there aren't NS shifts available, * . then use them all, possibly dropping one to * . make the number of shifts even. ==== diff --git a/lapack-netlib/SRC/zlaqr1.f b/lapack-netlib/SRC/zlaqr1.f index 34341cb10..fc2df3cb4 100644 --- a/lapack-netlib/SRC/zlaqr1.f +++ b/lapack-netlib/SRC/zlaqr1.f @@ -64,7 +64,7 @@ *> \verbatim *> LDH is INTEGER *> The leading dimension of H as declared in -*> the calling procedure. LDH.GE.N +*> the calling procedure. LDH >= N *> \endverbatim *> *> \param[in] S1 diff --git a/lapack-netlib/SRC/zlaqr2.f b/lapack-netlib/SRC/zlaqr2.f index e6e2ea48c..b5434e899 100644 --- a/lapack-netlib/SRC/zlaqr2.f +++ b/lapack-netlib/SRC/zlaqr2.f @@ -103,7 +103,7 @@ *> \param[in] NW *> \verbatim *> NW is INTEGER -*> Deflation window size. 1 .LE. NW .LE. 
(KBOT-KTOP+1). +*> Deflation window size. 1 <= NW <= (KBOT-KTOP+1). *> \endverbatim *> *> \param[in,out] H @@ -121,7 +121,7 @@ *> \verbatim *> LDH is INTEGER *> Leading dimension of H just as declared in the calling -*> subroutine. N .LE. LDH +*> subroutine. N <= LDH *> \endverbatim *> *> \param[in] ILOZ @@ -133,7 +133,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -149,7 +149,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of Z just as declared in the -*> calling subroutine. 1 .LE. LDZ. +*> calling subroutine. 1 <= LDZ. *> \endverbatim *> *> \param[out] NS @@ -186,13 +186,13 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of V just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[in] NH *> \verbatim *> NH is INTEGER -*> The number of columns of T. NH.GE.NW. +*> The number of columns of T. NH >= NW. *> \endverbatim *> *> \param[out] T @@ -204,14 +204,14 @@ *> \verbatim *> LDT is INTEGER *> The leading dimension of T just as declared in the -*> calling subroutine. NW .LE. LDT +*> calling subroutine. NW <= LDT *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> The number of rows of work array WV available for -*> workspace. NV.GE.NW. +*> workspace. NV >= NW. *> \endverbatim *> *> \param[out] WV @@ -223,7 +223,7 @@ *> \verbatim *> LDWV is INTEGER *> The leading dimension of W just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/zlaqr3.f b/lapack-netlib/SRC/zlaqr3.f index 64ab59f31..dfb798ca9 100644 --- a/lapack-netlib/SRC/zlaqr3.f +++ b/lapack-netlib/SRC/zlaqr3.f @@ -100,7 +100,7 @@ *> \param[in] NW *> \verbatim *> NW is INTEGER -*> Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). +*> Deflation window size. 1 <= NW <= (KBOT-KTOP+1). *> \endverbatim *> *> \param[in,out] H @@ -118,7 +118,7 @@ *> \verbatim *> LDH is INTEGER *> Leading dimension of H just as declared in the calling -*> subroutine. N .LE. LDH +*> subroutine. N <= LDH *> \endverbatim *> *> \param[in] ILOZ @@ -130,7 +130,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -146,7 +146,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of Z just as declared in the -*> calling subroutine. 1 .LE. LDZ. +*> calling subroutine. 1 <= LDZ. *> \endverbatim *> *> \param[out] NS @@ -183,13 +183,13 @@ *> \verbatim *> LDV is INTEGER *> The leading dimension of V just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[in] NH *> \verbatim *> NH is INTEGER -*> The number of columns of T. NH.GE.NW. +*> The number of columns of T. NH >= NW. *> \endverbatim *> *> \param[out] T @@ -201,14 +201,14 @@ *> \verbatim *> LDT is INTEGER *> The leading dimension of T just as declared in the -*> calling subroutine. NW .LE. LDT +*> calling subroutine. NW <= LDT *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> The number of rows of work array WV available for -*> workspace. NV.GE.NW. +*> workspace. NV >= NW. 
*> \endverbatim *> *> \param[out] WV @@ -220,7 +220,7 @@ *> \verbatim *> LDWV is INTEGER *> The leading dimension of W just as declared in the -*> calling subroutine. NW .LE. LDV +*> calling subroutine. NW <= LDV *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/zlaqr4.f b/lapack-netlib/SRC/zlaqr4.f index 012fa37e2..a88f6508e 100644 --- a/lapack-netlib/SRC/zlaqr4.f +++ b/lapack-netlib/SRC/zlaqr4.f @@ -73,7 +73,7 @@ *> \param[in] N *> \verbatim *> N is INTEGER -*> The order of the matrix H. N .GE. 0. +*> The order of the matrix H. N >= 0. *> \endverbatim *> *> \param[in] ILO @@ -85,12 +85,12 @@ *> \verbatim *> IHI is INTEGER *> It is assumed that H is already upper triangular in rows -*> and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, +*> and columns 1:ILO-1 and IHI+1:N and, if ILO > 1, *> H(ILO,ILO-1) is zero. ILO and IHI are normally set by a *> previous call to ZGEBAL, and then passed to ZGEHRD when the *> matrix output by ZGEBAL is reduced to Hessenberg form. *> Otherwise, ILO and IHI should be set to 1 and N, -*> respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. +*> respectively. If N > 0, then 1 <= ILO <= IHI <= N. *> If N = 0, then ILO = 1 and IHI = 0. *> \endverbatim *> @@ -102,17 +102,17 @@ *> contains the upper triangular matrix T from the Schur *> decomposition (the Schur form). If INFO = 0 and WANT is *> .FALSE., then the contents of H are unspecified on exit. -*> (The output value of H when INFO.GT.0 is given under the +*> (The output value of H when INFO > 0 is given under the *> description of INFO below.) *> -*> This subroutine may explicitly set H(i,j) = 0 for i.GT.j and +*> This subroutine may explicitly set H(i,j) = 0 for i > j and *> j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. *> \endverbatim *> *> \param[in] LDH *> \verbatim *> LDH is INTEGER -*> The leading dimension of the array H. LDH .GE. max(1,N). +*> The leading dimension of the array H. LDH >= max(1,N). *> \endverbatim *> *> \param[out] W @@ -134,7 +134,7 @@ *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be *> applied if WANTZ is .TRUE.. -*> 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. +*> 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. *> \endverbatim *> *> \param[in,out] Z @@ -144,7 +144,7 @@ *> If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is *> replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the *> orthogonal Schur factor of H(ILO:IHI,ILO:IHI). -*> (The output value of Z when INFO.GT.0 is given under +*> (The output value of Z when INFO > 0 is given under *> the description of INFO below.) *> \endverbatim *> @@ -152,7 +152,7 @@ *> \verbatim *> LDZ is INTEGER *> The leading dimension of the array Z. if WANTZ is .TRUE. -*> then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. +*> then LDZ >= MAX(1,IHIZ). Otherwise, LDZ >= 1. *> \endverbatim *> *> \param[out] WORK @@ -165,7 +165,7 @@ *> \param[in] LWORK *> \verbatim *> LWORK is INTEGER -*> The dimension of the array WORK. LWORK .GE. max(1,N) +*> The dimension of the array WORK. LWORK >= max(1,N) *> is sufficient, but LWORK typically as large as 6*N may *> be required for optimal performance. A workspace query *> to determine the optimal workspace size is recommended. @@ -182,18 +182,18 @@ *> \verbatim *> INFO is INTEGER *> = 0: successful exit -*> .GT. 0: if INFO = i, ZLAQR4 failed to compute all of +*> > 0: if INFO = i, ZLAQR4 failed to compute all of *> the eigenvalues. Elements 1:ilo-1 and i+1:n of WR *> and WI contain those eigenvalues which have been *> successfully computed. (Failures are rare.) *> -*> If INFO .GT. 
0 and WANT is .FALSE., then on exit, +*> If INFO > 0 and WANT is .FALSE., then on exit, *> the remaining unconverged eigenvalues are the eigen- *> values of the upper Hessenberg matrix rows and *> columns ILO through INFO of the final, output *> value of H. *> -*> If INFO .GT. 0 and WANTT is .TRUE., then on exit +*> If INFO > 0 and WANTT is .TRUE., then on exit *> *> (*) (initial value of H)*U = U*(final value of H) *> @@ -201,7 +201,7 @@ *> value of H is upper Hessenberg and triangular in *> rows and columns INFO+1 through IHI. *> -*> If INFO .GT. 0 and WANTZ is .TRUE., then on exit +*> If INFO > 0 and WANTZ is .TRUE., then on exit *> *> (final value of Z(ILO:IHI,ILOZ:IHIZ) *> = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U @@ -209,7 +209,7 @@ *> where U is the unitary matrix in (*) (regard- *> less of the value of WANTT.) *> -*> If INFO .GT. 0 and WANTZ is .FALSE., then Z is not +*> If INFO > 0 and WANTZ is .FALSE., then Z is not *> accessed. *> \endverbatim * @@ -641,7 +641,7 @@ END IF END IF * -* ==== Use up to NS of the the smallest magnatiude +* ==== Use up to NS of the the smallest magnitude * . shifts. If there aren't NS shifts available, * . then use them all, possibly dropping one to * . make the number of shifts even. ==== diff --git a/lapack-netlib/SRC/zlaqr5.f b/lapack-netlib/SRC/zlaqr5.f index 0dfbce82c..9ff7e7eca 100644 --- a/lapack-netlib/SRC/zlaqr5.f +++ b/lapack-netlib/SRC/zlaqr5.f @@ -125,7 +125,7 @@ *> \verbatim *> LDH is INTEGER *> LDH is the leading dimension of H just as declared in the -*> calling procedure. LDH.GE.MAX(1,N). +*> calling procedure. LDH >= MAX(1,N). *> \endverbatim *> *> \param[in] ILOZ @@ -137,7 +137,7 @@ *> \verbatim *> IHIZ is INTEGER *> Specify the rows of Z to which transformations must be -*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N +*> applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N *> \endverbatim *> *> \param[in,out] Z @@ -153,7 +153,7 @@ *> \verbatim *> LDZ is INTEGER *> LDA is the leading dimension of Z just as declared in -*> the calling procedure. LDZ.GE.N. +*> the calling procedure. LDZ >= N. *> \endverbatim *> *> \param[out] V @@ -165,7 +165,7 @@ *> \verbatim *> LDV is INTEGER *> LDV is the leading dimension of V as declared in the -*> calling procedure. LDV.GE.3. +*> calling procedure. LDV >= 3. *> \endverbatim *> *> \param[out] U @@ -177,33 +177,14 @@ *> \verbatim *> LDU is INTEGER *> LDU is the leading dimension of U just as declared in the -*> in the calling subroutine. LDU.GE.3*NSHFTS-3. -*> \endverbatim -*> -*> \param[in] NH -*> \verbatim -*> NH is INTEGER -*> NH is the number of columns in array WH available for -*> workspace. NH.GE.1. -*> \endverbatim -*> -*> \param[out] WH -*> \verbatim -*> WH is COMPLEX*16 array, dimension (LDWH,NH) -*> \endverbatim -*> -*> \param[in] LDWH -*> \verbatim -*> LDWH is INTEGER -*> Leading dimension of WH just as declared in the -*> calling procedure. LDWH.GE.3*NSHFTS-3. +*> in the calling subroutine. LDU >= 3*NSHFTS-3. *> \endverbatim *> *> \param[in] NV *> \verbatim *> NV is INTEGER *> NV is the number of rows in WV agailable for workspace. -*> NV.GE.1. +*> NV >= 1. *> \endverbatim *> *> \param[out] WV @@ -215,9 +196,28 @@ *> \verbatim *> LDWV is INTEGER *> LDWV is the leading dimension of WV as declared in the -*> in the calling subroutine. LDWV.GE.NV. +*> in the calling subroutine. LDWV >= NV. *> \endverbatim * +*> \param[in] NH +*> \verbatim +*> NH is INTEGER +*> NH is the number of columns in array WH available for +*> workspace. NH >= 1. 
+*> \endverbatim +*> +*> \param[out] WH +*> \verbatim +*> WH is COMPLEX*16 array, dimension (LDWH,NH) +*> \endverbatim +*> +*> \param[in] LDWH +*> \verbatim +*> LDWH is INTEGER +*> Leading dimension of WH just as declared in the +*> calling procedure. LDWH >= 3*NSHFTS-3. +*> \endverbatim +*> * Authors: * ======== * diff --git a/lapack-netlib/SRC/zlarfb.f b/lapack-netlib/SRC/zlarfb.f index b4a2b4d1a..3da49f2fc 100644 --- a/lapack-netlib/SRC/zlarfb.f +++ b/lapack-netlib/SRC/zlarfb.f @@ -92,6 +92,8 @@ *> K is INTEGER *> The order of the matrix T (= the number of elementary *> reflectors whose product defines the block reflector). +*> If SIDE = 'L', M >= K >= 0; +*> if SIDE = 'R', N >= K >= 0. *> \endverbatim *> *> \param[in] V diff --git a/lapack-netlib/SRC/zlarfx.f b/lapack-netlib/SRC/zlarfx.f index 685d164eb..ba6d4ed74 100644 --- a/lapack-netlib/SRC/zlarfx.f +++ b/lapack-netlib/SRC/zlarfx.f @@ -94,7 +94,7 @@ *> \param[in] LDC *> \verbatim *> LDC is INTEGER -*> The leading dimension of the array C. LDA >= max(1,M). +*> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK diff --git a/lapack-netlib/SRC/zlarfy.f b/lapack-netlib/SRC/zlarfy.f index 57605731b..4c9e08bac 100644 --- a/lapack-netlib/SRC/zlarfy.f +++ b/lapack-netlib/SRC/zlarfy.f @@ -103,7 +103,7 @@ * *> \date December 2016 * -*> \ingroup complex16_eig +*> \ingroup complex16OTHERauxiliary * * ===================================================================== SUBROUTINE ZLARFY( UPLO, N, V, INCV, TAU, C, LDC, WORK ) diff --git a/lapack-netlib/SRC/zlargv.f b/lapack-netlib/SRC/zlargv.f index 1e17983d5..f83ca1851 100644 --- a/lapack-netlib/SRC/zlargv.f +++ b/lapack-netlib/SRC/zlargv.f @@ -201,7 +201,7 @@ FS = FS*SAFMN2 GS = GS*SAFMN2 SCALE = SCALE*SAFMN2 - IF( SCALE.GE.SAFMX2 ) + IF( SCALE.GE.SAFMX2 .AND. COUNT .LT. 20 ) $ GO TO 10 ELSE IF( SCALE.LE.SAFMN2 ) THEN IF( G.EQ.CZERO ) THEN diff --git a/lapack-netlib/SRC/zlarrv.f b/lapack-netlib/SRC/zlarrv.f index 67a67584c..23976dbef 100644 --- a/lapack-netlib/SRC/zlarrv.f +++ b/lapack-netlib/SRC/zlarrv.f @@ -143,7 +143,7 @@ *> RTOL2 is DOUBLE PRECISION *> Parameters for bisection. *> An interval [LEFT,RIGHT] has converged if -*> RIGHT-LEFT.LT.MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) +*> RIGHT-LEFT < MAX( RTOL1*GAP, RTOL2*MAX(|LEFT|,|RIGHT|) ) *> \endverbatim *> *> \param[in,out] W diff --git a/lapack-netlib/SRC/zlartg.f b/lapack-netlib/SRC/zlartg.f index 8989bb896..894b4ded0 100644 --- a/lapack-netlib/SRC/zlartg.f +++ b/lapack-netlib/SRC/zlartg.f @@ -161,7 +161,7 @@ FS = FS*SAFMN2 GS = GS*SAFMN2 SCALE = SCALE*SAFMN2 - IF( SCALE.GE.SAFMX2 ) + IF( SCALE.GE.SAFMX2 .AND. COUNT .LT. 20 ) $ GO TO 10 ELSE IF( SCALE.LE.SAFMN2 ) THEN IF( G.EQ.CZERO.OR.DISNAN( ABS( G ) ) ) THEN diff --git a/lapack-netlib/SRC/zlassq.f b/lapack-netlib/SRC/zlassq.f index fd13811bd..dccec988d 100644 --- a/lapack-netlib/SRC/zlassq.f +++ b/lapack-netlib/SRC/zlassq.f @@ -41,7 +41,7 @@ *> where x( i ) = abs( X( 1 + ( i - 1 )*INCX ) ). The value of sumsq is *> assumed to be at least unity and the value of ssq will then satisfy *> -*> 1.0 .le. ssq .le. ( sumsq + 2*n ). +*> 1.0 <= ssq <= ( sumsq + 2*n ). *> *> scale is assumed to be non-negative and scl returns the value *> @@ -65,7 +65,7 @@ *> *> \param[in] X *> \verbatim -*> X is COMPLEX*16 array, dimension (N) +*> X is COMPLEX*16 array, dimension (1+(N-1)*INCX) *> The vector x as described above. *> x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. 
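The COUNT .LT. 20 condition added to ZLARGV and ZLARTG above bounds the scale-down loop: for finite nonzero inputs a few multiplications by SAFMN2 bring SCALE below SAFMX2, but an Inf or NaN in F or G kept the old test true indefinitely. In context the patched loop has this shape (schematic, following the code surrounding the changed line; COUNT is afterwards used to undo the scaling of the result):

      COUNT = 0
      IF( SCALE.GE.SAFMX2 ) THEN
   10    CONTINUE
         COUNT = COUNT + 1
         FS = FS*SAFMN2
         GS = GS*SAFMN2
         SCALE = SCALE*SAFMN2
*        Stop when SCALE is back in range, or unconditionally after
*        20 steps, so a non-finite SCALE cannot spin forever.
         IF( SCALE.GE.SAFMX2 .AND. COUNT.LT.20 )
     $      GO TO 10
      END IF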
*> \endverbatim diff --git a/lapack-netlib/SRC/zlaswlq.f b/lapack-netlib/SRC/zlaswlq.f index 24dd41d79..990630925 100644 --- a/lapack-netlib/SRC/zlaswlq.f +++ b/lapack-netlib/SRC/zlaswlq.f @@ -1,3 +1,4 @@ +*> \brief \b ZLASWLQ * * Definition: * =========== @@ -18,9 +19,20 @@ *> *> \verbatim *> -*> ZLASWLQ computes a blocked Short-Wide LQ factorization of a -*> M-by-N matrix A, where N >= M: -*> A = L * Q +*> ZLASWLQ computes a blocked Tall-Skinny LQ factorization of +*> a complexx M-by-N matrix A for M <= N: +*> +*> A = ( L 0 ) * Q, +*> +*> where: +*> +*> Q is a n-by-N orthogonal matrix, stored on exit in an implicit +*> form in the elements above the digonal of the array A and in +*> the elemenst of the array T; +*> L is an lower-triangular M-by-M matrix stored on exit in +*> the elements on and below the diagonal of the array A. +*> 0 is a M-by-(N-M) zero matrix, if M < N, and is not stored. +*> *> \endverbatim * * Arguments: @@ -150,7 +162,7 @@ SUBROUTINE ZLASWLQ( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK, $ INFO) * -* -- LAPACK computational routine (version 3.7.1) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- * June 2017 diff --git a/lapack-netlib/SRC/zlasyf_aa.f b/lapack-netlib/SRC/zlasyf_aa.f index f321b72de..b1f1c2790 100644 --- a/lapack-netlib/SRC/zlasyf_aa.f +++ b/lapack-netlib/SRC/zlasyf_aa.f @@ -284,8 +284,9 @@ * * Swap A(I1, I2+1:M) with A(I2, I2+1:M) * - CALL ZSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, - $ A( J1+I2-1, I2+1 ), LDA ) + IF( I2.LT.M ) + $ CALL ZSWAP( M-I2, A( J1+I1-1, I2+1 ), LDA, + $ A( J1+I2-1, I2+1 ), LDA ) * * Swap A(I1, I1) with A(I2,I2) * @@ -325,13 +326,15 @@ * Compute L(J+2, J+1) = WORK( 3:M ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:M, J) = L(J+2:M, J+1) * - IF( A( K, J+1 ).NE.ZERO ) THEN - ALPHA = ONE / A( K, J+1 ) - CALL ZCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) - CALL ZSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) - ELSE - CALL ZLASET( 'Full', 1, M-J-1, ZERO, ZERO, - $ A( K, J+2 ), LDA) + IF( J.LT.(M-1) ) THEN + IF( A( K, J+1 ).NE.ZERO ) THEN + ALPHA = ONE / A( K, J+1 ) + CALL ZCOPY( M-J-1, WORK( 3 ), 1, A( K, J+2 ), LDA ) + CALL ZSCAL( M-J-1, ALPHA, A( K, J+2 ), LDA ) + ELSE + CALL ZLASET( 'Full', 1, M-J-1, ZERO, ZERO, + $ A( K, J+2 ), LDA) + END IF END IF END IF J = J + 1 @@ -432,8 +435,9 @@ * * Swap A(I2+1:M, I1) with A(I2+1:M, I2) * - CALL ZSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, - $ A( I2+1, J1+I2-1 ), 1 ) + IF( I2.LT.M ) + $ CALL ZSWAP( M-I2, A( I2+1, J1+I1-1 ), 1, + $ A( I2+1, J1+I2-1 ), 1 ) * * Swap A(I1, I1) with A(I2, I2) * @@ -473,13 +477,15 @@ * Compute L(J+2, J+1) = WORK( 3:M ) / T(J, J+1), * where A(J, J+1) = T(J, J+1) and A(J+2:M, J) = L(J+2:M, J+1) * - IF( A( J+1, K ).NE.ZERO ) THEN - ALPHA = ONE / A( J+1, K ) - CALL ZCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) - CALL ZSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) - ELSE - CALL ZLASET( 'Full', M-J-1, 1, ZERO, ZERO, - $ A( J+2, K ), LDA ) + IF( J.LT.(M-1) ) THEN + IF( A( J+1, K ).NE.ZERO ) THEN + ALPHA = ONE / A( J+1, K ) + CALL ZCOPY( M-J-1, WORK( 3 ), 1, A( J+2, K ), 1 ) + CALL ZSCAL( M-J-1, ALPHA, A( J+2, K ), 1 ) + ELSE + CALL ZLASET( 'Full', M-J-1, 1, ZERO, ZERO, + $ A( J+2, K ), LDA ) + END IF END IF END IF J = J + 1 diff --git a/lapack-netlib/SRC/zlasyf_rk.f b/lapack-netlib/SRC/zlasyf_rk.f index 664ed93f3..b6c5a27c6 100644 --- a/lapack-netlib/SRC/zlasyf_rk.f +++ b/lapack-netlib/SRC/zlasyf_rk.f @@ -330,7 +330,7 @@ * of A and working 
backwards, and compute the matrix W = U12*D * for use in updating A11 * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = CZERO @@ -658,7 +658,7 @@ * of A and working forwards, and compute the matrix W = L21*D * for use in updating A22 * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = CZERO * diff --git a/lapack-netlib/SRC/zlatdf.f b/lapack-netlib/SRC/zlatdf.f index ab88570c5..4b8b5e330 100644 --- a/lapack-netlib/SRC/zlatdf.f +++ b/lapack-netlib/SRC/zlatdf.f @@ -261,7 +261,7 @@ * * Solve for U- part, lockahead for RHS(N) = +-1. This is not done * In BSOLVE and will hopefully give us a better estimate because -* any ill-conditioning of the original matrix is transfered to U +* any ill-conditioning of the original matrix is transferred to U * and not to L. U(N, N) is an approximation to sigma_min(LU). * CALL ZCOPY( N-1, RHS, 1, WORK, 1 ) diff --git a/lapack-netlib/SRC/zlatsqr.f b/lapack-netlib/SRC/zlatsqr.f index 1fdf3be24..0f98cae93 100644 --- a/lapack-netlib/SRC/zlatsqr.f +++ b/lapack-netlib/SRC/zlatsqr.f @@ -1,3 +1,4 @@ +*> \brief \b ZLATSQR * * Definition: * =========== @@ -18,9 +19,23 @@ *> *> \verbatim *> -*> SLATSQR computes a blocked Tall-Skinny QR factorization of -*> an M-by-N matrix A, where M >= N: -*> A = Q * R . +*> ZLATSQR computes a blocked Tall-Skinny QR factorization of +*> a complex M-by-N matrix A for M >= N: +*> +*> A = Q * ( R ), +*> ( 0 ) +*> +*> where: +*> +*> Q is a M-by-M orthogonal matrix, stored on exit in an implicit +*> form in the elements below the digonal of the array A and in +*> the elemenst of the array T; +*> +*> R is an upper-triangular N-by-N matrix, stored on exit in +*> the elements on and above the diagonal of the array A. +*> +*> 0 is a (M-N)-by-N zero matrix, and is not stored. +*> *> \endverbatim * * Arguments: @@ -149,10 +164,10 @@ SUBROUTINE ZLATSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, $ LWORK, INFO) * -* -- LAPACK computational routine (version 3.7.0) -- +* -- LAPACK computational routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. -- -* December 2016 +* November 2019 * * .. Scalar Arguments .. INTEGER INFO, LDA, M, N, MB, NB, LDT, LWORK diff --git a/lapack-netlib/SRC/zlaunhr_col_getrfnp.f b/lapack-netlib/SRC/zlaunhr_col_getrfnp.f new file mode 100644 index 000000000..0ab7f0349 --- /dev/null +++ b/lapack-netlib/SRC/zlaunhr_col_getrfnp.f @@ -0,0 +1,248 @@ +*> \brief \b ZLAUNHR_COL_GETRFNP +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download ZLAUNHR_COL_GETRFNP + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE ZLAUNHR_COL_GETRFNP( M, N, A, LDA, D, INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. +* COMPLEX*16 A( LDA, * ), D( * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> ZLAUNHR_COL_GETRFNP computes the modified LU factorization without +*> pivoting of a complex general M-by-N matrix A. The factorization has +*> the form: +*> +*> A - S = L * U, +*> +*> where: +*> S is a m-by-n diagonal sign matrix with the diagonal D, so that +*> D(i) = S(i,i), 1 <= i <= min(M,N). 
The diagonal D is constructed +*> as D(i)=-SIGN(A(i,i)), where A(i,i) is the value after performing +*> i-1 steps of Gaussian elimination. This means that the diagonal +*> element at each step of "modified" Gaussian elimination is +*> at least one in absolute value (so that division-by-zero not +*> not possible during the division by the diagonal element); +*> +*> L is a M-by-N lower triangular matrix with unit diagonal elements +*> (lower trapezoidal if M > N); +*> +*> and U is a M-by-N upper triangular matrix +*> (upper trapezoidal if M < N). +*> +*> This routine is an auxiliary routine used in the Householder +*> reconstruction routine ZUNHR_COL. In ZUNHR_COL, this routine is +*> applied to an M-by-N matrix A with orthonormal columns, where each +*> element is bounded by one in absolute value. With the choice of +*> the matrix S above, one can show that the diagonal element at each +*> step of Gaussian elimination is the largest (in absolute value) in +*> the column on or below the diagonal, so that no pivoting is required +*> for numerical stability [1]. +*> +*> For more details on the Householder reconstruction algorithm, +*> including the modified LU factorization, see [1]. +*> +*> This is the blocked right-looking version of the algorithm, +*> calling Level 3 BLAS to update the submatrix. To factorize a block, +*> this routine calls the recursive routine ZLAUNHR_COL_GETRFNP2. +*> +*> [1] "Reconstructing Householder vectors from tall-skinny QR", +*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen, +*> E. Solomonik, J. Parallel Distrib. Comput., +*> vol. 85, pp. 3-31, 2015. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is COMPLEX*16 array, dimension (LDA,N) +*> On entry, the M-by-N matrix to be factored. +*> On exit, the factors L and U from the factorization +*> A-S=L*U; the unit diagonal elements of L are not stored. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is COMPLEX*16 array, dimension min(M,N) +*> The diagonal elements of the diagonal M-by-N sign matrix S, +*> D(i) = S(i,i), where 1 <= i <= min(M,N). The elements can be +*> only ( +1.0, 0.0 ) or (-1.0, 0.0 ). +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup complex16GEcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + SUBROUTINE ZLAUNHR_COL_GETRFNP( M, N, A, LDA, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. 
Scalar Arguments .. + INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. + COMPLEX*16 A( LDA, * ), D( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + COMPLEX*16 CONE + PARAMETER ( CONE = ( 1.0D+0, 0.0D+0 ) ) +* .. +* .. Local Scalars .. + INTEGER IINFO, J, JB, NB +* .. +* .. External Subroutines .. + EXTERNAL ZGEMM, ZLAUNHR_COL_GETRFNP2, ZTRSM, XERBLA +* .. +* .. External Functions .. + INTEGER ILAENV + EXTERNAL ILAENV +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Executable Statements .. +* +* Test the input parameters. +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN + INFO = -2 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -4 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZLAUNHR_COL_GETRFNP', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) + $ RETURN +* +* Determine the block size for this environment. +* + + NB = ILAENV( 1, 'ZLAUNHR_COL_GETRFNP', ' ', M, N, -1, -1 ) + + IF( NB.LE.1 .OR. NB.GE.MIN( M, N ) ) THEN +* +* Use unblocked code. +* + CALL ZLAUNHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) + ELSE +* +* Use blocked code. +* + DO J = 1, MIN( M, N ), NB + JB = MIN( MIN( M, N )-J+1, NB ) +* +* Factor diagonal and subdiagonal blocks. +* + CALL ZLAUNHR_COL_GETRFNP2( M-J+1, JB, A( J, J ), LDA, + $ D( J ), IINFO ) +* + IF( J+JB.LE.N ) THEN +* +* Compute block row of U. +* + CALL ZTRSM( 'Left', 'Lower', 'No transpose', 'Unit', JB, + $ N-J-JB+1, CONE, A( J, J ), LDA, A( J, J+JB ), + $ LDA ) + IF( J+JB.LE.M ) THEN +* +* Update trailing submatrix. +* + CALL ZGEMM( 'No transpose', 'No transpose', M-J-JB+1, + $ N-J-JB+1, JB, -CONE, A( J+JB, J ), LDA, + $ A( J, J+JB ), LDA, CONE, A( J+JB, J+JB ), + $ LDA ) + END IF + END IF + END DO + END IF + RETURN +* +* End of ZLAUNHR_COL_GETRFNP +* + END diff --git a/lapack-netlib/SRC/zlaunhr_col_getrfnp2.f b/lapack-netlib/SRC/zlaunhr_col_getrfnp2.f new file mode 100644 index 000000000..0057e430d --- /dev/null +++ b/lapack-netlib/SRC/zlaunhr_col_getrfnp2.f @@ -0,0 +1,314 @@ +*> \brief \b ZLAUNHR_COL_GETRFNP2 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download ZLAUNHR_COL_GETRFNP2 + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* RECURSIVE SUBROUTINE ZLAUNHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) +* +* .. Scalar Arguments .. +* INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. +* COMPLEX*16 A( LDA, * ), D( * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> ZLAUNHR_COL_GETRFNP2 computes the modified LU factorization without +*> pivoting of a complex general M-by-N matrix A. The factorization has +*> the form: +*> +*> A - S = L * U, +*> +*> where: +*> S is a m-by-n diagonal sign matrix with the diagonal D, so that +*> D(i) = S(i,i), 1 <= i <= min(M,N). The diagonal D is constructed +*> as D(i)=-SIGN(A(i,i)), where A(i,i) is the value after performing +*> i-1 steps of Gaussian elimination. This means that the diagonal +*> element at each step of "modified" Gaussian elimination is at +*> least one in absolute value (so that division-by-zero not +*> possible during the division by the diagonal element); +*> +*> L is a M-by-N lower triangular matrix with unit diagonal elements +*> (lower trapezoidal if M > N); +*> +*> and U is a M-by-N upper triangular matrix +*> (upper trapezoidal if M < N). 
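A one-entry example makes the sign construction described above concrete. In the one-row/one-column branches of ZLAUNHR_COL_GETRFNP2 (quoted further down) the step is the following; the numeric values in the comments are illustrative, not from the source:

*     Transfer the sign: for A(1,1) = ( 0.6, 0.0 ) this gives
*     D(1) = ( -1.0, 0.0 ).
      D( 1 ) = DCMPLX( -DSIGN( ONE, DBLE( A( 1, 1 ) ) ) )
*     Construct the diagonal entry of U: A(1,1) - D(1) = ( 1.6, 0.0 ).
*     Because the sign is chosen opposite to Re(A(1,1)), the
*     magnitudes add, so |A(1,1) - D(1)| >= 1 whenever |A(1,1)| <= 1
*     (as it is for a matrix with orthonormal columns), and the later
*     division by the diagonal entry is safe.
      A( 1, 1 ) = A( 1, 1 ) - D( 1 )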
+*> +*> This routine is an auxiliary routine used in the Householder +*> reconstruction routine ZUNHR_COL. In ZUNHR_COL, this routine is +*> applied to an M-by-N matrix A with orthonormal columns, where each +*> element is bounded by one in absolute value. With the choice of +*> the matrix S above, one can show that the diagonal element at each +*> step of Gaussian elimination is the largest (in absolute value) in +*> the column on or below the diagonal, so that no pivoting is required +*> for numerical stability [1]. +*> +*> For more details on the Householder reconstruction algorithm, +*> including the modified LU factorization, see [1]. +*> +*> This is the recursive version of the LU factorization algorithm. +*> Denote A - S by B. The algorithm divides the matrix B into four +*> submatrices: +*> +*> [ B11 | B12 ] where B11 is n1 by n1, +*> B = [ -----|----- ] B21 is (m-n1) by n1, +*> [ B21 | B22 ] B12 is n1 by n2, +*> B22 is (m-n1) by n2, +*> with n1 = min(m,n)/2, n2 = n-n1. +*> +*> +*> The subroutine calls itself to factor B11, solves for B21, +*> solves for B12, updates B22, then calls itself to factor B22. +*> +*> For more details on the recursive LU algorithm, see [2]. +*> +*> ZLAUNHR_COL_GETRFNP2 is called to factorize a block by the blocked +*> routine ZLAUNHR_COL_GETRFNP, which uses blocked code calling +*. Level 3 BLAS to update the submatrix. However, ZLAUNHR_COL_GETRFNP2 +*> is self-sufficient and can be used without ZLAUNHR_COL_GETRFNP. +*> +*> [1] "Reconstructing Householder vectors from tall-skinny QR", +*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen, +*> E. Solomonik, J. Parallel Distrib. Comput., +*> vol. 85, pp. 3-31, 2015. +*> +*> [2] "Recursion leads to automatic variable blocking for dense linear +*> algebra algorithms", F. Gustavson, IBM J. of Res. and Dev., +*> vol. 41, no. 6, pp. 737-755, 1997. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> The number of rows of the matrix A. M >= 0. +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The number of columns of the matrix A. N >= 0. +*> \endverbatim +*> +*> \param[in,out] A +*> \verbatim +*> A is COMPLEX*16 array, dimension (LDA,N) +*> On entry, the M-by-N matrix to be factored. +*> On exit, the factors L and U from the factorization +*> A-S=L*U; the unit diagonal elements of L are not stored. +*> \endverbatim +*> +*> \param[in] LDA +*> \verbatim +*> LDA is INTEGER +*> The leading dimension of the array A. LDA >= max(1,M). +*> \endverbatim +*> +*> \param[out] D +*> \verbatim +*> D is COMPLEX*16 array, dimension min(M,N) +*> The diagonal elements of the diagonal M-by-N sign matrix S, +*> D(i) = S(i,i), where 1 <= i <= min(M,N). The elements can be +*> only ( +1.0, 0.0 ) or (-1.0, 0.0 ). +*> \endverbatim +*> +*> \param[out] INFO +*> \verbatim +*> INFO is INTEGER +*> = 0: successful exit +*> < 0: if INFO = -i, the i-th argument had an illegal value +*> \endverbatim +*> +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. 
+* +*> \date November 2019 +* +*> \ingroup complex16GEcomputational +* +*> \par Contributors: +* ================== +*> +*> \verbatim +*> +*> November 2019, Igor Kozachenko, +*> Computer Science Division, +*> University of California, Berkeley +*> +*> \endverbatim +* +* ===================================================================== + RECURSIVE SUBROUTINE ZLAUNHR_COL_GETRFNP2( M, N, A, LDA, D, INFO ) + IMPLICIT NONE +* +* -- LAPACK computational routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +* .. +* .. Array Arguments .. + COMPLEX*16 A( LDA, * ), D( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + DOUBLE PRECISION ONE + PARAMETER ( ONE = 1.0D+0 ) + COMPLEX*16 CONE + PARAMETER ( CONE = ( 1.0D+0, 0.0D+0 ) ) +* .. +* .. Local Scalars .. + DOUBLE PRECISION SFMIN + INTEGER I, IINFO, N1, N2 + COMPLEX*16 Z +* .. +* .. External Functions .. + DOUBLE PRECISION DLAMCH + EXTERNAL DLAMCH +* .. +* .. External Subroutines .. + EXTERNAL ZGEMM, ZSCAL, ZTRSM, XERBLA +* .. +* .. Intrinsic Functions .. + INTRINSIC ABS, DBLE, DCMPLX, DIMAG, DSIGN, MAX, MIN +* .. +* .. Statement Functions .. + DOUBLE PRECISION CABS1 +* .. +* .. Statement Function definitions .. + CABS1( Z ) = ABS( DBLE( Z ) ) + ABS( DIMAG( Z ) ) +* .. +* .. Executable Statements .. +* +* Test the input parameters +* + INFO = 0 + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN + INFO = -2 + ELSE IF( LDA.LT.MAX( 1, M ) ) THEN + INFO = -4 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZLAUNHR_COL_GETRFNP2', -INFO ) + RETURN + END IF +* +* Quick return if possible +* + IF( MIN( M, N ).EQ.0 ) + $ RETURN + + IF ( M.EQ.1 ) THEN +* +* One row case, (also recursion termination case), +* use unblocked code +* +* Transfer the sign +* + D( 1 ) = DCMPLX( -DSIGN( ONE, DBLE( A( 1, 1 ) ) ) ) +* +* Construct the row of U +* + A( 1, 1 ) = A( 1, 1 ) - D( 1 ) +* + ELSE IF( N.EQ.1 ) THEN +* +* One column case, (also recursion termination case), +* use unblocked code +* +* Transfer the sign +* + D( 1 ) = DCMPLX( -DSIGN( ONE, DBLE( A( 1, 1 ) ) ) ) +* +* Construct the row of U +* + A( 1, 1 ) = A( 1, 1 ) - D( 1 ) +* +* Scale the elements 2:M of the column +* +* Determine machine safe minimum +* + SFMIN = DLAMCH('S') +* +* Construct the subdiagonal elements of L +* + IF( CABS1( A( 1, 1 ) ) .GE. SFMIN ) THEN + CALL ZSCAL( M-1, CONE / A( 1, 1 ), A( 2, 1 ), 1 ) + ELSE + DO I = 2, M + A( I, 1 ) = A( I, 1 ) / A( 1, 1 ) + END DO + END IF +* + ELSE +* +* Divide the matrix B into four submatrices +* + N1 = MIN( M, N ) / 2 + N2 = N-N1 + +* +* Factor B11, recursive call +* + CALL ZLAUNHR_COL_GETRFNP2( N1, N1, A, LDA, D, IINFO ) +* +* Solve for B21 +* + CALL ZTRSM( 'R', 'U', 'N', 'N', M-N1, N1, CONE, A, LDA, + $ A( N1+1, 1 ), LDA ) +* +* Solve for B12 +* + CALL ZTRSM( 'L', 'L', 'N', 'U', N1, N2, CONE, A, LDA, + $ A( 1, N1+1 ), LDA ) +* +* Update B22, i.e. 
compute the Schur complement +* B22 := B22 - B21*B12 +* + CALL ZGEMM( 'N', 'N', M-N1, N2, N1, -CONE, A( N1+1, 1 ), LDA, + $ A( 1, N1+1 ), LDA, CONE, A( N1+1, N1+1 ), LDA ) +* +* Factor B22, recursive call +* + CALL ZLAUNHR_COL_GETRFNP2( M-N1, N2, A( N1+1, N1+1 ), LDA, + $ D( N1+1 ), IINFO ) +* + END IF + RETURN +* +* End of ZLAUNHR_COL_GETRFNP2 +* + END diff --git a/lapack-netlib/SRC/zporfsx.f b/lapack-netlib/SRC/zporfsx.f index ee8cfbc6a..bbff4331e 100644 --- a/lapack-netlib/SRC/zporfsx.f +++ b/lapack-netlib/SRC/zporfsx.f @@ -44,7 +44,7 @@ *> \verbatim *> *> ZPORFSX improves the computed solution to a system of linear -*> equations when the coefficient matrix is symmetric positive +*> equations when the coefficient matrix is Hermitian positive *> definite, and provides error bounds and backward error estimates *> for the solution. In addition to normwise error bound, the code *> provides maximum componentwise error bound if possible. See @@ -103,7 +103,7 @@ *> \param[in] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) -*> The symmetric matrix A. If UPLO = 'U', the leading N-by-N +*> The Hermitian matrix A. If UPLO = 'U', the leading N-by-N *> upper triangular part of A contains the upper triangular part *> of the matrix A, and the strictly lower triangular part of A *> is not referenced. If UPLO = 'L', the leading N-by-N lower @@ -134,7 +134,7 @@ *> \param[in,out] S *> \verbatim *> S is DOUBLE PRECISION array, dimension (N) -*> The row scale factors for A. If EQUED = 'Y', A is multiplied on +*> The scale factors for A. If EQUED = 'Y', A is multiplied on *> the left and right by diag(S). S is an input argument if FACT = *> 'F'; otherwise, S is an output argument. If FACT = 'F' and EQUED *> = 'Y', each element of S must be positive. If S is output, each @@ -262,7 +262,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -298,14 +298,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -313,9 +313,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. 
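On the calling side, the PARAMS conventions documented in this ZPORFSX hunk mean a caller can either take every default or pin individual entries. A minimal sketch follows; the index meanings are taken from the documentation above (entry 1 = refinement switch, entry 2 = ITHRESH, entry 3 = the componentwise flag referenced in the ERR_BNDS_COMP text), and the cap of 10 iterations is an arbitrary example value:

      INTEGER            NPARAMS
      DOUBLE PRECISION   PARAMS( 3 )
*     Passing NPARAMS <= 0 leaves PARAMS unreferenced and selects the
*     documented defaults for everything.
      NPARAMS = 3
*     PARAMS(1): 1.0 selects the refinement algorithm; 0.0 disables
*     refinement (and with it the error bounds).
      PARAMS( 1 ) = 1.0D+0
*     PARAMS(2): maximum number of residual computations; a negative
*     entry would request the default instead.
      PARAMS( 2 ) = 10.0D+0
*     PARAMS(3): nonzero requests componentwise error bounds; with
*     0.0, ERR_BNDS_COMP is not accessed.
      PARAMS( 3 ) = 1.0D+0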
diff --git a/lapack-netlib/SRC/zposvxx.f b/lapack-netlib/SRC/zposvxx.f index 8126f14be..913d16cb2 100644 --- a/lapack-netlib/SRC/zposvxx.f +++ b/lapack-netlib/SRC/zposvxx.f @@ -45,7 +45,7 @@ *> *> ZPOSVXX uses the Cholesky factorization A = U**T*U or A = L*L**T *> to compute the solution to a complex*16 system of linear equations -*> A * X = B, where A is an N-by-N symmetric positive definite matrix +*> A * X = B, where A is an N-by-N Hermitian positive definite matrix *> and X and B are N-by-NRHS matrices. *> *> If requested, both normwise and maximum componentwise error bounds @@ -157,7 +157,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) -*> On entry, the symmetric matrix A, except if FACT = 'F' and EQUED = +*> On entry, the Hermitian matrix A, except if FACT = 'F' and EQUED = *> 'Y', then A must contain the equilibrated matrix *> diag(S)*A*diag(S). If UPLO = 'U', the leading N-by-N upper *> triangular part of A contains the upper triangular part of the @@ -365,7 +365,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -401,14 +401,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -416,9 +416,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. *> (other values are reserved for future use) *> *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual diff --git a/lapack-netlib/SRC/zpotrf2.f b/lapack-netlib/SRC/zpotrf2.f index e37c9f6d6..85c434d47 100644 --- a/lapack-netlib/SRC/zpotrf2.f +++ b/lapack-netlib/SRC/zpotrf2.f @@ -24,7 +24,7 @@ *> *> \verbatim *> -*> ZPOTRF2 computes the Cholesky factorization of a real symmetric +*> ZPOTRF2 computes the Cholesky factorization of a Hermitian *> positive definite matrix A using the recursive algorithm. *> *> The factorization has the form @@ -63,7 +63,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) -*> On entry, the symmetric matrix A. If UPLO = 'U', the leading +*> On entry, the Hermitian matrix A. If UPLO = 'U', the leading *> N-by-N upper triangular part of A contains the upper *> triangular part of the matrix A, and the strictly lower *> triangular part of A is not referenced. 
If UPLO = 'L', the diff --git a/lapack-netlib/SRC/zstemr.f b/lapack-netlib/SRC/zstemr.f index ac7552a6a..8685542de 100644 --- a/lapack-netlib/SRC/zstemr.f +++ b/lapack-netlib/SRC/zstemr.f @@ -250,13 +250,13 @@ *> \param[in,out] TRYRAC *> \verbatim *> TRYRAC is LOGICAL -*> If TRYRAC.EQ..TRUE., indicates that the code should check whether +*> If TRYRAC = .TRUE., indicates that the code should check whether *> the tridiagonal matrix defines its eigenvalues to high relative *> accuracy. If so, the code uses relative-accuracy preserving *> algorithms that might be (a bit) slower depending on the matrix. *> If the matrix does not define its eigenvalues to high relative *> accuracy, the code can uses possibly faster algorithms. -*> If TRYRAC.EQ..FALSE., the code is not required to guarantee +*> If TRYRAC = .FALSE., the code is not required to guarantee *> relatively accurate eigenvalues and can use the fastest possible *> techniques. *> On exit, a .TRUE. TRYRAC will be set to .FALSE. if the matrix diff --git a/lapack-netlib/SRC/zsycon_3.f b/lapack-netlib/SRC/zsycon_3.f index 856845960..33bd23849 100644 --- a/lapack-netlib/SRC/zsycon_3.f +++ b/lapack-netlib/SRC/zsycon_3.f @@ -19,7 +19,7 @@ * =========== * * SUBROUTINE ZSYCON_3( UPLO, N, A, LDA, E, IPIV, ANORM, RCOND, -* WORK, IWORK, INFO ) +* WORK, INFO ) * * .. Scalar Arguments .. * CHARACTER UPLO @@ -27,7 +27,7 @@ * DOUBLE PRECISION ANORM, RCOND * .. * .. Array Arguments .. -* INTEGER IPIV( * ), IWORK( * ) +* INTEGER IPIV( * ) * COMPLEX*16 A( LDA, * ), E ( * ), WORK( * ) * .. * @@ -129,11 +129,6 @@ *> WORK is COMPLEX*16 array, dimension (2*N) *> \endverbatim *> -*> \param[out] IWORK -*> \verbatim -*> IWORK is INTEGER array, dimension (N) -*> \endverbatim -*> *> \param[out] INFO *> \verbatim *> INFO is INTEGER diff --git a/lapack-netlib/SRC/zsyconvf.f b/lapack-netlib/SRC/zsyconvf.f index b26bfd63b..2d5ce882e 100644 --- a/lapack-netlib/SRC/zsyconvf.f +++ b/lapack-netlib/SRC/zsyconvf.f @@ -294,7 +294,7 @@ * * Convert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in factorization order where i decreases from N to 1 * I = N @@ -347,7 +347,7 @@ * * Revert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in reverse factorization order where i increases from 1 to N * I = 1 @@ -438,7 +438,7 @@ * * Convert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in factorization order where k increases from 1 to N * I = 1 @@ -491,7 +491,7 @@ * * Revert PERMUTATIONS and IPIV * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in reverse factorization order where i decreases from N to 1 * I = N diff --git a/lapack-netlib/SRC/zsyconvf_rook.f b/lapack-netlib/SRC/zsyconvf_rook.f index 5c36f4bcd..410d2eb34 100644 --- a/lapack-netlib/SRC/zsyconvf_rook.f +++ b/lapack-netlib/SRC/zsyconvf_rook.f @@ -285,7 +285,7 @@ * * Convert PERMUTATIONS * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in factorization order where i decreases from N to 1 * I = N @@ -336,7 +336,7 @@ * * Revert PERMUTATIONS * -* Apply permutaions to submatrices of upper part of A +* Apply permutations to submatrices of upper part of A * in reverse factorization order where i increases from 1 to N * I = 1 @@ -426,7 
+426,7 @@ * * Convert PERMUTATIONS * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in factorization order where i increases from 1 to N * I = 1 @@ -477,7 +477,7 @@ * * Revert PERMUTATIONS * -* Apply permutaions to submatrices of lower part of A +* Apply permutations to submatrices of lower part of A * in reverse factorization order where i decreases from N to 1 * I = N diff --git a/lapack-netlib/SRC/zsyrfsx.f b/lapack-netlib/SRC/zsyrfsx.f index 3420d70cd..d086510d8 100644 --- a/lapack-netlib/SRC/zsyrfsx.f +++ b/lapack-netlib/SRC/zsyrfsx.f @@ -271,7 +271,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -307,14 +307,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -322,9 +322,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the double-precision refinement algorithm, +*> = 1.0: Use the double-precision refinement algorithm, *> possibly with doubled-single computations if the *> compilation environment does not support DOUBLE *> PRECISION. diff --git a/lapack-netlib/SRC/zsysv_aa.f b/lapack-netlib/SRC/zsysv_aa.f index 325d07c54..4e87bd105 100644 --- a/lapack-netlib/SRC/zsysv_aa.f +++ b/lapack-netlib/SRC/zsysv_aa.f @@ -42,7 +42,7 @@ *> matrices. *> *> Aasen's algorithm is used to factor A as -*> A = U * T * U**T, if UPLO = 'U', or +*> A = U**T * T * U, if UPLO = 'U', or *> A = L * T * L**T, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is symmetric tridiagonal. The factored @@ -86,7 +86,7 @@ *> *> On exit, if INFO = 0, the tridiagonal matrix T and the *> multipliers used to obtain the factor U or L from the -*> factorization A = U*T*U**T or A = L*T*L**T as computed by +*> factorization A = U**T*T*U or A = L*T*L**T as computed by *> ZSYTRF. *> \endverbatim *> @@ -230,7 +230,7 @@ RETURN END IF * -* Compute the factorization A = U*T*U**T or A = L*T*L**T. +* Compute the factorization A = U**T*T*U or A = L*T*L**T. * CALL ZSYTRF_AA( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) IF( INFO.EQ.0 ) THEN diff --git a/lapack-netlib/SRC/zsysv_aa_2stage.f b/lapack-netlib/SRC/zsysv_aa_2stage.f index 029ed587d..923eaaec0 100644 --- a/lapack-netlib/SRC/zsysv_aa_2stage.f +++ b/lapack-netlib/SRC/zsysv_aa_2stage.f @@ -43,8 +43,8 @@ *> matrices. 
*> *> Aasen's 2-stage algorithm is used to factor A as -*> A = U * T * U**H, if UPLO = 'U', or -*> A = L * T * L**H, if UPLO = 'L', +*> A = U**T * T * U, if UPLO = 'U', or +*> A = L * T * L**T, if UPLO = 'L', *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is symmetric and band. The matrix T is *> then LU-factored with partial pivoting. The factored form of A @@ -257,7 +257,7 @@ END IF * * -* Compute the factorization A = U*T*U**H or A = L*T*L**H. +* Compute the factorization A = U**T*T*U or A = L*T*L**T. * CALL ZSYTRF_AA_2STAGE( UPLO, N, A, LDA, TB, LTB, IPIV, IPIV2, $ WORK, LWORK, INFO ) diff --git a/lapack-netlib/SRC/zsysvxx.f b/lapack-netlib/SRC/zsysvxx.f index ef44d09d3..e29439385 100644 --- a/lapack-netlib/SRC/zsysvxx.f +++ b/lapack-netlib/SRC/zsysvxx.f @@ -378,7 +378,7 @@ *> information as described below. There currently are up to three *> pieces of information returned for each right-hand side. If *> componentwise accuracy is not requested (PARAMS(3) = 0.0), then -*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS .LT. 3, then at most +*> ERR_BNDS_COMP is not accessed. If N_ERR_BNDS < 3, then at most *> the first (:,N_ERR_BNDS) entries are returned. *> *> The first index in ERR_BNDS_COMP(i,:) corresponds to the ith @@ -414,14 +414,14 @@ *> \param[in] NPARAMS *> \verbatim *> NPARAMS is INTEGER -*> Specifies the number of parameters set in PARAMS. If .LE. 0, the +*> Specifies the number of parameters set in PARAMS. If <= 0, the *> PARAMS array is never referenced and default values are used. *> \endverbatim *> *> \param[in,out] PARAMS *> \verbatim *> PARAMS is DOUBLE PRECISION array, dimension NPARAMS -*> Specifies algorithm parameters. If an entry is .LT. 0.0, then +*> Specifies algorithm parameters. If an entry is < 0.0, then *> that entry will be filled with default value used for that *> parameter. Only positions up to NPARAMS are accessed; defaults *> are used for higher-numbered parameters. @@ -429,9 +429,9 @@ *> PARAMS(LA_LINRX_ITREF_I = 1) : Whether to perform iterative *> refinement or not. *> Default: 1.0D+0 -*> = 0.0 : No refinement is performed, and no error bounds are +*> = 0.0: No refinement is performed, and no error bounds are *> computed. -*> = 1.0 : Use the extra-precise refinement algorithm. +*> = 1.0: Use the extra-precise refinement algorithm. *> (other values are reserved for future use) *> *> PARAMS(LA_LINRX_ITHRESH_I = 2) : Maximum number of residual diff --git a/lapack-netlib/SRC/zsytf2_rk.f b/lapack-netlib/SRC/zsytf2_rk.f index b1a02f4a5..4ae1a4a22 100644 --- a/lapack-netlib/SRC/zsytf2_rk.f +++ b/lapack-netlib/SRC/zsytf2_rk.f @@ -321,7 +321,7 @@ * * Factorize A as U*D*U**T using the upper triangle of A * -* Initilize the first entry of array E, where superdiagonal +* Initialize the first entry of array E, where superdiagonal * elements of D are stored * E( 1 ) = CZERO @@ -632,7 +632,7 @@ * * Factorize A as L*D*L**T using the lower triangle of A * -* Initilize the unused last entry of the subdiagonal array E. +* Initialize the unused last entry of the subdiagonal array E. * E( N ) = CZERO * diff --git a/lapack-netlib/SRC/zsytrf.f b/lapack-netlib/SRC/zsytrf.f index 663199c8a..54e22cca1 100644 --- a/lapack-netlib/SRC/zsytrf.f +++ b/lapack-netlib/SRC/zsytrf.f @@ -43,7 +43,7 @@ *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and D is symmetric and block diagonal with -*> with 1-by-1 and 2-by-2 diagonal blocks. +*> 1-by-1 and 2-by-2 diagonal blocks. 
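"Block diagonal with 1-by-1 and 2-by-2 diagonal blocks" means a factor D of, for example (N = 5, illustration only, not from the source), the shape

      ( d11                     )
      (      d22  d23           )
      (      d23  d33           )
      (                d44      )
      (                     d55 )

with one symmetric 2-by-2 pivot block in rows/columns 2:3 among 1-by-1 blocks; IPIV records both the row interchanges and which entries belong to 2-by-2 blocks.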
*> *> This is the blocked version of the algorithm, calling Level 3 BLAS. *> \endverbatim diff --git a/lapack-netlib/SRC/zsytrf_aa.f b/lapack-netlib/SRC/zsytrf_aa.f index b25b1fbce..e547c6a60 100644 --- a/lapack-netlib/SRC/zsytrf_aa.f +++ b/lapack-netlib/SRC/zsytrf_aa.f @@ -37,7 +37,7 @@ *> ZSYTRF_AA computes the factorization of a complex symmetric matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a complex symmetric tridiagonal matrix. @@ -223,7 +223,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... * * Copy first row A(1, 1:N) into H(1:n) (stored in WORK(1:N)) @@ -256,7 +256,7 @@ $ A( MAX(1, J), J+1 ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J @@ -375,7 +375,7 @@ $ A( J+1, MAX(1, J) ), LDA, $ IPIV( J+1 ), WORK, N, WORK( N*NB+1 ) ) * -* Ajust IPIV and apply it back (J-th step picks (J+1)-th pivot) +* Adjust IPIV and apply it back (J-th step picks (J+1)-th pivot) * DO J2 = J+2, MIN(N, J+JB+1) IPIV( J2 ) = IPIV( J2 ) + J diff --git a/lapack-netlib/SRC/zsytrf_aa_2stage.f b/lapack-netlib/SRC/zsytrf_aa_2stage.f index d3486c1a7..67a1c1f6f 100644 --- a/lapack-netlib/SRC/zsytrf_aa_2stage.f +++ b/lapack-netlib/SRC/zsytrf_aa_2stage.f @@ -38,7 +38,7 @@ *> ZSYTRF_AA_2STAGE computes the factorization of a complex symmetric matrix A *> using the Aasen's algorithm. The form of the factorization is *> -*> A = U*T*U**T or A = L*T*L**T +*> A = U**T*T*U or A = L*T*L**T *> *> where U (or L) is a product of permutation and unit upper (lower) *> triangular matrices, and T is a complex symmetric band matrix with the @@ -275,7 +275,7 @@ IF( UPPER ) THEN * * ..................................................... -* Factorize A as L*D*L**T using the upper triangle of A +* Factorize A as U**T*D*U using the upper triangle of A * ..................................................... 
* DO J = 0, NT-1 @@ -448,12 +448,14 @@ c END IF * > Apply pivots to previous columns of L CALL ZSWAP( K-1, A( (J+1)*NB+1, I1 ), 1, $ A( (J+1)*NB+1, I2 ), 1 ) -* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL ZSWAP( I2-I1-1, A( I1, I1+1 ), LDA, - $ A( I1+1, I2 ), 1 ) +* > Swap A(I1+1:M, I1) with A(I2, I1+1:M) + IF( I2.GT.(I1+1) ) + $ CALL ZSWAP( I2-I1-1, A( I1, I1+1 ), LDA, + $ A( I1+1, I2 ), 1 ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL ZSWAP( N-I2, A( I1, I2+1 ), LDA, - $ A( I2, I2+1 ), LDA ) + IF( I2.LT.N ) + $ CALL ZSWAP( N-I2, A( I1, I2+1 ), LDA, + $ A( I2, I2+1 ), LDA ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) @@ -637,11 +639,13 @@ c END IF CALL ZSWAP( K-1, A( I1, (J+1)*NB+1 ), LDA, $ A( I2, (J+1)*NB+1 ), LDA ) * > Swap A(I1+1:M, I1) with A(I2, I1+1:M) - CALL ZSWAP( I2-I1-1, A( I1+1, I1 ), 1, - $ A( I2, I1+1 ), LDA ) + IF( I2.GT.(I1+1) ) + $ CALL ZSWAP( I2-I1-1, A( I1+1, I1 ), 1, + $ A( I2, I1+1 ), LDA ) * > Swap A(I2+1:M, I1) with A(I2+1:M, I2) - CALL ZSWAP( N-I2, A( I2+1, I1 ), 1, - $ A( I2+1, I2 ), 1 ) + IF( I2.LT.N ) + $ CALL ZSWAP( N-I2, A( I2+1, I1 ), 1, + $ A( I2+1, I2 ), 1 ) * > Swap A(I1, I1) with A(I2, I2) PIV = A( I1, I1 ) A( I1, I1 ) = A( I2, I2 ) diff --git a/lapack-netlib/SRC/zsytri2.f b/lapack-netlib/SRC/zsytri2.f index e7303c90b..9929eb2c6 100644 --- a/lapack-netlib/SRC/zsytri2.f +++ b/lapack-netlib/SRC/zsytri2.f @@ -62,7 +62,7 @@ *> \param[in,out] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) -*> On entry, the NB diagonal matrix D and the multipliers +*> On entry, the block diagonal matrix D and the multipliers *> used to obtain the factor U or L as computed by ZSYTRF. *> *> On exit, if INFO = 0, the (symmetric) inverse of the original @@ -82,7 +82,7 @@ *> \param[in] IPIV *> \verbatim *> IPIV is INTEGER array, dimension (N) -*> Details of the interchanges and the NB structure of D +*> Details of the interchanges and the block structure of D *> as determined by ZSYTRF. *> \endverbatim *> diff --git a/lapack-netlib/SRC/zsytrs2.f b/lapack-netlib/SRC/zsytrs2.f index c0ee206a5..6e9cca425 100644 --- a/lapack-netlib/SRC/zsytrs2.f +++ b/lapack-netlib/SRC/zsytrs2.f @@ -36,7 +36,7 @@ *> *> \verbatim *> -*> ZSYTRS2 solves a system of linear equations A*X = B with a real +*> ZSYTRS2 solves a system of linear equations A*X = B with a complex *> symmetric matrix A using the factorization A = U*D*U**T or *> A = L*D*L**T computed by ZSYTRF and converted by ZSYCONV. *> \endverbatim diff --git a/lapack-netlib/SRC/zsytrs_aa.f b/lapack-netlib/SRC/zsytrs_aa.f index e62e9e486..0f0664009 100644 --- a/lapack-netlib/SRC/zsytrs_aa.f +++ b/lapack-netlib/SRC/zsytrs_aa.f @@ -37,7 +37,7 @@ *> \verbatim *> *> ZSYTRS_AA solves a system of linear equations A*X = B with a complex -*> symmetric matrix A using the factorization A = U*T*U**T or +*> symmetric matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by ZSYTRF_AA. *> \endverbatim * @@ -49,7 +49,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -97,14 +97,16 @@ *> The leading dimension of the array B. LDB >= max(1,N). 
*> \endverbatim *> -*> \param[in] WORK +*> \param[out] WORK *> \verbatim -*> WORK is DOUBLE array, dimension (MAX(1,LWORK)) +*> WORK is COMPLEX*16 array, dimension (MAX(1,LWORK)) *> \endverbatim *> *> \param[in] LWORK *> \verbatim -*> LWORK is INTEGER, LWORK >= MAX(1,3*N-2). +*> LWORK is INTEGER +*> The dimension of the array WORK. LWORK >= max(1,3*N-2). +*> \endverbatim *> *> \param[out] INFO *> \verbatim @@ -198,22 +200,29 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. +* +* 1) Forward substitution with U**T +* + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B * -* Pivot, P**T * B + DO K = 1, N + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO * - DO K = 1, N - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO +* Compute U**T \ B -> B [ (U**T \P**T * B) ] * -* Compute (U \P**T * B) -> B [ (U \P**T * B) ] + CALL ZTRSM( 'L', 'U', 'T', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB) + END IF * - CALL ZTRSM('L', 'U', 'T', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) +* 2) Solve with triangular matrix T * -* Compute T \ B -> B [ T \ (U \P**T * B) ] +* Compute T \ B -> B [ T \ (U**T \P**T * B) ] * CALL ZLACPY( 'F', 1, N, A( 1, 1 ), LDA+1, WORK( N ), 1) IF( N.GT.1 ) THEN @@ -223,35 +232,47 @@ CALL ZGTSV( N, NRHS, WORK( 1 ), WORK( N ), WORK( 2*N ), B, LDB, $ INFO ) * -* Compute (U**T \ B) -> B [ U**T \ (T \ (U \P**T * B) ) ] +* 3) Backward substitution with U * - CALL ZTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), LDA, - $ B( 2, 1 ), LDB) + IF( N.GT.1 ) THEN * -* Pivot, P * B [ P * (U**T \ (T \ (U \P**T * B) )) ] +* Compute U \ B -> B [ U \ (T \ (U**T \P**T * B) ) ] * - DO K = N, 1, -1 - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO + CALL ZTRSM( 'L', 'U', 'N', 'U', N-1, NRHS, ONE, A( 1, 2 ), + $ LDA, B( 2, 1 ), LDB) +* +* Pivot, P * B -> B [ P * (U \ (T \ (U**T \P**T * B) )) ] +* + DO K = N, 1, -1 + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO + END IF * ELSE * * Solve A*X = B, where A = L*T*L**T. 
* -* Pivot, P**T * B +* 1) Forward substitution with L * - DO K = 1, N - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO + IF( N.GT.1 ) THEN +* +* Pivot, P**T * B -> B * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] + DO K = 1, N + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO +* +* Compute L \ B -> B [ (L \P**T * B) ] +* + CALL ZTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB) + END IF * - CALL ZTRSM( 'L', 'L', 'N', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) +* 2) Solve with triangular matrix T * * Compute T \ B -> B [ T \ (L \P**T * B) ] * @@ -263,18 +284,23 @@ CALL ZGTSV( N, NRHS, WORK( 1 ), WORK(N), WORK( 2*N ), B, LDB, $ INFO) * -* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] +* 3) Backward substitution with L**T * - CALL ZTRSM( 'L', 'L', 'T', 'U', N-1, NRHS, ONE, A( 2, 1 ), LDA, - $ B( 2, 1 ), LDB) + IF( N.GT.1 ) THEN * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* Compute (L**T \ B) -> B [ L**T \ (T \ (L \P**T * B) ) ] * - DO K = N, 1, -1 - KP = IPIV( K ) - IF( KP.NE.K ) - $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) - END DO + CALL ZTRSM( 'L', 'L', 'T', 'U', N-1, NRHS, ONE, A( 2, 1 ), + $ LDA, B( 2, 1 ), LDB) +* +* Pivot, P * B -> B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* + DO K = N, 1, -1 + KP = IPIV( K ) + IF( KP.NE.K ) + $ CALL ZSWAP( NRHS, B( K, 1 ), LDB, B( KP, 1 ), LDB ) + END DO + END IF * END IF * diff --git a/lapack-netlib/SRC/zsytrs_aa_2stage.f b/lapack-netlib/SRC/zsytrs_aa_2stage.f index fa15eee90..bf060b2d3 100644 --- a/lapack-netlib/SRC/zsytrs_aa_2stage.f +++ b/lapack-netlib/SRC/zsytrs_aa_2stage.f @@ -36,7 +36,7 @@ *> \verbatim *> *> ZSYTRS_AA_2STAGE solves a system of linear equations A*X = B with a complex -*> symmetric matrix A using the factorization A = U*T*U**T or +*> symmetric matrix A using the factorization A = U**T*T*U or *> A = L*T*L**T computed by ZSYTRF_AA_2STAGE. *> \endverbatim * @@ -48,7 +48,7 @@ *> UPLO is CHARACTER*1 *> Specifies whether the details of the factorization are stored *> as an upper or lower triangular matrix. -*> = 'U': Upper triangular, form is A = U*T*U**T; +*> = 'U': Upper triangular, form is A = U**T*T*U; *> = 'L': Lower triangular, form is A = L*T*L**T. *> \endverbatim *> @@ -208,15 +208,15 @@ * IF( UPPER ) THEN * -* Solve A*X = B, where A = U*T*U**T. +* Solve A*X = B, where A = U**T*T*U. 
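The restructured solver above now applies the same three-step pattern in both triangles: apply the pivots P**T, sweep with the unit-triangular factor (U**T or L), solve the tridiagonal T with ZGTSV, then undo the two steps in reverse order, with every triangular step guarded by N.GT.1. A minimal driver in the spirit of this code path (an illustrative sketch, not part of the patch: the 4-by-4 complex symmetric test matrix, right-hand side, and workspace size are assumptions) shows how the ZSYTRF_AA / ZSYTRS_AA pair is meant to be called:

      PROGRAM AASEN_SKETCH
*     Factor a complex symmetric matrix with Aasen's algorithm and
*     solve A*X = B; all data here are illustrative assumptions.
      IMPLICIT NONE
      INTEGER            N, NRHS, LWORK
      PARAMETER          ( N = 4, NRHS = 1, LWORK = 64 )
      INTEGER            IPIV( N ), INFO, I, J
      COMPLEX*16         A( N, N ), B( N, NRHS ), WORK( LWORK )
*     Build a (non-Hermitian) complex symmetric test matrix.
      DO J = 1, N
         DO I = 1, J
            A( I, J ) = DCMPLX( 1.0D0 / DBLE( I+J ), DBLE( I*J ) )
            A( J, I ) = A( I, J )
         END DO
         B( J, 1 ) = DCMPLX( DBLE( J ), 0.0D0 )
      END DO
*     A = U**T*T*U with partial pivoting, then the three-step solve.
      CALL ZSYTRF_AA( 'U', N, A, N, IPIV, WORK, LWORK, INFO )
      IF( INFO.EQ.0 )
     $   CALL ZSYTRS_AA( 'U', N, NRHS, A, N, IPIV, B, N,
     $                   WORK, LWORK, INFO )
      PRINT *, 'INFO =', INFO, '  X =', ( B( I, 1 ), I = 1, N )
      END

The workspace of 64 comfortably covers both documented minimums here (2*N for the factorization, 3*N-2 for the solve).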
* IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL ZLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (U**T \P**T * B) -> B [ (U**T \P**T * B) ] +* Compute (U**T \ B) -> B [ (U**T \P**T * B) ] * CALL ZTRSM( 'L', 'U', 'T', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) @@ -234,7 +234,7 @@ CALL ZTRSM( 'L', 'U', 'N', 'U', N-NB, NRHS, ONE, A(1, NB+1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (U \ (T \ (U**T \P**T * B) )) ] +* Pivot, P * B -> B [ P * (U \ (T \ (U**T \P**T * B) )) ] * CALL ZLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * @@ -246,11 +246,11 @@ * IF( N.GT.NB ) THEN * -* Pivot, P**T * B +* Pivot, P**T * B -> B * CALL ZLASWP( NRHS, B, LDB, NB+1, N, IPIV, 1 ) * -* Compute (L \P**T * B) -> B [ (L \P**T * B) ] +* Compute (L \ B) -> B [ (L \P**T * B) ] * CALL ZTRSM( 'L', 'L', 'N', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) @@ -268,7 +268,7 @@ CALL ZTRSM( 'L', 'L', 'T', 'U', N-NB, NRHS, ONE, A(NB+1, 1), $ LDA, B(NB+1, 1), LDB) * -* Pivot, P * B [ P * (L**T \ (T \ (L \P**T * B) )) ] +* Pivot, P * B -> B [ P * (L**T \ (T \ (L \P**T * B) )) ] * CALL ZLASWP( NRHS, B, LDB, NB+1, N, IPIV, -1 ) * diff --git a/lapack-netlib/SRC/ztgsy2.f b/lapack-netlib/SRC/ztgsy2.f index f89effd6c..028ddfd3d 100644 --- a/lapack-netlib/SRC/ztgsy2.f +++ b/lapack-netlib/SRC/ztgsy2.f @@ -67,7 +67,7 @@ *> R * B**H + L * E**H = scale * -F *> *> This case is used to compute an estimate of Dif[(A, D), (B, E)] = -*> = sigma_min(Z) using reverse communicaton with ZLACON. +*> = sigma_min(Z) using reverse communication with ZLACON. *> *> ZTGSY2 also (IJOB >= 1) contributes to the computation in ZTGSYL *> of an upper bound on the separation between to matrix pairs. Then @@ -81,7 +81,7 @@ *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 -*> = 'N', solve the generalized Sylvester equation (1). +*> = 'N': solve the generalized Sylvester equation (1). *> = 'T': solve the 'transposed' system (3). *> \endverbatim *> diff --git a/lapack-netlib/SRC/ztpmlqt.f b/lapack-netlib/SRC/ztpmlqt.f index 6a67e4443..cc333f5a2 100644 --- a/lapack-netlib/SRC/ztpmlqt.f +++ b/lapack-netlib/SRC/ztpmlqt.f @@ -94,7 +94,7 @@ *> *> \param[in] V *> \verbatim -*> V is COMPLEX*16 array, dimension (LDA,K) +*> V is COMPLEX*16 array, dimension (LDV,K) *> The i-th row must contain the vector which defines the *> elementary reflector H(i), for i = 1,2,...,k, as returned by *> DTPLQT in B. See Further Details. diff --git a/lapack-netlib/SRC/ztpmqrt.f b/lapack-netlib/SRC/ztpmqrt.f index aca7ff00f..530dca458 100644 --- a/lapack-netlib/SRC/ztpmqrt.f +++ b/lapack-netlib/SRC/ztpmqrt.f @@ -94,7 +94,7 @@ *> *> \param[in] V *> \verbatim -*> V is COMPLEX*16 array, dimension (LDA,K) +*> V is COMPLEX*16 array, dimension (LDV,K) *> The i-th column must contain the vector which defines the *> elementary reflector H(i), for i = 1,2,...,k, as returned by *> CTPQRT in B. See Further Details. diff --git a/lapack-netlib/SRC/ztprfb.f b/lapack-netlib/SRC/ztprfb.f index 1a62829d5..f96c237ee 100644 --- a/lapack-netlib/SRC/ztprfb.f +++ b/lapack-netlib/SRC/ztprfb.f @@ -152,8 +152,8 @@ *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. -*> If SIDE = 'L', LDC >= max(1,K); -*> If SIDE = 'R', LDC >= max(1,M). +*> If SIDE = 'L', LDA >= max(1,K); +*> If SIDE = 'R', LDA >= max(1,M). 
*> \endverbatim
*>
*> \param[in,out] B
diff --git a/lapack-netlib/SRC/zungtsqr.f b/lapack-netlib/SRC/zungtsqr.f
new file mode 100644
index 000000000..7b04e9a29
--- /dev/null
+++ b/lapack-netlib/SRC/zungtsqr.f
@@ -0,0 +1,307 @@
+*> \brief \b ZUNGTSQR
+*
+* =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
+*
+*> \htmlonly
+*> Download ZUNGTSQR + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
+*> [TXT]
+*>
+* Definition:
+* ===========
+*
+* SUBROUTINE ZUNGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK,
+* $ INFO )
+*
+* .. Scalar Arguments ..
+* INTEGER INFO, LDA, LDT, LWORK, M, N, MB, NB
+* ..
+* .. Array Arguments ..
+* COMPLEX*16 A( LDA, * ), T( LDT, * ), WORK( * )
+* ..
+*
+*> \par Purpose:
+* =============
+*>
+*> \verbatim
+*>
+*> ZUNGTSQR generates an M-by-N complex matrix Q_out with orthonormal
+*> columns, which are the first N columns of a product of complex unitary
+*> matrices of order M which are returned by ZLATSQR
+*>
+*> Q_out = first_N_columns_of( Q(1)_in * Q(2)_in * ... * Q(k)_in ).
+*>
+*> See the documentation for ZLATSQR.
+*> \endverbatim
+*
+* Arguments:
+* ==========
+*
+*> \param[in] M
+*> \verbatim
+*> M is INTEGER
+*> The number of rows of the matrix A. M >= 0.
+*> \endverbatim
+*>
+*> \param[in] N
+*> \verbatim
+*> N is INTEGER
+*> The number of columns of the matrix A. M >= N >= 0.
+*> \endverbatim
+*>
+*> \param[in] MB
+*> \verbatim
+*> MB is INTEGER
+*> The row block size used by ZLATSQR to return
+*> arrays A and T. MB > N.
+*> (Note that if MB > M, then M is used instead of MB
+*> as the row block size).
+*> \endverbatim
+*>
+*> \param[in] NB
+*> \verbatim
+*> NB is INTEGER
+*> The column block size used by ZLATSQR to return
+*> arrays A and T. NB >= 1.
+*> (Note that if NB > N, then N is used instead of NB
+*> as the column block size).
+*> \endverbatim
+*>
+*> \param[in,out] A
+*> \verbatim
+*> A is COMPLEX*16 array, dimension (LDA,N)
+*>
+*> On entry:
+*>
+*> The elements on and above the diagonal are not accessed.
+*> The elements below the diagonal represent the unit
+*> lower-trapezoidal blocked matrix V computed by ZLATSQR
+*> that defines the input matrices Q_in(k) (ones on the
+*> diagonal are not stored) (same format as the output A
+*> below the diagonal in ZLATSQR).
+*>
+*> On exit:
+*>
+*> The array A contains an M-by-N orthonormal matrix Q_out,
+*> i.e. the columns of A are orthogonal unit vectors.
+*> \endverbatim
+*>
+*> \param[in] LDA
+*> \verbatim
+*> LDA is INTEGER
+*> The leading dimension of the array A. LDA >= max(1,M).
+*> \endverbatim
+*>
+*> \param[in] T
+*> \verbatim
+*> T is COMPLEX*16 array,
+*> dimension (LDT, N * NIRB)
+*> where NIRB = Number_of_input_row_blocks
+*> = MAX( 1, CEIL((M-N)/(MB-N)) )
+*> Let NICB = Number_of_input_col_blocks
+*> = CEIL(N/NB)
+*>
+*> The upper-triangular block reflectors used to define the
+*> input matrices Q_in(k), k=(1:NIRB*NICB). The block
+*> reflectors are stored in compact form in NIRB block
+*> reflector sequences. Each of NIRB block reflector sequences
+*> is stored in a larger NB-by-N column block of T and consists
+*> of NICB smaller NB-by-NB upper-triangular column blocks.
+*> (same format as the output T in ZLATSQR).
+*> \endverbatim
+*>
+*> \param[in] LDT
+*> \verbatim
+*> LDT is INTEGER
+*> The leading dimension of the array T.
+*> LDT >= max(1,min(NB,N)).
+*> \endverbatim
+*>
+*> \param[out] WORK
+*> \verbatim
+*> (workspace) COMPLEX*16 array, dimension (MAX(2,LWORK))
+*> On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+*> \endverbatim
+*>
+*> \param[in] LWORK
+*> \verbatim
+*> LWORK is INTEGER
+*> The dimension of the array WORK. LWORK >= (M+NB)*N.
+*> If LWORK = -1, then a workspace query is assumed.
+*> The routine only calculates the optimal size of the WORK
+*> array, returns this value as the first entry of the WORK
+*> array, and no error message related to LWORK is issued
+*> by XERBLA.
+*> \endverbatim
+*>
+*> \param[out] INFO
+*> \verbatim
+*> INFO is INTEGER
+*> = 0: successful exit
+*> < 0: if INFO = -i, the i-th argument had an illegal value
+*> \endverbatim
+*>
+* Authors:
+* ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+*
+*> \date November 2019
+*
+*> \ingroup complex16OTHERcomputational
+*
+*> \par Contributors:
+* ==================
+*>
+*> \verbatim
+*>
+*> November 2019, Igor Kozachenko,
+*> Computer Science Division,
+*> University of California, Berkeley
+*>
+*> \endverbatim
+*
+* =====================================================================
+ SUBROUTINE ZUNGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK,
+ $ INFO )
+ IMPLICIT NONE
+*
+* -- LAPACK computational routine (version 3.9.0) --
+* -- LAPACK is a software package provided by Univ. of Tennessee, --
+* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+* November 2019
+*
+* .. Scalar Arguments ..
+ INTEGER INFO, LDA, LDT, LWORK, M, N, MB, NB
+* ..
+* .. Array Arguments ..
+ COMPLEX*16 A( LDA, * ), T( LDT, * ), WORK( * )
+* ..
+*
+* =====================================================================
+*
+* .. Parameters ..
+ COMPLEX*16 CONE, CZERO
+ PARAMETER ( CONE = ( 1.0D+0, 0.0D+0 ),
+ $ CZERO = ( 0.0D+0, 0.0D+0 ) )
+* ..
+* .. Local Scalars ..
+ LOGICAL LQUERY
+ INTEGER IINFO, LDC, LWORKOPT, LC, LW, NBLOCAL, J
+* ..
+* .. External Subroutines ..
+ EXTERNAL ZCOPY, ZLAMTSQR, ZLASET, XERBLA
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC DCMPLX, MAX, MIN
+* ..
+* .. Executable Statements ..
+*
+* Test the input parameters
+*
+ LQUERY = LWORK.EQ.-1
+ INFO = 0
+ IF( M.LT.0 ) THEN
+ INFO = -1
+ ELSE IF( N.LT.0 .OR. M.LT.N ) THEN
+ INFO = -2
+ ELSE IF( MB.LE.N ) THEN
+ INFO = -3
+ ELSE IF( NB.LT.1 ) THEN
+ INFO = -4
+ ELSE IF( LDA.LT.MAX( 1, M ) ) THEN
+ INFO = -6
+ ELSE IF( LDT.LT.MAX( 1, MIN( NB, N ) ) ) THEN
+ INFO = -8
+ ELSE
+*
+* Test the input LWORK for the dimension of the array WORK.
+* This workspace is used to store array C(LDC, N) and WORK(LWORK)
+* in the call to ZLAMTSQR. See the documentation for ZLAMTSQR.
+*
+ IF( LWORK.LT.2 .AND. (.NOT.LQUERY) ) THEN
+ INFO = -10
+ ELSE
+*
+* Set block size for column blocks
+*
+ NBLOCAL = MIN( NB, N )
+*
+* Set the size for the array C(LDC,N) in the ZLAMTSQR call
+* and the optimal size of the work array WORK(LWORK) in the
+* ZLAMTSQR call.
+*
+ LDC = M
+ LC = LDC*N
+ LW = N * NBLOCAL
+*
+ LWORKOPT = LC+LW
+*
+ IF( ( LWORK.LT.MAX( 1, LWORKOPT ) ).AND.(.NOT.LQUERY) ) THEN
+ INFO = -10
+ END IF
+ END IF
+*
+ END IF
+*
+* Handle error in the input parameters and return workspace query.
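At this point LWORKOPT is fixed at LC+LW, the room for the M-by-N matrix C plus the ZLAMTSQR workspace, so callers can size WORK with LAPACK's usual two-call convention. A hedged sketch of that convention (the wrapper routine FORM_Q_SKETCH is an assumption for illustration, not an existing LAPACK routine):

      SUBROUTINE FORM_Q_SKETCH( M, N, MB, NB, A, LDA, T, LDT, INFO )
*     Illustrative wrapper: query ZUNGTSQR for its optimal workspace
*     with LWORK = -1, allocate it, then form Q_out in place in A.
      IMPLICIT NONE
      INTEGER            M, N, MB, NB, LDA, LDT, INFO
      COMPLEX*16         A( LDA, * ), T( LDT, * )
      INTEGER            LWORK
      COMPLEX*16         WQ( 2 )
      COMPLEX*16, ALLOCATABLE :: WORK( : )
      EXTERNAL           ZUNGTSQR
      INTRINSIC          INT
*     Query call: only WQ( 1 ) is set, to the optimal LWORK.
      CALL ZUNGTSQR( M, N, MB, NB, A, LDA, T, LDT, WQ, -1, INFO )
      IF( INFO.NE.0 ) RETURN
      LWORK = INT( WQ( 1 ) )
      ALLOCATE( WORK( LWORK ) )
*     Computational call with the returned workspace size.
      CALL ZUNGTSQR( M, N, MB, NB, A, LDA, T, LDT, WORK, LWORK, INFO )
      DEALLOCATE( WORK )
      END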
+*
+ IF( INFO.NE.0 ) THEN
+ CALL XERBLA( 'ZUNGTSQR', -INFO )
+ RETURN
+ ELSE IF ( LQUERY ) THEN
+ WORK( 1 ) = DCMPLX( LWORKOPT )
+ RETURN
+ END IF
+*
+* Quick return if possible
+*
+ IF( MIN( M, N ).EQ.0 ) THEN
+ WORK( 1 ) = DCMPLX( LWORKOPT )
+ RETURN
+ END IF
+*
+* (1) Form explicitly the tall-skinny M-by-N left submatrix Q1_in
+* of the M-by-M unitary matrix Q_in, which is implicitly stored in
+* the subdiagonal part of input array A and in the input array T.
+* This is performed by the following operation using the routine
+* ZLAMTSQR.
+*
+* Q1_in = Q_in * ( I ), where I is a N-by-N identity matrix,
+* ( 0 ) 0 is a (M-N)-by-N zero matrix.
+*
+* (1a) Form M-by-N matrix in the array WORK(1:LDC*N) with ones
+* on the diagonal and zeros elsewhere.
+*
+ CALL ZLASET( 'F', M, N, CZERO, CONE, WORK, LDC )
+*
+* (1b) On input, WORK(1:LDC*N) stores ( I );
+* ( 0 )
+*
+* On output, WORK(1:LDC*N) stores Q1_in.
+*
+ CALL ZLAMTSQR( 'L', 'N', M, N, N, MB, NBLOCAL, A, LDA, T, LDT,
+ $ WORK, LDC, WORK( LC+1 ), LW, IINFO )
+*
+* (2) Copy the result from the part of the work array (1:M,1:N)
+* with the leading dimension LDC that starts at WORK(1) into
+* the output array A(1:M,1:N) column-by-column.
+*
+ DO J = 1, N
+ CALL ZCOPY( M, WORK( (J-1)*LDC + 1 ), 1, A( 1, J ), 1 )
+ END DO
+*
+ WORK( 1 ) = DCMPLX( LWORKOPT )
+ RETURN
+*
+* End of ZUNGTSQR
+*
+ END
\ No newline at end of file
diff --git a/lapack-netlib/SRC/zunhr_col.f b/lapack-netlib/SRC/zunhr_col.f
new file mode 100644
index 000000000..71039fddb
--- /dev/null
+++ b/lapack-netlib/SRC/zunhr_col.f
@@ -0,0 +1,441 @@
+*> \brief \b ZUNHR_COL
+*
+* =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
+*
+*> \htmlonly
+*> Download ZUNHR_COL + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
+*> [TXT]
+*>
+* Definition:
+* ===========
+*
+* SUBROUTINE ZUNHR_COL( M, N, NB, A, LDA, T, LDT, D, INFO )
+*
+* .. Scalar Arguments ..
+* INTEGER INFO, LDA, LDT, M, N, NB
+* ..
+* .. Array Arguments ..
+* COMPLEX*16 A( LDA, * ), D( * ), T( LDT, * )
+* ..
+*
+*> \par Purpose:
+* =============
+*>
+*> \verbatim
+*>
+*> ZUNHR_COL takes an M-by-N complex matrix Q_in with orthonormal columns
+*> as input, stored in A, and performs Householder Reconstruction (HR),
+*> i.e. reconstructs Householder vectors V(i) implicitly representing
+*> another M-by-N matrix Q_out, with the property that Q_in = Q_out*S,
+*> where S is an N-by-N diagonal matrix with diagonal entries
+*> equal to +1 or -1. The Householder vectors (columns V(i) of V) are
+*> stored in A on output, and the diagonal entries of S are stored in D.
+*> Block reflectors are also returned in T
+*> (same output format as ZGEQRT).
+*> \endverbatim
+*
+* Arguments:
+* ==========
+*
+*> \param[in] M
+*> \verbatim
+*> M is INTEGER
+*> The number of rows of the matrix A. M >= 0.
+*> \endverbatim
+*>
+*> \param[in] N
+*> \verbatim
+*> N is INTEGER
+*> The number of columns of the matrix A. M >= N >= 0.
+*> \endverbatim
+*>
+*> \param[in] NB
+*> \verbatim
+*> NB is INTEGER
+*> The column block size to be used in the reconstruction
+*> of Householder column vector blocks in the array A and
+*> corresponding block reflectors in the array T. NB >= 1.
+*> (Note that if NB > N, then N is used instead of NB
+*> as the column block size.)
+*> \endverbatim
+*>
+*> \param[in,out] A
+*> \verbatim
+*> A is COMPLEX*16 array, dimension (LDA,N)
+*>
+*> On entry:
+*>
+*> The array A contains an M-by-N orthonormal matrix Q_in,
+*> i.e. the columns of A are orthogonal unit vectors.
+*>
+*> On exit:
+*>
+*> The elements below the diagonal of A represent the unit
+*> lower-trapezoidal matrix V of Householder column vectors
+*> V(i). The unit diagonal entries of V are not stored
+*> (same format as the output below the diagonal in A from
+*> ZGEQRT). The matrix T and the matrix V stored on output
+*> in A implicitly define Q_out.
+*>
+*> The elements above the diagonal contain the factor U
+*> of the "modified" LU-decomposition:
+*> Q_in - ( S ) = V * U
+*> ( 0 )
+*> where 0 is a (M-N)-by-N zero matrix.
+*> \endverbatim
+*>
+*> \param[in] LDA
+*> \verbatim
+*> LDA is INTEGER
+*> The leading dimension of the array A. LDA >= max(1,M).
+*> \endverbatim
+*>
+*> \param[out] T
+*> \verbatim
+*> T is COMPLEX*16 array,
+*> dimension (LDT, N)
+*>
+*> Let NOCB = Number_of_output_col_blocks
+*> = CEIL(N/NB)
+*>
+*> On exit, T(1:NB, 1:N) contains NOCB upper-triangular
+*> block reflectors used to define Q_out stored in compact
+*> form as a sequence of upper-triangular NB-by-NB column
+*> blocks (same format as the output T in ZGEQRT).
+*> The matrix T and the matrix V stored on output in A
+*> implicitly define Q_out. NOTE: The lower triangles
+*> below the upper-triangular blocks will be filled with
+*> zeros. See Further Details.
+*> \endverbatim
+*>
+*> \param[in] LDT
+*> \verbatim
+*> LDT is INTEGER
+*> The leading dimension of the array T.
+*> LDT >= max(1,min(NB,N)).
+*> \endverbatim
+*>
+*> \param[out] D
+*> \verbatim
+*> D is COMPLEX*16 array, dimension min(M,N).
+*> The elements can be only plus or minus one.
+*>
+*> D(i) is constructed as D(i) = -SIGN(Q_in_i(i,i)), where
+*> 1 <= i <= min(M,N), and Q_in_i is Q_in after performing
+*> i-1 steps of "modified" Gaussian elimination.
+*> See Further Details.
+*> \endverbatim
+*>
+*> \param[out] INFO
+*> \verbatim
+*> INFO is INTEGER
+*> = 0: successful exit
+*> < 0: if INFO = -i, the i-th argument had an illegal value
+*> \endverbatim
+*>
+*> \par Further Details:
+* =====================
+*>
+*> \verbatim
+*>
+*> The computed M-by-M unitary factor Q_out is defined implicitly as
+*> a product of unitary matrices Q_out(i). Each Q_out(i) is stored in
+*> the compact WY-representation format in the corresponding blocks of
+*> matrices V (stored in A) and T.
+*>
+*> The M-by-N unit lower-trapezoidal matrix V stored in the M-by-N
+*> matrix A contains the column vectors V(i) in NB-size column
+*> blocks VB(j). For example, VB(1) contains the columns
+*> V(1), V(2), ... V(NB). NOTE: The unit entries on
+*> the diagonal of V are not stored in A.
+*>
+*> The number of column blocks is
+*>
+*> NOCB = Number_of_output_col_blocks = CEIL(N/NB)
+*>
+*> where each block is of order NB except for the last block, which
+*> is of order LAST_NB = N - (NOCB-1)*NB.
+*>
+*> For example, if M=6, N=5 and NB=2, the matrix V is
+*>
+*>
+*> V = ( VB(1), VB(2), VB(3) ) =
+*>
+*> = ( 1 )
+*> ( v21 1 )
+*> ( v31 v32 1 )
+*> ( v41 v42 v43 1 )
+*> ( v51 v52 v53 v54 1 )
+*> ( v61 v62 v63 v64 v65 )
+*>
+*>
+*> For each of the column blocks VB(i), an upper-triangular block
+*> reflector TB(i) is computed. These blocks are stored as
+*> a sequence of upper-triangular column blocks in the NB-by-N
+*> matrix T. The size of each TB(i) block is NB-by-NB, except
+*> for the last block, whose size is LAST_NB-by-LAST_NB.
+*>
+*> For example, if M=6, N=5 and NB=2, the matrix T is
+*>
+*> T = ( TB(1), TB(2), TB(3) ) =
+*>
+*> = ( t11 t12 t13 t14 t15 )
+*> ( t22 t24 )
+*>
+*>
+*> The M-by-M factor Q_out is given as a product of NOCB
+*> unitary M-by-M matrices Q_out(i).
+*>
+*> Q_out = Q_out(1) * Q_out(2) * ... * Q_out(NOCB),
+*>
+*> where each matrix Q_out(i) is given by the WY-representation
+*> using corresponding blocks from the matrices V and T:
+*>
+*> Q_out(i) = I - VB(i) * TB(i) * (VB(i))**H,
+*>
+*> where I is the identity matrix. Here is the formula with matrix
+*> dimensions:
+*>
+*> Q(i){M-by-M} = I{M-by-M} -
+*> VB(i){M-by-INB} * TB(i){INB-by-INB} * (VB(i))**H {INB-by-M},
+*>
+*> where INB = NB, except for the last block NOCB
+*> for which INB=LAST_NB.
+*>
+*> =====
+*> NOTE:
+*> =====
+*>
+*> If Q_in is the result of doing a QR factorization
+*> B = Q_in * R_in, then:
+*>
+*> B = (Q_out*S) * R_in = Q_out * (S * R_in) = Q_out * R_out.
+*>
+*> So if one wants to interpret Q_out as the result
+*> of the QR factorization of B, then corresponding R_out
+*> should be obtained by R_out = S * R_in, i.e. some rows of R_in
+*> should be multiplied by -1.
+*>
+*> For the details of the algorithm, see [1].
+*>
+*> [1] "Reconstructing Householder vectors from tall-skinny QR",
+*> G. Ballard, J. Demmel, L. Grigori, M. Jacquelin, H.D. Nguyen,
+*> E. Solomonik, J. Parallel Distrib. Comput.,
+*> vol. 85, pp. 3-31, 2015.
+*> \endverbatim
+*>
+* Authors:
+* ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+*
+*> \date November 2019
+*
+*> \ingroup complex16OTHERcomputational
+*
+*> \par Contributors:
+* ==================
+*>
+*> \verbatim
+*>
+*> November 2019, Igor Kozachenko,
+*> Computer Science Division,
+*> University of California, Berkeley
+*>
+*> \endverbatim
+*
+* =====================================================================
+ SUBROUTINE ZUNHR_COL( M, N, NB, A, LDA, T, LDT, D, INFO )
+ IMPLICIT NONE
+*
+* -- LAPACK computational routine (version 3.9.0) --
+* -- LAPACK is a software package provided by Univ. of Tennessee, --
+* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+* November 2019
+*
+* .. Scalar Arguments ..
+ INTEGER INFO, LDA, LDT, M, N, NB
+* ..
+* .. Array Arguments ..
+ COMPLEX*16 A( LDA, * ), D( * ), T( LDT, * )
+* ..
+*
+* =====================================================================
+*
+* .. Parameters ..
+ COMPLEX*16 CONE, CZERO
+ PARAMETER ( CONE = ( 1.0D+0, 0.0D+0 ),
+ $ CZERO = ( 0.0D+0, 0.0D+0 ) )
+* ..
+* .. Local Scalars ..
+ INTEGER I, IINFO, J, JB, JBTEMP1, JBTEMP2, JNB,
+ $ NPLUSONE
+* ..
+* .. External Subroutines ..
+ EXTERNAL ZCOPY, ZLAUNHR_COL_GETRFNP, ZSCAL, ZTRSM,
+ $ XERBLA
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC MAX, MIN
+* ..
+* .. Executable Statements ..
+*
+* Test the input parameters
+*
+ INFO = 0
+ IF( M.LT.0 ) THEN
+ INFO = -1
+ ELSE IF( N.LT.0 .OR. N.GT.M ) THEN
+ INFO = -2
+ ELSE IF( NB.LT.1 ) THEN
+ INFO = -3
+ ELSE IF( LDA.LT.MAX( 1, M ) ) THEN
+ INFO = -5
+ ELSE IF( LDT.LT.MAX( 1, MIN( NB, N ) ) ) THEN
+ INFO = -7
+ END IF
+*
+* Handle error in the input parameters.
+*
+ IF( INFO.NE.0 ) THEN
+ CALL XERBLA( 'ZUNHR_COL', -INFO )
+ RETURN
+ END IF
+*
+* Quick return if possible
+*
+ IF( MIN( M, N ).EQ.0 ) THEN
+ RETURN
+ END IF
+*
+* On input, the M-by-N matrix A contains the unitary
+* M-by-N matrix Q_in.
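Because the parameter checks above mirror those of ZUNGTSQR, the new routines compose into the TSQR-plus-reconstruction pipeline their Purpose sections describe: ZLATSQR factors a tall-skinny matrix, ZUNGTSQR forms the explicit Q_in, and ZUNHR_COL recovers the Householder vectors, block reflectors, and sign matrix S from it. A self-contained sketch (the dimensions M=12, N=4, MB=6, NB=2 and the Cauchy-like test matrix are illustrative assumptions; a real caller would save the R factor from ZLATSQR before ZUNGTSQR overwrites it):

      PROGRAM HR_PIPELINE_SKETCH
      IMPLICIT NONE
      INTEGER            M, N, MB, NB, NIRB
      PARAMETER          ( M = 12, N = 4, MB = 6, NB = 2, NIRB = 4 )
      INTEGER            INFO, I, J
      COMPLEX*16         A( M, N ), T1( NB, N*NIRB ), T2( NB, N ),
     $                   D( N ), WORK( (M+NB)*N )
*     Assemble a full-rank Cauchy-like tall-skinny test matrix.
      DO J = 1, N
         DO I = 1, M
            A( I, J ) = DCMPLX( 1.0D0 / DBLE( I+J ), 0.0D0 )
         END DO
      END DO
*     TSQR factorization: V below the diagonal of A, reflectors in T1.
      CALL ZLATSQR( M, N, MB, NB, A, M, T1, NB, WORK, NB*N, INFO )
*     Form the explicit M-by-N Q_in in A (this overwrites R).
      IF( INFO.EQ.0 )
     $   CALL ZUNGTSQR( M, N, MB, NB, A, M, T1, NB, WORK,
     $                  (M+NB)*N, INFO )
*     Reconstruct Householder vectors, block reflectors T2, signs D.
      IF( INFO.EQ.0 )
     $   CALL ZUNHR_COL( M, N, NB, A, M, T2, NB, D, INFO )
      PRINT *, 'INFO =', INFO, '  D =', ( D( J ), J = 1, N )
      END

Here NIRB = MAX(1, CEIL((M-N)/(MB-N))) = 4 follows the T dimension formula quoted in the ZUNGTSQR documentation above.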
+*
+* (1) Compute the unit lower-trapezoidal V (ones on the diagonal
+* are not stored) by performing the "modified" LU-decomposition.
+*
+* Q_in - ( S ) = V * U = ( V1 ) * U,
+* ( 0 ) ( V2 )
+*
+* where 0 is an (M-N)-by-N zero matrix.
+*
+* (1-1) Factor V1 and U.
+
+ CALL ZLAUNHR_COL_GETRFNP( N, N, A, LDA, D, IINFO )
+*
+* (1-2) Solve for V2.
+*
+ IF( M.GT.N ) THEN
+ CALL ZTRSM( 'R', 'U', 'N', 'N', M-N, N, CONE, A, LDA,
+ $ A( N+1, 1 ), LDA )
+ END IF
+*
+* (2) Reconstruct the block reflector T stored in T(1:NB, 1:N)
+* as a sequence of upper-triangular blocks with NB-size column
+* blocking.
+*
+* Loop over the column blocks of size NB of the array A(1:M,1:N)
+* and the array T(1:NB,1:N), JB is the column index of a column
+* block, JNB is the column block size at each step JB.
+*
+ NPLUSONE = N + 1
+ DO JB = 1, N, NB
+*
+* (2-0) Determine the column block size JNB.
+*
+ JNB = MIN( NPLUSONE-JB, NB )
+*
+* (2-1) Copy the upper-triangular part of the current JNB-by-JNB
+* diagonal block U(JB) (of the N-by-N matrix U) stored
+* in A(JB:JB+JNB-1,JB:JB+JNB-1) into the upper-triangular part
+* of the current JNB-by-JNB block T(1:JNB,JB:JB+JNB-1)
+* column-by-column, total JNB*(JNB+1)/2 elements.
+*
+ JBTEMP1 = JB - 1
+ DO J = JB, JB+JNB-1
+ CALL ZCOPY( J-JBTEMP1, A( JB, J ), 1, T( 1, J ), 1 )
+ END DO
+*
+* (2-2) Perform on the upper-triangular part of the current
+* JNB-by-JNB diagonal block U(JB) (of the N-by-N matrix U) stored
+* in T(1:JNB,JB:JB+JNB-1) the following operation in place:
+* (-1)*U(JB)*S(JB), i.e. the result will be stored in the upper-
+* triangular part of T(1:JNB,JB:JB+JNB-1). This multiplication
+* of the JNB-by-JNB diagonal block U(JB) by the JNB-by-JNB
+* diagonal block S(JB) of the N-by-N sign matrix S from the
+* right means changing the sign of each J-th column of the block
+* U(JB) according to the sign of the diagonal element of the block
+* S(JB), i.e. S(J,J) that is stored in the array element D(J).
+*
+ DO J = JB, JB+JNB-1
+ IF( D( J ).EQ.CONE ) THEN
+ CALL ZSCAL( J-JBTEMP1, -CONE, T( 1, J ), 1 )
+ END IF
+ END DO
+*
+* (2-3) Perform the triangular solve for the current block
+* matrix X(JB):
+*
+* X(JB) * (A(JB)**H) = B(JB), where:
+*
+* A(JB)**H is a JNB-by-JNB unit upper-triangular
+* coefficient block, and A(JB)=V1(JB), which
+* is a JNB-by-JNB unit lower-triangular block
+* stored in A(JB:JB+JNB-1,JB:JB+JNB-1).
+* The N-by-N matrix V1 is the upper part
+* of the M-by-N lower-trapezoidal matrix V
+* stored in A(1:M,1:N);
+*
+* B(JB) is a JNB-by-JNB upper-triangular right-hand
+* side block, B(JB) = (-1)*U(JB)*S(JB), and
+* B(JB) is stored in T(1:JNB,JB:JB+JNB-1);
+*
+* X(JB) is a JNB-by-JNB upper-triangular solution
+* block, X(JB) is the upper-triangular block
+* reflector T(JB), and X(JB) is stored
+* in T(1:JNB,JB:JB+JNB-1).
+*
+* In other words, we perform the triangular solve for the
+* upper-triangular block T(JB):
+*
+* T(JB) * (V1(JB)**H) = (-1)*U(JB)*S(JB).
+*
+* Even though the blocks X(JB) and B(JB) are upper-
+* triangular, the routine ZTRSM will access all JNB**2
+* elements of the square T(1:JNB,JB:JB+JNB-1). Therefore,
+* we need to set to zero the elements of the block
+* T(1:JNB,JB:JB+JNB-1) below the diagonal before the call
+* to ZTRSM.
+*
+* (2-3a) Set the elements to zero.
+*
+ JBTEMP2 = JB - 2
+ DO J = JB, JB+JNB-2
+ DO I = J-JBTEMP2, NB
+ T( I, J ) = CZERO
+ END DO
+ END DO
+*
+* (2-3b) Perform the triangular solve.
+*
+ CALL ZTRSM( 'R', 'L', 'C', 'U', JNB, JNB, CONE,
+ $ A( JB, JB ), LDA, T( 1, JB ), LDT )
+*
+ END DO
+*
+ RETURN
+*
+* End of ZUNHR_COL
+*
+ END
\ No newline at end of file
diff --git a/lapack-netlib/TESTING/CMakeLists.txt b/lapack-netlib/TESTING/CMakeLists.txt
index ec3d85221..80e6b3232 100644
--- a/lapack-netlib/TESTING/CMakeLists.txt
+++ b/lapack-netlib/TESTING/CMakeLists.txt
@@ -1,3 +1,7 @@
+enable_language(Fortran)
+
+enable_testing()
+
 if(MSVC_VERSION)
 # string(REPLACE "/STACK:10000000" "/STACK:900000000000000000"
 # CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}")
@@ -161,10 +165,401 @@ endif()
 # Only run this test if python 2.7 or greater is found
 if(PYTHONINTERP_FOUND)
 message(STATUS "Running Summary")
- execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${LAPACK_SOURCE_DIR}/lapack_testing.py ${LAPACK_BINARY_DIR})
+ file(COPY ${LAPACK_SOURCE_DIR}/lapack_testing.py DESTINATION ${LAPACK_BINARY_DIR})
 add_test(
 NAME LAPACK_Test_Summary
 WORKING_DIRECTORY ${LAPACK_BINARY_DIR}
 COMMAND ${PYTHON_EXECUTABLE} "lapack_testing.py"
 )
 endif()
+
+
+
+# $1 exec, $2 input, $3 output_result
+FILE(WRITE ${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh
+"rm -f $3\n"
+"$1 < $2 > $3\n"
+"grep -q FATAL $3\n"
+"if [ $? -eq 0 ]; then\n"
+"echo Error\n"
+"exit 1\n"
+"else\n"
+"exit 0\n"
+"fi\n"
+)
+
+
+add_test(NAME "REAL_LAPACK_linear_equation_routines"
+ COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/stest.in" "${CMAKE_CURRENT_BINARY_DIR}/stest.out"
+)
+add_test(NAME "COMPLEX_LAPACK_linear_equation_routines"
+ COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/ctest.in" "${CMAKE_CURRENT_BINARY_DIR}/ctest.out"
+)
+add_test(NAME "DOUBLE_PRECISION_LAPACK_linear_equation_routines"
+ COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dtest.in" "${CMAKE_CURRENT_BINARY_DIR}/dtest.out"
+)
+add_test(NAME "COMPLEX16_LAPACK_linear_equation_routines"
+ COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/ztest.in" "${CMAKE_CURRENT_BINARY_DIR}/ztest.out"
+)
+
+add_test(NAME "SINGLE-DOUBLE_PRECISION_LAPACK_prototype_linear_equation_routines"
+ COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstds" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dstest.in" " ${CMAKE_CURRENT_BINARY_DIR}/dstest.out"
+)
+# ======== COMPLEX-COMPLEX16 LIN TESTS ========================
+
+add_test(NAME "Testing_COMPLEX-COMPLEX16_LAPACK_prototype_linear_equation_routines"
+ COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstzc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zctest.in" " ${CMAKE_CURRENT_BINARY_DIR}/zctest.out"
+)
+
+# ======== SINGLE RFP LIN TESTS ========================
+
+add_test(NAME "Testing_REAL_LAPACK_RFP_prototype_linear_equation_routines"
+ COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstrfs" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/stest_rfp.in" "${CMAKE_CURRENT_BINARY_DIR}/stest_rfp.out"
+)
+
+# ======== DOUBLE PRECISION RFP LIN TESTS ========================
+
+add_test(NAME "Testing_DOUBLE_PRECISION_LAPACK_RFP_prototype_linear_equation_routines"
+ COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh"
"${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstrfd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dtest_rfp.in" " ${CMAKE_CURRENT_BINARY_DIR}/dtest_rfp.out" +) +# ======== COMPLEX16 RFP LIN TESTS ======================== + +add_test(NAME "Testing_COMPLEX_LAPACK_RFP_prototype_linear_equation_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstrfc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/ctest_rfp.in" " ${CMAKE_CURRENT_BINARY_DIR}/ctest_rfp.out" +) + +# ======== COMPLEX16 RFP LIN TESTS ======================== + +add_test(NAME "Testing_COMPLEX16_LAPACK_RFP_prototype_linear_equation_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/LIN/xlintstrfz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/ztest_rfp.in" " ${CMAKE_CURRENT_BINARY_DIR}/ztest_rfp.out" +) +# +# +# ======== SINGLE EIG TESTS =========================== +# + +add_test(NAME "SNEP:_Testing_Nonsymmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/nep.in" " ${CMAKE_CURRENT_BINARY_DIR}/snep.out" +) + +add_test(NAME "SSEP:_Testing_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sep.in" " ${CMAKE_CURRENT_BINARY_DIR}/ssep.out" +) + +add_test(NAME "SSE2:_Testing_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/se2.in" " ${CMAKE_CURRENT_BINARY_DIR}/sse2.out" +) + +add_test(NAME "SSVD:_Testing_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/svd.in" " ${CMAKE_CURRENT_BINARY_DIR}/ssvd.out" +) + +add_test(NAME "SSEC:_Testing_REAL_Eigen_Condition_Routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sec.in" " ${CMAKE_CURRENT_BINARY_DIR}/sec.out" +) + +add_test(NAME "SSEV:_Testing_REAL_Nonsymmetric_Eigenvalue_Driver" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sed.in" " ${CMAKE_CURRENT_BINARY_DIR}/sed.out" +) + +add_test(NAME "SGG:_Testing_REAL_Nonsymmetric_Generalized_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sgg.in" " ${CMAKE_CURRENT_BINARY_DIR}/sgg.out" +) + +add_test(NAME "SGD:_Testing_REAL_Nonsymmetric_Generalized_Eigenvalue_Problem_driver_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sgd.in" " ${CMAKE_CURRENT_BINARY_DIR}/sgd.out" +) + +add_test(NAME "SSB:_Testing_REAL_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/ssb.in" " ${CMAKE_CURRENT_BINARY_DIR}/ssb.out" +) + +add_test(NAME "SSG:_Testing_REAL_Symmetric_Generalized_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" 
"${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/ssg.in" " ${CMAKE_CURRENT_BINARY_DIR}/ssg.out" +) + +add_test(NAME "SGEBAL:_Testing_the_balancing_of_a_REAL_general_matrix" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sbal.in" " ${CMAKE_CURRENT_BINARY_DIR}/sbal.out" +) + +add_test(NAME "SGEBAK:_Testing_the_back_transformation_of_a_REAL_balanced_matrix" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sbak.in" " ${CMAKE_CURRENT_BINARY_DIR}/sbak.out" +) + +add_test(NAME "SGGBAL:_Testing_the_balancing_of_a_pair_of_REAL_general_matrices" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sgbal.in" " ${CMAKE_CURRENT_BINARY_DIR}/sgbal.out" +) + +add_test(NAME "SGGBAK:_Testing_the_back_transformation_of_a_pair_of_REAL_balanced_matrices" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sgbak.in" " ${CMAKE_CURRENT_BINARY_DIR}/sgbak.out" +) + +add_test(NAME "SBB:_Testing_banded_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sbb.in" " ${CMAKE_CURRENT_BINARY_DIR}/sbb.out" +) + +add_test(NAME "SGLM:_Testing_Generalized_Linear_Regression_Model_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/glm.in" " ${CMAKE_CURRENT_BINARY_DIR}/sglm.out" +) + +add_test(NAME "SGQR:_Testing_Generalized_QR_and_RQ_factorization_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/gqr.in" " ${CMAKE_CURRENT_BINARY_DIR}/sgqr.out" +) + +add_test(NAME "SGSV:_Testing_Generalized_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/gsv.in" "${CMAKE_CURRENT_BINARY_DIR}/sgsv.out" +) + +add_test(NAME "SCSD:_Testing_CS_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/csd.in" " ${CMAKE_CURRENT_BINARY_DIR}/scsd.out" +) + +add_test(NAME "SLSE:_Testing_Constrained_Linear_Least_Squares_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtsts" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/lse.in" " ${CMAKE_CURRENT_BINARY_DIR}/slse.out" +) + +# ======== COMPLEX EIG TESTS =========================== + +add_test(NAME "CNEP:_Testing_Nonsymmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/nep.in" " ${CMAKE_CURRENT_BINARY_DIR}/cnep.out" +) + +add_test(NAME "CSEP:_Testing_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sep.in" " ${CMAKE_CURRENT_BINARY_DIR}/csep.out" +) + +add_test(NAME 
"CSE2:_Testing_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/se2.in" " ${CMAKE_CURRENT_BINARY_DIR}/cse2.out" +) + +add_test(NAME "CSVD:_Testing_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/svd.in" " ${CMAKE_CURRENT_BINARY_DIR}/csvd.out" +) + +add_test(NAME "CEC:_Testing_COMPLEX_Eigen_Condition_Routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/cec.in" " ${CMAKE_CURRENT_BINARY_DIR}/cec.out" +) + +add_test(NAME "CES:_Testing_COMPLEX_Nonsymmetric_Schur_Form_Driver" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/ced.in" " ${CMAKE_CURRENT_BINARY_DIR}/ced.out" +) + +add_test(NAME "CGG:_Testing_COMPLEX_Nonsymmetric_Generalized_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/cgg.in" " ${CMAKE_CURRENT_BINARY_DIR}/cgg.out" +) + +add_test(NAME "CGD:_Testing_COMPLEX_Nonsymmetric_Generalized_Eigenvalue_Problem_driver_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/cgd.in" " ${CMAKE_CURRENT_BINARY_DIR}/cgd.out" +) + +add_test(NAME "CHB:_Testing_Hermitian_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/csb.in" " ${CMAKE_CURRENT_BINARY_DIR}/csb.out" +) + +add_test(NAME "CSG:_Testing_Symmetric_Generalized_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/csg.in" " ${CMAKE_CURRENT_BINARY_DIR}/csg.out" +) + +add_test(NAME "CGEBAL:_Testing_the_balancing_of_a_COMPLEX_general_matrix" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/cbal.in" " ${CMAKE_CURRENT_BINARY_DIR}/cbal.out" +) + +add_test(NAME "CGEBAK:_Testing_the_back_transformation_of_a_COMPLEX_balanced_matrix" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/cbak.in" " ${CMAKE_CURRENT_BINARY_DIR}/cbak.out" +) + +add_test(NAME "CGGBAL:_Testing_the_balancing_of_a_pair_of_COMPLEX_general_matrices" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/cgbal.in" " ${CMAKE_CURRENT_BINARY_DIR}/cgbal.out" +) + +add_test(NAME "CGGBAK:_Testing_the_back_transformation_of_a_pair_of_COMPLEX_balanced_matrices" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/cgbak.in" " ${CMAKE_CURRENT_BINARY_DIR}/cgbak.out" +) + +add_test(NAME "CBB:_Testing_banded_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/cbb.in" " 
${CMAKE_CURRENT_BINARY_DIR}/cbb.out" +) + +add_test(NAME "CGLM:_Testing_Generalized_Linear_Regression_Model_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/glm.in" " ${CMAKE_CURRENT_BINARY_DIR}/cglm.out" +) + +add_test(NAME "CGQR:_Testing_Generalized_QR_and_RQ_factorization_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/gqr.in" " ${CMAKE_CURRENT_BINARY_DIR}/cgqr.out" +) + +add_test(NAME "CGSV:_Testing_Generalized_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/gsv.in" " ${CMAKE_CURRENT_BINARY_DIR}/cgsv.out" +) + +add_test(NAME "CCSD:_Testing_CS_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/csd.in" " ${CMAKE_CURRENT_BINARY_DIR}/ccsd.out" +) + +add_test(NAME "CLSE:_Testing_Constrained_Linear_Least_Squares_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstc" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/lse.in" " ${CMAKE_CURRENT_BINARY_DIR}/clse.out" +) + +# ======== DOUBLE EIG TESTS =========================== + +add_test(NAME "DNEP:_Testing_Nonsymmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/nep.in" " ${CMAKE_CURRENT_BINARY_DIR}/dnep.out" +) + +add_test(NAME "DSEP:_Testing_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sep.in" " ${CMAKE_CURRENT_BINARY_DIR}/dsep.out" +) + +add_test(NAME "DSE2:_Testing_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/se2.in" " ${CMAKE_CURRENT_BINARY_DIR}/dse2.out" +) + +add_test(NAME "DSVD:_Testing_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/svd.in" " ${CMAKE_CURRENT_BINARY_DIR}/dsvd.out" +) + +add_test(NAME "DEC:_Testing_DOUBLE_PRECISION_Eigen_Condition_Routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dec.in" " ${CMAKE_CURRENT_BINARY_DIR}/dec.out" +) + +add_test(NAME "DEV:_Testing_DOUBLE_PRECISION_Nonsymmetric_Eigenvalue_Driver" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/ded.in" " ${CMAKE_CURRENT_BINARY_DIR}/ded.out" +) + +add_test(NAME "DGG:_Testing_DOUBLE_PRECISION_Nonsymmetric_Generalized_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dgg.in" " ${CMAKE_CURRENT_BINARY_DIR}/dgg.out" +) + +add_test(NAME "DGD:_Testing_DOUBLE_PRECISION_Nonsymmetric_Generalized_Eigenvalue_Problem_driver_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" 
"${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dgd.in" " ${CMAKE_CURRENT_BINARY_DIR}/dgd.out" +) + +add_test(NAME "DSB:_Testing_DOUBLE_PRECISION_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dsb.in" " ${CMAKE_CURRENT_BINARY_DIR}/dsb.out" +) + +add_test(NAME "DSG:_Testing_DOUBLE_PRECISION_Symmetric_Generalized_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dsg.in" " ${CMAKE_CURRENT_BINARY_DIR}/dsg.out" +) + +add_test(NAME "DGEBAL:_Testing_the_balancing_of_a_DOUBLE_PRECISION_general_matrix" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dbal.in" " ${CMAKE_CURRENT_BINARY_DIR}/dbal.out" +) + +add_test(NAME "DGEBAK:_Testing_the_back_transformation_of_a_DOUBLE_PRECISION_balanced_matrix" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dbak.in" " ${CMAKE_CURRENT_BINARY_DIR}/dbak.out" +) + +add_test(NAME "DGGBAL:_Testing_the_balancing_of_a_pair_of_DOUBLE_PRECISION_general_matrices" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dgbal.in" " ${CMAKE_CURRENT_BINARY_DIR}/dgbal.out" +) + +add_test(NAME "DGGBAK:_Testing_the_back_transformation_of_a_pair_of_DOUBLE_PRECISION_balanced_matrices" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dgbak.in" " ${CMAKE_CURRENT_BINARY_DIR}/dgbak.out" +) + +add_test(NAME "DBB:_Testing_banded_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/dbb.in" " ${CMAKE_CURRENT_BINARY_DIR}/dbb.out" +) + +add_test(NAME "DGLM:_Testing_Generalized_Linear_Regression_Model_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/glm.in" " ${CMAKE_CURRENT_BINARY_DIR}/dglm.out" +) + +add_test(NAME "DGQR:_Testing_Generalized_QR_and_RQ_factorization_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/gqr.in" " ${CMAKE_CURRENT_BINARY_DIR}/dgqr.out" +) + +add_test(NAME "DGSV:_Testing_Generalized_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/gsv.in" " ${CMAKE_CURRENT_BINARY_DIR}/dgsv.out" +) + +add_test(NAME "DCSD:_Testing_CS_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/csd.in" " ${CMAKE_CURRENT_BINARY_DIR}/dcsd.out" +) + +add_test(NAME "DLSE:_Testing_Constrained_Linear_Least_Squares_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstd" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/lse.in" " ${CMAKE_CURRENT_BINARY_DIR}/dlse.out" +) + +# ======== COMPLEX16 
EIG TESTS =========================== + +add_test(NAME "ZNEP:_Testing_Nonsymmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/nep.in" " ${CMAKE_CURRENT_BINARY_DIR}/znep.out" +) + +add_test(NAME "ZSEP:_Testing_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/sep.in" " ${CMAKE_CURRENT_BINARY_DIR}/zsep.out" +) + +add_test(NAME "ZSE2:_Testing_Symmetric_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/se2.in" " ${CMAKE_CURRENT_BINARY_DIR}/zse2.out" +) + +add_test(NAME "ZSVD:_Testing_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/svd.in" " ${CMAKE_CURRENT_BINARY_DIR}/zsvd.out" +) + +add_test(NAME "ZEC:_Testing_COMPLEX16_Eigen_Condition_Routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zec.in" " ${CMAKE_CURRENT_BINARY_DIR}/zec.out" +) + +add_test(NAME "ZES:_Testing_COMPLEX16_Nonsymmetric_Schur_Form_Driver" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zed.in" " ${CMAKE_CURRENT_BINARY_DIR}/zed.out" +) + +add_test(NAME "ZGG:_Testing_COMPLEX16_Nonsymmetric_Generalized_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zgg.in" " ${CMAKE_CURRENT_BINARY_DIR}/zgg.out" +) + +add_test(NAME "ZGD:_Testing_COMPLEX16_Nonsymmetric_Generalized_Eigenvalue_Problem_driver_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zgd.in" " ${CMAKE_CURRENT_BINARY_DIR}/zgd.out" +) + +add_test(NAME "ZHB:_Testing_Hermitian_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zsb.in" " ${CMAKE_CURRENT_BINARY_DIR}/zsb.out" +) + +add_test(NAME "ZSG:_Testing_Symmetric_Generalized_Eigenvalue_Problem_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zsg.in" " ${CMAKE_CURRENT_BINARY_DIR}/zsg.out" +) + +add_test(NAME "ZGEBAL:_Testing_the_balancing_of_a_COMPLEX16_general_matrix" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zbal.in" " ${CMAKE_CURRENT_BINARY_DIR}/zbal.out" +) + +add_test(NAME "ZGEBAK:_Testing_the_back_transformation_of_a_COMPLEX16_balanced_matrix" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zbak.in" " ${CMAKE_CURRENT_BINARY_DIR}/zbak.out" +) + +add_test(NAME "ZGGBAL:_Testing_the_balancing_of_a_pair_of_COMPLEX_general_matrices" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" 
"${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zgbal.in" " ${CMAKE_CURRENT_BINARY_DIR}/zgbal.out" +) + +add_test(NAME "ZGGBAK:_Testing_the_back_transformation_of_a_pair_of_COMPLEX16_balanced_matrices" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zgbak.in" " ${CMAKE_CURRENT_BINARY_DIR}/zgbak.out" +) + +add_test(NAME "ZBB:_Testing_banded_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/zbb.in" " ${CMAKE_CURRENT_BINARY_DIR}/zbb.out" +) + +add_test(NAME "ZGLM:_Testing_Generalized_Linear_Regression_Model_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/glm.in" " ${CMAKE_CURRENT_BINARY_DIR}/zglm.out" +) + +add_test(NAME "ZGQR:_Testing_Generalized_QR_and_RQ_factorization_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/gqr.in" " ${CMAKE_CURRENT_BINARY_DIR}/zgqr.out" +) + +add_test(NAME "ZGSV:_Testing_Generalized_Singular_Value_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/gsv.in" " ${CMAKE_CURRENT_BINARY_DIR}/zgsv.out" +) + +add_test(NAME "ZCSD:_Testing_CS_Decomposition_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/csd.in" " ${CMAKE_CURRENT_BINARY_DIR}/zcsd.out" +) + +add_test(NAME "Constrained_Linear_Least_Squares_routines" + COMMAND sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh" "${CMAKE_CURRENT_BINARY_DIR}/EIG/xeigtstz" "${PROJECT_SOURCE_DIR}/lapack-netlib/TESTING/lse.in" " ${CMAKE_CURRENT_BINARY_DIR}/zlse.out" +) diff --git a/lapack-netlib/TESTING/EIG/CMakeLists.txt b/lapack-netlib/TESTING/EIG/CMakeLists.txt index 20fd25b4a..e877b1422 100644 --- a/lapack-netlib/TESTING/EIG/CMakeLists.txt +++ b/lapack-netlib/TESTING/EIG/CMakeLists.txt @@ -98,7 +98,7 @@ set(ZEIGTST zchkee.f macro(add_eig_executable name) add_executable(${name} ${ARGN}) - target_link_libraries(${name} tmglib ${LAPACK_LIBRARIES} ${BLAS_LIBRARIES}) + target_link_libraries(${name} openblas${SUFFIX64_UNDERSCORE}) endmacro() if(BUILD_SINGLE) diff --git a/lapack-netlib/TESTING/EIG/Makefile b/lapack-netlib/TESTING/EIG/Makefile index 78046125a..b3efebcd0 100644 --- a/lapack-netlib/TESTING/EIG/Makefile +++ b/lapack-netlib/TESTING/EIG/Makefile @@ -1,5 +1,3 @@ -include ../../make.inc - ######################################################################## # This is the makefile for the eigenvalue test program from LAPACK. # The test files are organized as follows: @@ -33,6 +31,9 @@ include ../../make.inc # ######################################################################## +TOPSRCDIR = ../.. 
+include $(TOPSRCDIR)/make.inc + AEIGTST = \ alahdg.o \ alasum.o \ @@ -117,24 +118,26 @@ ZEIGTST = zchkee.o \ zsgt01.o zslect.o \ zstt21.o zstt22.o zunt01.o zunt03.o +.PHONY: all all: single complex double complex16 +.PHONY: single complex double complex16 single: xeigtsts complex: xeigtstc double: xeigtstd complex16: xeigtstz -xeigtsts: $(SEIGTST) $(SCIGTST) $(AEIGTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xeigtsts: $(SEIGTST) $(SCIGTST) $(AEIGTST) $(TMGLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xeigtstc: $(CEIGTST) $(SCIGTST) $(AEIGTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xeigtstc: $(CEIGTST) $(SCIGTST) $(AEIGTST) $(TMGLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xeigtstd: $(DEIGTST) $(DZIGTST) $(AEIGTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xeigtstd: $(DEIGTST) $(DZIGTST) $(AEIGTST) $(TMGLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xeigtstz: $(ZEIGTST) $(DZIGTST) $(AEIGTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xeigtstz: $(ZEIGTST) $(DZIGTST) $(AEIGTST) $(TMGLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ $(AEIGTST): $(FRC) $(SCIGTST): $(FRC) @@ -147,6 +150,7 @@ $(ZEIGTST): $(FRC) FRC: @FRC=$(FRC) +.PHONY: clean cleanobj cleanexe clean: cleanobj cleanexe cleanobj: rm -f *.o @@ -154,13 +158,10 @@ cleanexe: rm -f xeigtst* schkee.o: schkee.f - $(FORTRAN) $(DRVOPTS) -c -o $@ $< + $(FC) $(FFLAGS_DRV) -c -o $@ $< dchkee.o: dchkee.f - $(FORTRAN) $(DRVOPTS) -c -o $@ $< + $(FC) $(FFLAGS_DRV) -c -o $@ $< cchkee.o: cchkee.f - $(FORTRAN) $(DRVOPTS) -c -o $@ $< + $(FC) $(FFLAGS_DRV) -c -o $@ $< zchkee.o: zchkee.f - $(FORTRAN) $(DRVOPTS) -c -o $@ $< - -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< + $(FC) $(FFLAGS_DRV) -c -o $@ $< diff --git a/lapack-netlib/TESTING/EIG/cbdt05.f b/lapack-netlib/TESTING/EIG/cbdt05.f index 192a8d0b6..5a08ccce3 100644 --- a/lapack-netlib/TESTING/EIG/cbdt05.f +++ b/lapack-netlib/TESTING/EIG/cbdt05.f @@ -52,6 +52,7 @@ *> \verbatim *> A is COMPLEX array, dimension (LDA,N) *> The m by n matrix A. +*> \endverbatim *> *> \param[in] LDA *> \verbatim diff --git a/lapack-netlib/TESTING/EIG/cchkhb2stg.f b/lapack-netlib/TESTING/EIG/cchkhb2stg.f index 61537f44b..100f133ab 100644 --- a/lapack-netlib/TESTING/EIG/cchkhb2stg.f +++ b/lapack-netlib/TESTING/EIG/cchkhb2stg.f @@ -680,8 +680,8 @@ * the one from above. Compare it with D1 computed * using the DSBTRD. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL SLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL SLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL CLACPY( ' ', K+1, N, A, LDA, U, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH @@ -753,8 +753,8 @@ * the one from above. Compare it with D1 computed * using the DSBTRD. 
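The xLASET hunks above and below fix two real bugs at once: in this complex single-precision tester the arrays SD and SE are REAL, so zeroing them with the double-precision DLASET writes 8-byte zeros over 4-byte elements and runs past the end of each array, and the leading-dimension argument was 1 even though an n-by-1 matrix requires a leading dimension of at least max(1,n). A standalone sketch of the corrected call (hypothetical file, precision and dimensions chosen to mirror the fix):

    ! laset_fix.f90 -- sketch: zero two n-vectors with the precision-matched
    ! xLASET and a valid leading dimension (LDA = n for an n-by-1 matrix).
    program laset_fix
      implicit none
      integer, parameter :: n = 5
      real :: sd(n), se(n)
      real, parameter :: zero = 0.0
      sd = -1.0
      se = -1.0
      call slaset('Full', n, 1, zero, zero, sd, n)  ! SLASET for REAL data
      call slaset('Full', n, 1, zero, zero, se, n)
      print *, sd
      print *, se
    end program laset_fix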
* - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL SLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL SLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL CLACPY( ' ', K+1, N, A, LDA, U, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH diff --git a/lapack-netlib/TESTING/EIG/cchkst.f b/lapack-netlib/TESTING/EIG/cchkst.f index 471fe9c92..2d25f3fb1 100644 --- a/lapack-netlib/TESTING/EIG/cchkst.f +++ b/lapack-netlib/TESTING/EIG/cchkst.f @@ -167,7 +167,7 @@ *> CSTEMR('V', 'I') *> *> Tests 29 through 34 are disable at present because CSTEMR -*> does not handle partial specturm requests. +*> does not handle partial spectrum requests. *> *> (29) | S - Z D Z* | / ( |S| n ulp ) CSTEMR('V', 'I') *> diff --git a/lapack-netlib/TESTING/EIG/cchkst2stg.f b/lapack-netlib/TESTING/EIG/cchkst2stg.f index df610c207..8c7f962b7 100644 --- a/lapack-netlib/TESTING/EIG/cchkst2stg.f +++ b/lapack-netlib/TESTING/EIG/cchkst2stg.f @@ -188,7 +188,7 @@ *> CSTEMR('V', 'I') *> *> Tests 29 through 34 are disable at present because CSTEMR -*> does not handle partial specturm requests. +*> does not handle partial spectrum requests. *> *> (29) | S - Z D Z* | / ( |S| n ulp ) CSTEMR('V', 'I') *> @@ -1014,8 +1014,8 @@ * the one from above. Compare it with D1 computed * using the 1-stage. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL CLACPY( 'U', N, N, A, LDA, V, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH @@ -1048,8 +1048,8 @@ * the one from above. Compare it with D1 computed * using the 1-stage. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL CLACPY( 'L', N, N, A, LDA, V, LDU ) CALL CHETRD_2STAGE( 'N', "L", N, V, LDU, SD, SE, TAU, $ WORK, LH, WORK( LH+1 ), LW, IINFO ) diff --git a/lapack-netlib/TESTING/EIG/cdrgsx.f b/lapack-netlib/TESTING/EIG/cdrgsx.f index 4e0f8b468..746946d07 100644 --- a/lapack-netlib/TESTING/EIG/cdrgsx.f +++ b/lapack-netlib/TESTING/EIG/cdrgsx.f @@ -737,7 +737,7 @@ CALL CLACPY( 'Full', MPLUSN, MPLUSN, AI, LDA, A, LDA ) CALL CLACPY( 'Full', MPLUSN, MPLUSN, BI, LDA, B, LDA ) * -* Compute the Schur factorization while swaping the +* Compute the Schur factorization while swapping the * m-by-m (1,1)-blocks with n-by-n (2,2)-blocks. * CALL CGGESX( 'V', 'V', 'S', CLCTSX, 'B', MPLUSN, AI, LDA, BI, LDA, diff --git a/lapack-netlib/TESTING/EIG/cdrvbd.f b/lapack-netlib/TESTING/EIG/cdrvbd.f index 64bed3b13..7b7b01b47 100644 --- a/lapack-netlib/TESTING/EIG/cdrvbd.f +++ b/lapack-netlib/TESTING/EIG/cdrvbd.f @@ -33,8 +33,9 @@ *> *> \verbatim *> -*> CDRVBD checks the singular value decomposition (SVD) driver CGESVD -*> and CGESDD. +*> CDRVBD checks the singular value decomposition (SVD) drivers CGESVD, +*> CGESDD, CGESVJ, CGEJSV, CGESVDX, and CGESVDQ. +*> *> CGESVD and CGESDD factors A = U diag(S) VT, where U and VT are *> unitary and diag(S) is diagonal with the entries of the array S on *> its diagonal.
The entries of S are the singular values, nonnegative @@ -73,81 +74,92 @@ *> *> Test for CGESDD: *> -*> (1) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> (8) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) *> -*> (2) | I - U'U | / ( M ulp ) +*> (9) | I - U'U | / ( M ulp ) *> -*> (3) | I - VT VT' | / ( N ulp ) +*> (10) | I - VT VT' | / ( N ulp ) *> -*> (4) S contains MNMIN nonnegative values in decreasing order. +*> (11) S contains MNMIN nonnegative values in decreasing order. *> (Return 0 if true, 1/ULP if false.) *> -*> (5) | U - Upartial | / ( M ulp ) where Upartial is a partially +*> (12) | U - Upartial | / ( M ulp ) where Upartial is a partially *> computed U. *> -*> (6) | VT - VTpartial | / ( N ulp ) where VTpartial is a partially +*> (13) | VT - VTpartial | / ( N ulp ) where VTpartial is a partially *> computed VT. *> -*> (7) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the +*> (14) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the *> vector of singular values from the partial SVD *> +*> Test for CGESVDQ: +*> +*> (36) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> +*> (37) | I - U'U | / ( M ulp ) +*> +*> (38) | I - VT VT' | / ( N ulp ) +*> +*> (39) S contains MNMIN nonnegative values in decreasing order. +*> (Return 0 if true, 1/ULP if false.) +*> *> Test for CGESVJ: *> -*> (1) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> (15) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) *> -*> (2) | I - U'U | / ( M ulp ) +*> (16) | I - U'U | / ( M ulp ) *> -*> (3) | I - VT VT' | / ( N ulp ) +*> (17) | I - VT VT' | / ( N ulp ) *> -*> (4) S contains MNMIN nonnegative values in decreasing order. +*> (18) S contains MNMIN nonnegative values in decreasing order. *> (Return 0 if true, 1/ULP if false.) *> *> Test for CGEJSV: *> -*> (1) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> (19) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) *> -*> (2) | I - U'U | / ( M ulp ) +*> (20) | I - U'U | / ( M ulp ) *> -*> (3) | I - VT VT' | / ( N ulp ) +*> (21) | I - VT VT' | / ( N ulp ) *> -*> (4) S contains MNMIN nonnegative values in decreasing order. +*> (22) S contains MNMIN nonnegative values in decreasing order. *> (Return 0 if true, 1/ULP if false.) *> *> Test for CGESVDX( 'V', 'V', 'A' )/CGESVDX( 'N', 'N', 'A' ) *> -*> (1) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> (23) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) *> -*> (2) | I - U'U | / ( M ulp ) +*> (24) | I - U'U | / ( M ulp ) *> -*> (3) | I - VT VT' | / ( N ulp ) +*> (25) | I - VT VT' | / ( N ulp ) *> -*> (4) S contains MNMIN nonnegative values in decreasing order. +*> (26) S contains MNMIN nonnegative values in decreasing order. *> (Return 0 if true, 1/ULP if false.) *> -*> (5) | U - Upartial | / ( M ulp ) where Upartial is a partially +*> (27) | U - Upartial | / ( M ulp ) where Upartial is a partially *> computed U. *> -*> (6) | VT - VTpartial | / ( N ulp ) where VTpartial is a partially +*> (28) | VT - VTpartial | / ( N ulp ) where VTpartial is a partially *> computed VT. 
*> -*> (7) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the +*> (29) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the *> vector of singular values from the partial SVD *> *> Test for CGESVDX( 'V', 'V', 'I' ) *> -*> (8) | U' A VT''' - diag(S) | / ( |A| max(M,N) ulp ) +*> (30) | U' A VT''' - diag(S) | / ( |A| max(M,N) ulp ) *> -*> (9) | I - U'U | / ( M ulp ) +*> (31) | I - U'U | / ( M ulp ) *> -*> (10) | I - VT VT' | / ( N ulp ) +*> (32) | I - VT VT' | / ( N ulp ) *> *> Test for CGESVDX( 'V', 'V', 'V' ) *> -*> (11) | U' A VT''' - diag(S) | / ( |A| max(M,N) ulp ) +*> (33) | U' A VT''' - diag(S) | / ( |A| max(M,N) ulp ) *> -*> (12) | I - U'U | / ( M ulp ) +*> (34) | I - U'U | / ( M ulp ) *> -*> (13) | I - VT VT' | / ( N ulp ) +*> (35) | I - VT VT' | / ( N ulp ) *> *> The "sizes" are specified by the arrays MM(1:NSIZES) and *> NN(1:NSIZES); the value of each element pair (MM(j),NN(j)) @@ -393,6 +405,8 @@ * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * June 2016 +* + IMPLICIT NONE * * .. Scalar Arguments .. INTEGER INFO, LDA, LDU, LDVT, LWORK, NOUNIT, NSIZES, @@ -411,7 +425,7 @@ * ===================================================================== * * .. Parameters .. - REAL ZERO, ONE, TWO, HALF + REAL ZERO, ONE, TWO, HALF PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0, TWO = 2.0E0, $ HALF = 0.5E0 ) COMPLEX CZERO, CONE @@ -431,10 +445,13 @@ REAL ANORM, DIF, DIV, OVFL, RTUNFL, ULP, ULPINV, $ UNFL, VL, VU * .. +* .. Local Scalars for CGESVDQ .. + INTEGER LIWORK, NUMRANK +* .. * .. Local Arrays .. CHARACTER CJOB( 4 ), CJOBR( 3 ), CJOBV( 2 ) INTEGER IOLDSD( 4 ), ISEED2( 4 ) - REAL RESULT( 35 ) + REAL RESULT( 39 ) * .. * .. External Functions .. REAL SLAMCH, SLARND @@ -442,8 +459,8 @@ * .. * .. External Subroutines .. EXTERNAL ALASVM, XERBLA, CBDT01, CBDT05, CGESDD, - $ CGESVD, CGESVJ, CGEJSV, CGESVDX, CLACPY, - $ CLASET, CLATMS, CUNT01, CUNT03 + $ CGESVD, CGESVDQ, CGESVJ, CGEJSV, CGESVDX, + $ CLACPY, CLASET, CLATMS, CUNT01, CUNT03 * .. * .. Intrinsic Functions .. INTRINSIC ABS, REAL, MAX, MIN @@ -838,8 +855,64 @@ 130 CONTINUE * -* Test CGESVJ: Factorize A -* Note: CGESVJ does not work for M < N +* Test CGESVDQ +* Note: CGESVDQ only works for M >= N +* + RESULT( 36 ) = ZERO + RESULT( 37 ) = ZERO + RESULT( 38 ) = ZERO + RESULT( 39 ) = ZERO +* + IF( M.GE.N ) THEN + IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) + LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 + LSWORK = MIN( LSWORK, LWORK ) + LSWORK = MAX( LSWORK, 1 ) + IF( IWSPC.EQ.4 ) + $ LSWORK = LWORK +* + CALL CLACPY( 'F', M, N, ASAV, LDA, A, LDA ) + SRNAMT = 'CGESVDQ' +* + LRWORK = MAX(2, M, 5*N) + LIWORK = MAX( N, 1 ) + CALL CGESVDQ( 'H', 'N', 'N', 'A', 'A', + $ M, N, A, LDA, SSAV, USAV, LDU, + $ VTSAV, LDVT, NUMRANK, IWORK, LIWORK, + $ WORK, LWORK, RWORK, LRWORK, IINFO ) +* + IF( IINFO.NE.0 ) THEN + WRITE( NOUNIT, FMT = 9995 )'CGESVDQ', IINFO, M, N, + $ JTYPE, LSWORK, IOLDSD + INFO = ABS( IINFO ) + RETURN + END IF +* +* Do tests 36--39 +* + CALL CBDT01( M, N, 0, ASAV, LDA, USAV, LDU, SSAV, E, + $ VTSAV, LDVT, WORK, RWORK, RESULT( 36 ) ) + IF( M.NE.0 .AND. 
N.NE.0 ) THEN + CALL CUNT01( 'Columns', M, M, USAV, LDU, WORK, + $ LWORK, RWORK, RESULT( 37 ) ) + CALL CUNT01( 'Rows', N, N, VTSAV, LDVT, WORK, + $ LWORK, RWORK, RESULT( 38 ) ) + END IF + RESULT( 39 ) = ZERO + DO 199 I = 1, MNMIN - 1 + IF( SSAV( I ).LT.SSAV( I+1 ) ) + $ RESULT( 39 ) = ULPINV + IF( SSAV( I ).LT.ZERO ) + $ RESULT( 39 ) = ULPINV + 199 CONTINUE + IF( MNMIN.GE.1 ) THEN + IF( SSAV( MNMIN ).LT.ZERO ) + $ RESULT( 39 ) = ULPINV + END IF + END IF +* +* Test CGESVJ +* Note: CGESVJ only works for M >= N * RESULT( 15 ) = ZERO RESULT( 16 ) = ZERO @@ -847,13 +920,13 @@ RESULT( 18 ) = ZERO * IF( M.GE.N ) THEN - IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) - LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 - LSWORK = MIN( LSWORK, LWORK ) - LSWORK = MAX( LSWORK, 1 ) - LRWORK = MAX(6,N) - IF( IWSPC.EQ.4 ) - $ LSWORK = LWORK + IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) + LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 + LSWORK = MIN( LSWORK, LWORK ) + LSWORK = MAX( LSWORK, 1 ) + LRWORK = MAX(6,N) + IF( IWSPC.EQ.4 ) + $ LSWORK = LWORK * CALL CLACPY( 'F', M, N, ASAV, LDA, USAV, LDA ) SRNAMT = 'CGESVJ' @@ -861,8 +934,7 @@ & 0, A, LDVT, WORK, LWORK, RWORK, & LRWORK, IINFO ) * -* CGESVJ retuns V not VT, so we transpose to use the same -* test suite. +* CGESVJ returns V not VH * DO J=1,N DO I=1,N @@ -900,31 +972,30 @@ END IF END IF * -* Test CGEJSV: Factorize A -* Note: CGEJSV does not work for M < N +* Test CGEJSV +* Note: CGEJSV only works for M >= N * RESULT( 19 ) = ZERO RESULT( 20 ) = ZERO RESULT( 21 ) = ZERO RESULT( 22 ) = ZERO IF( M.GE.N ) THEN - IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) - LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 - LSWORK = MIN( LSWORK, LWORK ) - LSWORK = MAX( LSWORK, 1 ) - IF( IWSPC.EQ.4 ) - $ LSWORK = LWORK - LRWORK = MAX( 7, N + 2*M) -* - CALL CLACPY( 'F', M, N, ASAV, LDA, VTSAV, LDA ) + IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) + LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 + LSWORK = MIN( LSWORK, LWORK ) + LSWORK = MAX( LSWORK, 1 ) + IF( IWSPC.EQ.4 ) + $ LSWORK = LWORK + LRWORK = MAX( 7, N + 2*M) +* + CALL CLACPY( 'F', M, N, ASAV, LDA, VTSAV, LDA ) SRNAMT = 'CGEJSV' CALL CGEJSV( 'G', 'U', 'V', 'R', 'N', 'N', & M, N, VTSAV, LDA, SSAV, USAV, LDU, A, LDVT, & WORK, LWORK, RWORK, & LRWORK, IWORK, IINFO ) * -* CGEJSV retuns V not VT, so we transpose to use the same -* test suite. 
+* CGEJSV returns V not VH * DO 133 J=1,N DO 132 I=1,N @@ -933,7 +1004,7 @@ 133 END DO * IF( IINFO.NE.0 ) THEN - WRITE( NOUNIT, FMT = 9995 )'GESVJ', IINFO, M, N, + WRITE( NOUNIT, FMT = 9995 )'GEJSV', IINFO, M, N, $ JTYPE, LSWORK, IOLDSD INFO = ABS( IINFO ) RETURN @@ -1160,7 +1231,7 @@ * NTEST = 0 NFAIL = 0 - DO 190 J = 1, 35 + DO 190 J = 1, 39 IF( RESULT( J ).GE.ZERO ) $ NTEST = NTEST + 1 IF( RESULT( J ).GE.THRESH ) @@ -1175,7 +1246,7 @@ NTESTF = 2 END IF * - DO 200 J = 1, 35 + DO 200 J = 1, 39 IF( RESULT( J ).GE.THRESH ) THEN WRITE( NOUNIT, FMT = 9997 )M, N, JTYPE, IWSPC, $ IOLDSD, J, RESULT( J ) @@ -1251,6 +1322,12 @@ $ / '33 = | U**T A VT**T - diag(S) | / ( |A| max(M,N) ulp )', $ / '34 = | I - U**T U | / ( M ulp ) ', $ / '35 = | I - VT VT**T | / ( N ulp ) ', + $ ' CGESVDQ(H,N,N,A,A)', + $ / '36 = | A - U diag(S) VT | / ( |A| max(M,N) ulp ) ', + $ / '37 = | I - U**T U | / ( M ulp ) ', + $ / '38 = | I - VT VT**T | / ( N ulp ) ', + $ / '39 = 0 if S contains min(M,N) nonnegative values in', + $ ' decreasing order, else 1/ulp', $ / / ) 9997 FORMAT( ' M=', I5, ', N=', I5, ', type ', I1, ', IWS=', I1, $ ', seed=', 4( I4, ',' ), ' test(', I2, ')=', G11.4 ) diff --git a/lapack-netlib/TESTING/EIG/cerred.f b/lapack-netlib/TESTING/EIG/cerred.f index f1670e983..a0ceff76e 100644 --- a/lapack-netlib/TESTING/EIG/cerred.f +++ b/lapack-netlib/TESTING/EIG/cerred.f @@ -36,6 +36,8 @@ *> CGEJSV compute SVD of an M-by-N matrix A where M >= N *> CGESVDX compute SVD of an M-by-N matrix A(by bisection *> and inverse iteration) +*> CGESVDQ compute SVD of an M-by-N matrix A (with a +*> QR-preconditioned method) *> \endverbatim * * Arguments: @@ -101,7 +103,7 @@ * .. * .. External Subroutines .. EXTERNAL CHKXER, CGEES, CGEESX, CGEEV, CGEEVX, CGEJSV, - $ CGESDD, CGESVD + $ CGESDD, CGESVD, CGESVDX, CGESVDQ * .. * .. External Functions ..
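Each "| A - U diag(S) VT | / ( |A| max(M,N) ulp )" entry in the legend above is a scaled reconstruction residual that the driver obtains through CBDT01; the value is then compared against THRESH. A rough standalone equivalent in double precision (a sketch only: it uses DGESVD and the max-abs norm rather than the checkers' exact norm choices):

    ! svd_resid.f90 -- sketch of the reconstruction-residual test:
    ! resid = | A - U diag(S) VT | / ( |A| max(M,N) ulp ).
    program svd_resid
      implicit none
      integer, parameter :: m = 3, n = 2
      double precision :: a(m,n), asav(m,n), u(m,m), vt(n,n), s(n)
      double precision :: us(m,n), r(m,n), work(100), resid, anorm, ulp
      double precision, external :: dlange, dlamch
      integer :: info, j
      call random_number(a)
      asav = a
      call dgesvd('A', 'A', m, n, a, m, s, u, m, vt, n, work, 100, info)
      do j = 1, n
         us(:,j) = u(:,j)*s(j)                    ! U(:,1:n) * diag(S)
      end do
      call dgemm('N', 'N', m, n, n, 1d0, us, m, vt, n, 0d0, r, m)
      r = asav - r                                ! A - U diag(S) VT
      anorm = dlange('M', m, n, asav, m, work)
      ulp = dlamch('P')
      resid = dlange('M', m, n, r, m, work) / (anorm*max(m, n)*ulp)
      print *, 'test value:', resid               ! driver compares to THRESH
    end program svd_resid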
LOGICAL LSAMEN, CSLECT @@ -495,6 +497,61 @@ ELSE WRITE( NOUT, FMT = 9998 ) END IF +* +* Test CGESVDQ +* + SRNAMT = 'CGESVDQ' + INFOT = 1 + CALL CGESVDQ( 'X', 'P', 'T', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CGESVDQ( 'A', 'X', 'T', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGESVDQ( 'A', 'P', 'X', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGESVDQ( 'A', 'P', 'T', 'X', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGESVDQ( 'A', 'P', 'T', 'A', 'X', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CGESVDQ( 'A', 'P', 'T', 'A', 'A', -1, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CGESVDQ( 'A', 'P', 'T', 'A', 'A', 0, 1, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 0, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ -1, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 14 + CALL CGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ 1, VT, -1, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 17 + CALL CGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ 1, VT, 1, NS, IW, -5, W, 1, RW, 1, INFO ) + CALL CHKXER( 'CGESVDQ', INFOT, NOUT, LERR, OK ) + NT = 11 + IF( OK ) THEN + WRITE( NOUT, FMT = 9999 )SRNAMT( 1:LEN_TRIM( SRNAMT ) ), + $ NT + ELSE + WRITE( NOUT, FMT = 9998 ) + END IF END IF * * Print a summary line. diff --git a/lapack-netlib/TESTING/EIG/cget51.f b/lapack-netlib/TESTING/EIG/cget51.f index ce1108aa4..ec58086d4 100644 --- a/lapack-netlib/TESTING/EIG/cget51.f +++ b/lapack-netlib/TESTING/EIG/cget51.f @@ -29,12 +29,13 @@ *> *> CGET51 generally checks a decomposition of the form *> -*> A = U B VC> -*> where * means conjugate transpose and U and V are unitary. +*> A = U B V**H +*> +*> where **H means conjugate transpose and U and V are unitary. *> *> Specifically, if ITYPE=1 *> -*> RESULT = | A - U B V* | / ( |A| n ulp ) +*> RESULT = | A - U B V**H | / ( |A| n ulp ) *> *> If ITYPE=2, then: *> @@ -42,7 +43,7 @@ *> *> If ITYPE=3, then: *> -*> RESULT = | I - UU* | / ( n ulp ) +*> RESULT = | I - U U**H | / ( n ulp ) *> \endverbatim * * Arguments: @@ -52,9 +53,9 @@ *> \verbatim *> ITYPE is INTEGER *> Specifies the type of tests to be performed. 
-*> =1: RESULT = | A - U B V* | / ( |A| n ulp ) +*> =1: RESULT = | A - U B V**H | / ( |A| n ulp ) *> =2: RESULT = | A - B | / ( |A| n ulp ) -*> =3: RESULT = | I - UU* | / ( n ulp ) +*> =3: RESULT = | I - U U**H | / ( n ulp ) *> \endverbatim *> *> \param[in] N @@ -218,7 +219,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: Compute W = A - UBV' +* ITYPE=1: Compute W = A - U B V**H * CALL CLACPY( ' ', N, N, A, LDA, WORK, N ) CALL CGEMM( 'N', 'N', N, N, N, CONE, U, LDU, B, LDB, CZERO, @@ -259,7 +260,7 @@ * * Tests not scaled by norm(A) * -* ITYPE=3: Compute UU' - I +* ITYPE=3: Compute U U**H - I * CALL CGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, $ WORK, N ) diff --git a/lapack-netlib/TESTING/EIG/chbt21.f b/lapack-netlib/TESTING/EIG/chbt21.f index 90ec74c23..76eb7d115 100644 --- a/lapack-netlib/TESTING/EIG/chbt21.f +++ b/lapack-netlib/TESTING/EIG/chbt21.f @@ -28,14 +28,16 @@ *> *> CHBT21 generally checks a decomposition of the form *> -*> A = U S UC> -*> where * means conjugate transpose, A is hermitian banded, U is +*> A = U S U**H +*> +*> where **H means conjugate transpose, A is hermitian banded, U is *> unitary, and S is diagonal (if KS=0) or symmetric *> tridiagonal (if KS=1). *> *> Specifically: *> -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> \endverbatim * * Arguments: @@ -220,7 +222,7 @@ * ANORM = MAX( CLANHB( '1', CUPLO, N, IKA, A, LDA, RWORK ), UNFL ) * -* Compute error matrix: Error = A - U S U* +* Compute error matrix: Error = A - U S U**H * * Copy A from SB to SP storage format. * @@ -271,7 +273,7 @@ * * Do Test 2 * -* Compute UU* - I +* Compute U U**H - I * CALL CGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, WORK, $ N ) diff --git a/lapack-netlib/TESTING/EIG/chet21.f b/lapack-netlib/TESTING/EIG/chet21.f index 5aff64904..d5c4f1348 100644 --- a/lapack-netlib/TESTING/EIG/chet21.f +++ b/lapack-netlib/TESTING/EIG/chet21.f @@ -29,8 +29,9 @@ *> *> CHET21 generally checks a decomposition of the form *> -*> A = U S UC> -*> where * means conjugate transpose, A is hermitian, U is unitary, and +*> A = U S U**H +*> +*> where **H means conjugate transpose, A is hermitian, U is unitary, and *> S is diagonal (if KBAND=0) or (real) symmetric tridiagonal (if *> KBAND=1). *> @@ -42,18 +43,19 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> If ITYPE=2, then: *> -*> RESULT(1) = | A - V S V* | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**H | / ( |A| n ulp ) *> *> If ITYPE=3, then: *> -*> RESULT(1) = | I - UV* | / ( n ulp ) +*> RESULT(1) = | I - U V**H | / ( n ulp ) *> *> For ITYPE > 1, the transformation U is expressed as a product -*> V = H(1)...H(n-2), where H(j) = I - tau(j) v(j) v(j)C> and each +*> V = H(1)...H(n-2), where H(j) = I - tau(j) v(j) v(j)**H and each *> vector v(j) has its first j elements 0 and the remaining n-j elements *> stored in V(j+1:n,j). *> \endverbatim @@ -66,14 +68,15 @@ *> ITYPE is INTEGER *> Specifies the type of tests to be performed. 
*> 1: U expressed as a dense unitary matrix: -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> 2: U expressed as a product V of Housholder transformations: -*> RESULT(1) = | A - V S V* | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**H | / ( |A| n ulp ) *> *> 3: U expressed both as a dense unitary matrix and *> as a product of Housholder transformations: -*> RESULT(1) = | I - UV* | / ( n ulp ) +*> RESULT(1) = | I - U V**H | / ( n ulp ) *> \endverbatim *> *> \param[in] UPLO @@ -171,7 +174,7 @@ *> \verbatim *> TAU is COMPLEX array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)* in the Householder transformation H(j) of +*> v(j) v(j)**H in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. *> \endverbatim @@ -294,7 +297,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: error = A - U S U* +* ITYPE=1: error = A - U S U**H * CALL CLASET( 'Full', N, N, CZERO, CZERO, WORK, N ) CALL CLACPY( CUPLO, N, N, A, LDA, WORK, N ) @@ -304,7 +307,6 @@ 10 CONTINUE * IF( N.GT.1 .AND. KBAND.EQ.1 ) THEN -CMK DO 20 J = 1, N - 1 DO 20 J = 2, N - 1 CALL CHER2( CUPLO, N, -CMPLX( E( J ) ), U( 1, J ), 1, $ U( 1, J-1 ), 1, WORK, N ) @@ -314,7 +316,7 @@ CMK DO 20 J = 1, N - 1 * ELSE IF( ITYPE.EQ.2 ) THEN * -* ITYPE=2: error = V S V* - A +* ITYPE=2: error = V S V**H - A * CALL CLASET( 'Full', N, N, CZERO, CZERO, WORK, N ) * @@ -371,7 +373,7 @@ CMK DO 20 J = 1, N - 1 * ELSE IF( ITYPE.EQ.3 ) THEN * -* ITYPE=3: error = U V* - I +* ITYPE=3: error = U V**H - I * IF( N.LT.2 ) $ RETURN @@ -407,7 +409,7 @@ CMK DO 20 J = 1, N - 1 * * Do Test 2 * -* Compute UU* - I +* Compute U U**H - I * IF( ITYPE.EQ.1 ) THEN CALL CGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, diff --git a/lapack-netlib/TESTING/EIG/chet22.f b/lapack-netlib/TESTING/EIG/chet22.f index 5087ecbca..354387f2a 100644 --- a/lapack-netlib/TESTING/EIG/chet22.f +++ b/lapack-netlib/TESTING/EIG/chet22.f @@ -42,7 +42,8 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | U' A U - S | / ( |A| m ulp ) *andC> RESULT(2) = | I - U'U | / ( m ulp ) +*> RESULT(1) = | U**H A U - S | / ( |A| m ulp ) and +*> RESULT(2) = | I - U**H U | / ( m ulp ) *> \endverbatim * * Arguments: @@ -52,7 +53,8 @@ *> ITYPE INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense orthogonal matrix: -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> UPLO CHARACTER *> If UPLO='U', the upper triangle of A will be used and the @@ -122,7 +124,7 @@ *> *> TAU COMPLEX array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)' in the Householder transformation H(j) of +*> v(j) v(j)**H in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. *> Not modified. 
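All of the RESULT(2) checks documented above reduce to the same computation: form U U**H - I (U**T U - I in the real testers) and scale its norm by n ulp; CUNT01/DORT01 do this for the drivers. A self-contained double-precision sketch of that orthogonality residual, here applied to eigenvectors from DSYEV (illustrative only, not the checkers' exact algorithm):

    ! ortho_check.f90 -- sketch: RESULT(2) = | I - U U**T | / ( n ulp ).
    program ortho_check
      implicit none
      integer, parameter :: n = 4
      double precision :: a(n,n), w(n), work(64), e(n,n), resid
      double precision, external :: dlamch
      integer :: info, i
      call random_number(a)
      a = a + transpose(a)                              ! make A symmetric
      call dsyev('V', 'U', n, a, n, w, work, 64, info)  ! A overwritten by U
      call dgemm('N', 'T', n, n, n, 1d0, a, n, a, n, 0d0, e, n)  ! U U**T
      do i = 1, n
         e(i,i) = e(i,i) - 1d0                          ! U U**T - I
      end do
      resid = maxval(abs(e)) / (n*dlamch('P'))
      print *, 'orthogonality test value:', resid
    end program ortho_check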
@@ -215,7 +217,7 @@ * * Compute error matrix: * -* ITYPE=1: error = U' A U - S +* ITYPE=1: error = U**H A U - S * CALL CHEMM( 'L', UPLO, N, M, CONE, A, LDA, U, LDU, CZERO, WORK, $ N ) @@ -249,7 +251,7 @@ * * Do Test 2 * -* Compute U'U - I +* Compute U**H U - I * IF( ITYPE.EQ.1 ) $ CALL CUNT01( 'Columns', N, M, U, LDU, WORK, 2*N*N, RWORK, diff --git a/lapack-netlib/TESTING/EIG/chpt21.f b/lapack-netlib/TESTING/EIG/chpt21.f index e151a8bd8..f20921bd9 100644 --- a/lapack-netlib/TESTING/EIG/chpt21.f +++ b/lapack-netlib/TESTING/EIG/chpt21.f @@ -29,8 +29,9 @@ *> *> CHPT21 generally checks a decomposition of the form *> -*> A = U S UC> -*> where * means conjugate transpose, A is hermitian, U is +*> A = U S U**H +*> +*> where **H means conjugate transpose, A is hermitian, U is *> unitary, and S is diagonal (if KBAND=0) or (real) symmetric *> tridiagonal (if KBAND=1). If ITYPE=1, then U is represented as *> a dense matrix, otherwise the U is expressed as a product of @@ -41,15 +42,16 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> If ITYPE=2, then: *> -*> RESULT(1) = | A - V S V* | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**H | / ( |A| n ulp ) *> *> If ITYPE=3, then: *> -*> RESULT(1) = | I - UV* | / ( n ulp ) +*> RESULT(1) = | I - U V**H | / ( n ulp ) *> *> Packed storage means that, for example, if UPLO='U', then the columns *> of the upper triangle of A are stored one after another, so that @@ -70,14 +72,16 @@ *> *> If UPLO='U', then V = H(n-1)...H(1), where *> -*> H(j) = I - tau(j) v(j) v(j)C> +*> H(j) = I - tau(j) v(j) v(j)**H +*> *> and the first j-1 elements of v(j) are stored in V(1:j-1,j+1), *> (i.e., VP( j*(j+1)/2 + 1 : j*(j+1)/2 + j-1 ) ), *> the j-th element is 1, and the last n-j elements are 0. *> *> If UPLO='L', then V = H(1)...H(n-1), where *> -*> H(j) = I - tau(j) v(j) v(j)C> +*> H(j) = I - tau(j) v(j) v(j)**H +*> *> and the first j elements of v(j) are 0, the (j+1)-st is 1, and the *> (j+2)-nd through n-th elements are stored in V(j+2:n,j) (i.e., *> in VP( (2*n-j)*(j-1)/2 + j+2 : (2*n-j)*(j-1)/2 + n ) .) @@ -91,14 +95,15 @@ *> ITYPE is INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense unitary matrix: -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> 2: U expressed as a product V of Housholder transformations: -*> RESULT(1) = | A - V S V* | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**H | / ( |A| n ulp ) *> *> 3: U expressed both as a dense unitary matrix and *> as a product of Housholder transformations: -*> RESULT(1) = | I - UV* | / ( n ulp ) +*> RESULT(1) = | I - U V**H | / ( n ulp ) *> \endverbatim *> *> \param[in] UPLO @@ -181,7 +186,7 @@ *> \verbatim *> TAU is COMPLEX array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)* in the Householder transformation H(j) of +*> v(j) v(j)**H in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. 
*> \endverbatim @@ -313,7 +318,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: error = A - U S U* +* ITYPE=1: error = A - U S U**H * CALL CLASET( 'Full', N, N, CZERO, CZERO, WORK, N ) CALL CCOPY( LAP, AP, 1, WORK, 1 ) @@ -332,7 +337,7 @@ * ELSE IF( ITYPE.EQ.2 ) THEN * -* ITYPE=2: error = V S V* - A +* ITYPE=2: error = V S V**H - A * CALL CLASET( 'Full', N, N, CZERO, CZERO, WORK, N ) * @@ -400,7 +405,7 @@ * ELSE IF( ITYPE.EQ.3 ) THEN * -* ITYPE=3: error = U V* - I +* ITYPE=3: error = U V**H - I * IF( N.LT.2 ) $ RETURN @@ -431,7 +436,7 @@ * * Do Test 2 * -* Compute UU* - I +* Compute U U**H - I * IF( ITYPE.EQ.1 ) THEN CALL CGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, diff --git a/lapack-netlib/TESTING/EIG/cstt21.f b/lapack-netlib/TESTING/EIG/cstt21.f index 47d99ac49..3fdfa1675 100644 --- a/lapack-netlib/TESTING/EIG/cstt21.f +++ b/lapack-netlib/TESTING/EIG/cstt21.f @@ -28,14 +28,15 @@ *> *> CSTT21 checks a decomposition of the form *> -*> A = U S UC> -*> where * means conjugate transpose, A is real symmetric tridiagonal, +*> A = U S U**H +*> +*> where **H means conjugate transpose, A is real symmetric tridiagonal, *> U is unitary, and S is real and diagonal (if KBAND=0) or symmetric *> tridiagonal (if KBAND=1). Two tests are performed: *> -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) *> -*> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> \endverbatim * * Arguments: @@ -201,7 +202,7 @@ WORK( N**2 ) = AD( N ) ANORM = MAX( ANORM, ABS( AD( N ) )+TEMP1, UNFL ) * -* Norm of A - USU* +* Norm of A - U S U**H * DO 20 J = 1, N CALL CHER( 'L', N, -SD( J ), U( 1, J ), 1, WORK, N ) @@ -228,7 +229,7 @@ * * Do Test 2 * -* Compute UU* - I +* Compute U U**H - I * CALL CGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, WORK, $ N ) diff --git a/lapack-netlib/TESTING/EIG/dbdt05.f b/lapack-netlib/TESTING/EIG/dbdt05.f index 3580aec81..356bb5fc8 100644 --- a/lapack-netlib/TESTING/EIG/dbdt05.f +++ b/lapack-netlib/TESTING/EIG/dbdt05.f @@ -52,6 +52,7 @@ *> \verbatim *> A is DOUBLE PRECISION array, dimension (LDA,N) *> The m by n matrix A. +*> \endverbatim *> *> \param[in] LDA *> \verbatim diff --git a/lapack-netlib/TESTING/EIG/dchksb2stg.f b/lapack-netlib/TESTING/EIG/dchksb2stg.f index ee66f7ebb..88f6e18d3 100644 --- a/lapack-netlib/TESTING/EIG/dchksb2stg.f +++ b/lapack-netlib/TESTING/EIG/dchksb2stg.f @@ -670,8 +670,8 @@ * the one from above. Compare it with D1 computed * using the DSBTRD. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL DLACPY( ' ', K+1, N, A, LDA, U, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH @@ -743,8 +743,8 @@ * the one from above. Compare it with D1 computed * using the DSBTRD. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL DLACPY( ' ', K+1, N, A, LDA, U, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH diff --git a/lapack-netlib/TESTING/EIG/dchkst.f b/lapack-netlib/TESTING/EIG/dchkst.f index f08deb529..1b4d85f79 100644 --- a/lapack-netlib/TESTING/EIG/dchkst.f +++ b/lapack-netlib/TESTING/EIG/dchkst.f @@ -166,7 +166,7 @@ *> DSTEMR('V', 'I') *> *> Tests 29 through 34 are disable at present because DSTEMR -*> does not handle partial specturm requests. +*> does not handle partial spectrum requests. 
*> *> (29) | S - Z D Z' | / ( |S| n ulp ) DSTEMR('V', 'I') *> diff --git a/lapack-netlib/TESTING/EIG/dchkst2stg.f b/lapack-netlib/TESTING/EIG/dchkst2stg.f index fc015334d..7115175c2 100644 --- a/lapack-netlib/TESTING/EIG/dchkst2stg.f +++ b/lapack-netlib/TESTING/EIG/dchkst2stg.f @@ -187,7 +187,7 @@ *> DSTEMR('V', 'I') *> *> Tests 29 through 34 are disable at present because DSTEMR -*> does not handle partial specturm requests. +*> does not handle partial spectrum requests. *> *> (29) | S - Z D Z' | / ( |S| n ulp ) DSTEMR('V', 'I') *> @@ -999,8 +999,8 @@ * the one from above. Compare it with D1 computed * using the 1-stage. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL DLACPY( "U", N, N, A, LDA, V, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH @@ -1032,8 +1032,8 @@ * the one from above. Compare it with D1 computed * using the 1-stage. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL DLACPY( "L", N, N, A, LDA, V, LDU ) CALL DSYTRD_2STAGE( 'N', "L", N, V, LDU, SD, SE, TAU, $ WORK, LH, WORK( LH+1 ), LW, IINFO ) diff --git a/lapack-netlib/TESTING/EIG/ddrgsx.f b/lapack-netlib/TESTING/EIG/ddrgsx.f index 44c36407f..7fe9dfc14 100644 --- a/lapack-netlib/TESTING/EIG/ddrgsx.f +++ b/lapack-netlib/TESTING/EIG/ddrgsx.f @@ -769,7 +769,7 @@ CALL DLACPY( 'Full', MPLUSN, MPLUSN, AI, LDA, A, LDA ) CALL DLACPY( 'Full', MPLUSN, MPLUSN, BI, LDA, B, LDA ) * -* Compute the Schur factorization while swaping the +* Compute the Schur factorization while swapping the * m-by-m (1,1)-blocks with n-by-n (2,2)-blocks. * CALL DGGESX( 'V', 'V', 'S', DLCTSX, 'B', MPLUSN, AI, LDA, BI, LDA, diff --git a/lapack-netlib/TESTING/EIG/ddrvbd.f b/lapack-netlib/TESTING/EIG/ddrvbd.f index 868679052..bd4ae60da 100644 --- a/lapack-netlib/TESTING/EIG/ddrvbd.f +++ b/lapack-netlib/TESTING/EIG/ddrvbd.f @@ -32,7 +32,7 @@ *> \verbatim *> *> DDRVBD checks the singular value decomposition (SVD) drivers -*> DGESVD, DGESDD, DGESVJ, and DGEJSV. +*> DGESVD, DGESDD, DGESVDQ, DGESVJ, DGEJSV, and DGESVDX. *> *> Both DGESVD and DGESDD factor A = U diag(S) VT, where U and VT are *> orthogonal and diag(S) is diagonal with the entries of the array S @@ -90,6 +90,17 @@ *> (14) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the *> vector of singular values from the partial SVD *> +*> Test for DGESVDQ: +*> +*> (36) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> +*> (37) | I - U'U | / ( M ulp ) +*> +*> (38) | I - VT VT' | / ( N ulp ) +*> +*> (39) S contains MNMIN nonnegative values in decreasing order. +*> (Return 0 if true, 1/ULP if false.) +*> *> Test for DGESVJ: *> *> (15) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) @@ -354,6 +365,8 @@ SUBROUTINE DDRVBD( NSIZES, MM, NN, NTYPES, DOTYPE, ISEED, THRESH, $ A, LDA, U, LDU, VT, LDVT, ASAV, USAV, VTSAV, S, $ SSAV, E, WORK, LWORK, IWORK, NOUT, INFO ) +* + IMPLICIT NONE * * -- LAPACK test routine (version 3.7.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- @@ -390,13 +403,19 @@ $ ITEMP, J, JSIZE, JTYPE, LSWORK, M, MINWRK, $ MMAX, MNMAX, MNMIN, MTYPES, N, NFAIL, $ NMAX, NS, NSI, NSV, NTEST - DOUBLE PRECISION ANORM, DIF, DIV, OVFL, RTUNFL, ULP, - $ ULPINV, UNFL, VL, VU + DOUBLE PRECISION ANORM, DIF, DIV, OVFL, RTUNFL, ULP, + $ ULPINV, UNFL, VL, VU +* .. +* .. 
Local Scalars for DGESVDQ .. + INTEGER LIWORK, LRWORK, NUMRANK +* .. +* .. Local Arrays for DGESVDQ .. + DOUBLE PRECISION RWORK( 2 ) * .. * .. Local Arrays .. CHARACTER CJOB( 4 ), CJOBR( 3 ), CJOBV( 2 ) INTEGER IOLDSD( 4 ), ISEED2( 4 ) - DOUBLE PRECISION RESULT( 40 ) + DOUBLE PRECISION RESULT( 39 ) * .. * .. External Functions .. DOUBLE PRECISION DLAMCH, DLARND @@ -404,8 +423,8 @@ * .. * .. External Subroutines .. EXTERNAL ALASVM, DBDT01, DGEJSV, DGESDD, DGESVD, - $ DGESVDX, DGESVJ, DLABAD, DLACPY, DLASET, - $ DLATMS, DORT01, DORT03, XERBLA + $ DGESVDQ, DGESVDX, DGESVJ, DLABAD, DLACPY, + $ DLASET, DLATMS, DORT01, DORT03, XERBLA * .. * .. Intrinsic Functions .. INTRINSIC ABS, DBLE, INT, MAX, MIN @@ -781,8 +800,64 @@ RESULT( 14 ) = MAX( RESULT( 14 ), DIF ) 110 CONTINUE * -* Test DGESVJ: Factorize A -* Note: DGESVJ does not work for M < N +* Test DGESVDQ +* Note: DGESVDQ only works for M >= N +* + RESULT( 36 ) = ZERO + RESULT( 37 ) = ZERO + RESULT( 38 ) = ZERO + RESULT( 39 ) = ZERO +* + IF( M.GE.N ) THEN + IWTMP = 5*MNMIN*MNMIN + 9*MNMIN + MAX( M, N ) + LSWORK = IWTMP + ( IWS-1 )*( LWORK-IWTMP ) / 3 + LSWORK = MIN( LSWORK, LWORK ) + LSWORK = MAX( LSWORK, 1 ) + IF( IWS.EQ.4 ) + $ LSWORK = LWORK +* + CALL DLACPY( 'F', M, N, ASAV, LDA, A, LDA ) + SRNAMT = 'DGESVDQ' +* + LRWORK = 2 + LIWORK = MAX( N, 1 ) + CALL DGESVDQ( 'H', 'N', 'N', 'A', 'A', + $ M, N, A, LDA, SSAV, USAV, LDU, + $ VTSAV, LDVT, NUMRANK, IWORK, LIWORK, + $ WORK, LWORK, RWORK, LRWORK, IINFO ) +* + IF( IINFO.NE.0 ) THEN + WRITE( NOUT, FMT = 9995 )'DGESVDQ', IINFO, M, N, + $ JTYPE, LSWORK, IOLDSD + INFO = ABS( IINFO ) + RETURN + END IF +* +* Do tests 36--39 +* + CALL DBDT01( M, N, 0, ASAV, LDA, USAV, LDU, SSAV, E, + $ VTSAV, LDVT, WORK, RESULT( 36 ) ) + IF( M.NE.0 .AND. N.NE.0 ) THEN + CALL DORT01( 'Columns', M, M, USAV, LDU, WORK, + $ LWORK, RESULT( 37 ) ) + CALL DORT01( 'Rows', N, N, VTSAV, LDVT, WORK, + $ LWORK, RESULT( 38 ) ) + END IF + RESULT( 39 ) = ZERO + DO 199 I = 1, MNMIN - 1 + IF( SSAV( I ).LT.SSAV( I+1 ) ) + $ RESULT( 39 ) = ULPINV + IF( SSAV( I ).LT.ZERO ) + $ RESULT( 39 ) = ULPINV + 199 CONTINUE + IF( MNMIN.GE.1 ) THEN + IF( SSAV( MNMIN ).LT.ZERO ) + $ RESULT( 39 ) = ULPINV + END IF + END IF +* +* Test DGESVJ +* Note: DGESVJ only works for M >= N * RESULT( 15 ) = ZERO RESULT( 16 ) = ZERO @@ -802,8 +877,7 @@ CALL DGESVJ( 'G', 'U', 'V', M, N, USAV, LDA, SSAV, & 0, A, LDVT, WORK, LWORK, INFO ) * -* DGESVJ retuns V not VT, so we transpose to use the same -* test suite. +* DGESVJ returns V not VT * DO J=1,N DO I=1,N @@ -841,8 +915,8 @@ END IF END IF * -* Test DGEJSV: Factorize A -* Note: DGEJSV does not work for M < N +* Test DGEJSV +* Note: DGEJSV only works for M >= N * RESULT( 19 ) = ZERO RESULT( 20 ) = ZERO @@ -862,8 +936,7 @@ & M, N, VTSAV, LDA, SSAV, USAV, LDU, A, LDVT, & WORK, LWORK, IWORK, INFO ) * -* DGEJSV retuns V not VT, so we transpose to use the same -* test suite. 
+* DGEJSV returns V not VT * DO 140 J=1,N DO 130 I=1,N @@ -872,7 +945,7 @@ 140 END DO * IF( IINFO.NE.0 ) THEN - WRITE( NOUT, FMT = 9995 )'GESVJ', IINFO, M, N, + WRITE( NOUT, FMT = 9995 )'GEJSV', IINFO, M, N, $ JTYPE, LSWORK, IOLDSD INFO = ABS( IINFO ) RETURN @@ -1086,7 +1159,7 @@ * * End of Loop -- Check for RESULT(j) > THRESH * - DO 210 J = 1, 35 + DO 210 J = 1, 39 IF( RESULT( J ).GE.THRESH ) THEN IF( NFAIL.EQ.0 ) THEN WRITE( NOUT, FMT = 9999 ) @@ -1097,7 +1170,7 @@ NFAIL = NFAIL + 1 END IF 210 CONTINUE - NTEST = NTEST + 35 + NTEST = NTEST + 39 220 CONTINUE 230 CONTINUE 240 CONTINUE @@ -1158,6 +1231,12 @@ $ ' DGESVDX(V,V,V) ', $ / '34 = | I - U**T U | / ( M ulp ) ', $ / '35 = | I - VT VT**T | / ( N ulp ) ', + $ ' DGESVDQ(H,N,N,A,A)', + $ / '36 = | A - U diag(S) VT | / ( |A| max(M,N) ulp ) ', + $ / '37 = | I - U**T U | / ( M ulp ) ', + $ / '38 = | I - VT VT**T | / ( N ulp ) ', + $ / '39 = 0 if S contains min(M,N) nonnegative values in', + $ ' decreasing order, else 1/ulp', $ / / ) 9997 FORMAT( ' M=', I5, ', N=', I5, ', type ', I1, ', IWS=', I1, $ ', seed=', 4( I4, ',' ), ' test(', I2, ')=', G11.4 ) diff --git a/lapack-netlib/TESTING/EIG/derred.f b/lapack-netlib/TESTING/EIG/derred.f index 5bde7f67d..94264e256 100644 --- a/lapack-netlib/TESTING/EIG/derred.f +++ b/lapack-netlib/TESTING/EIG/derred.f @@ -36,6 +36,8 @@ *> DGEJSV compute SVD of an M-by-N matrix A where M >= N *> DGESVDX compute SVD of an M-by-N matrix A(by bisection *> and inverse iteration) +*> DGESVDQ compute SVD of an M-by-N matrix A (with a +*> QR-preconditioned method) *> \endverbatim * * Arguments: @@ -100,7 +102,7 @@ * .. * .. External Subroutines .. EXTERNAL CHKXER, DGEES, DGEESX, DGEEV, DGEEVX, DGEJSV, - $ DGESDD, DGESVD + $ DGESDD, DGESVD, DGESVDX, DGESVDQ * .. * .. External Functions ..
LOGICAL DSLECT, LSAMEN @@ -486,6 +488,61 @@ ELSE WRITE( NOUT, FMT = 9998 ) END IF +* +* Test DGESVDQ +* + SRNAMT = 'DGESVDQ' + INFOT = 1 + CALL DGESVDQ( 'X', 'P', 'T', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DGESVDQ( 'A', 'X', 'T', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DGESVDQ( 'A', 'P', 'X', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DGESVDQ( 'A', 'P', 'T', 'X', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DGESVDQ( 'A', 'P', 'T', 'A', 'X', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DGESVDQ( 'A', 'P', 'T', 'A', 'A', -1, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DGESVDQ( 'A', 'P', 'T', 'A', 'A', 0, 1, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 0, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ -1, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 14 + CALL DGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ 1, VT, -1, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 17 + CALL DGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ 1, VT, 1, NS, IW, -5, W, 1, W, 1, INFO ) + CALL CHKXER( 'DGESVDQ', INFOT, NOUT, LERR, OK ) + NT = 11 + IF( OK ) THEN + WRITE( NOUT, FMT = 9999 )SRNAMT( 1:LEN_TRIM( SRNAMT ) ), + $ NT + ELSE + WRITE( NOUT, FMT = 9998 ) + END IF END IF * * Print a summary line. diff --git a/lapack-netlib/TESTING/EIG/dget39.f b/lapack-netlib/TESTING/EIG/dget39.f index 1d0ec1f45..17e66c8e6 100644 --- a/lapack-netlib/TESTING/EIG/dget39.f +++ b/lapack-netlib/TESTING/EIG/dget39.f @@ -194,7 +194,7 @@ VM5( 2 ) = EPS VM5( 3 ) = SQRT( SMLNUM ) * -* Initalization +* Initialization * KNT = 0 RMAX = ZERO diff --git a/lapack-netlib/TESTING/EIG/dsbt21.f b/lapack-netlib/TESTING/EIG/dsbt21.f index e7db231a9..54795623b 100644 --- a/lapack-netlib/TESTING/EIG/dsbt21.f +++ b/lapack-netlib/TESTING/EIG/dsbt21.f @@ -28,15 +28,16 @@ *> *> DSBT21 generally checks a decomposition of the form *> -*> A = U S U' +*> A = U S U**T *> -*> where ' means transpose, A is symmetric banded, U is +*> where **T means transpose, A is symmetric banded, U is *> orthogonal, and S is diagonal (if KS=0) or symmetric *> tridiagonal (if KS=1). *> *> Specifically: *> -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> \endverbatim * * Arguments: @@ -214,7 +215,7 @@ * ANORM = MAX( DLANSB( '1', CUPLO, N, IKA, A, LDA, WORK ), UNFL ) * -* Compute error matrix: Error = A - U S U' +* Compute error matrix: Error = A - U S U**T * * Copy A from SB to SP storage format. 
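The "Copy A from SB to SP storage format" step above is pure index bookkeeping between the two symmetric storage schemes: for UPLO='U', full A(i,j) lives at AB(k+1+i-j,j) in band storage and at AP(i+j*(j-1)/2) in packed storage. A small sketch of exactly that copy (hypothetical file; upper triangle, bandwidth k):

    ! sb_to_sp.f90 -- sketch: copy a symmetric band matrix (SB, upper,
    ! bandwidth k) into packed storage (SP, upper), as DSBT21 does.
    program sb_to_sp
      implicit none
      integer, parameter :: n = 5, k = 2
      double precision :: ab(k+1,n), ap(n*(n+1)/2)
      integer :: i, j
      call random_number(ab)
      ap = 0d0
      do j = 1, n
         do i = max(1, j-k), j
            ap(i + j*(j-1)/2) = ab(k+1+i-j, j)    ! both map full A(i,j)
         end do
      end do
      print *, ap
    end program sb_to_sp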
* @@ -265,7 +266,7 @@ * * Do Test 2 * -* Compute UU' - I +* Compute U U**T - I * CALL DGEMM( 'N', 'C', N, N, N, ONE, U, LDU, U, LDU, ZERO, WORK, $ N ) diff --git a/lapack-netlib/TESTING/EIG/dspt21.f b/lapack-netlib/TESTING/EIG/dspt21.f index 9f87959fe..4b1d360c5 100644 --- a/lapack-netlib/TESTING/EIG/dspt21.f +++ b/lapack-netlib/TESTING/EIG/dspt21.f @@ -28,9 +28,9 @@ *> *> DSPT21 generally checks a decomposition of the form *> -*> A = U S U' +*> A = U S U**T *> -*> where ' means transpose, A is symmetric (stored in packed format), U +*> where **T means transpose, A is symmetric (stored in packed format), U *> is orthogonal, and S is diagonal (if KBAND=0) or symmetric *> tridiagonal (if KBAND=1). If ITYPE=1, then U is represented as a *> dense matrix, otherwise the U is expressed as a product of @@ -41,15 +41,16 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> If ITYPE=2, then: *> -*> RESULT(1) = | A - V S V' | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**T | / ( |A| n ulp ) *> *> If ITYPE=3, then: *> -*> RESULT(1) = | I - VU' | / ( n ulp ) +*> RESULT(1) = | I - V U**T | / ( n ulp ) *> *> Packed storage means that, for example, if UPLO='U', then the columns *> of the upper triangle of A are stored one after another, so that @@ -70,7 +71,7 @@ *> *> If UPLO='U', then V = H(n-1)...H(1), where *> -*> H(j) = I - tau(j) v(j) v(j)' +*> H(j) = I - tau(j) v(j) v(j)**T *> *> and the first j-1 elements of v(j) are stored in V(1:j-1,j+1), *> (i.e., VP( j*(j+1)/2 + 1 : j*(j+1)/2 + j-1 ) ), @@ -78,7 +79,7 @@ *> *> If UPLO='L', then V = H(1)...H(n-1), where *> -*> H(j) = I - tau(j) v(j) v(j)' +*> H(j) = I - tau(j) v(j) v(j)**T *> *> and the first j elements of v(j) are 0, the (j+1)-st is 1, and the *> (j+2)-nd through n-th elements are stored in V(j+2:n,j) (i.e., @@ -93,14 +94,15 @@ *> ITYPE is INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense orthogonal matrix: -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> 2: U expressed as a product V of Housholder transformations: -*> RESULT(1) = | A - V S V' | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**T | / ( |A| n ulp ) *> *> 3: U expressed both as a dense orthogonal matrix and *> as a product of Housholder transformations: -*> RESULT(1) = | I - VU' | / ( n ulp ) +*> RESULT(1) = | I - V U**T | / ( n ulp ) *> \endverbatim *> *> \param[in] UPLO @@ -183,7 +185,7 @@ *> \verbatim *> TAU is DOUBLE PRECISION array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)' in the Householder transformation H(j) of +*> v(j) v(j)**T in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. 
*> \endverbatim @@ -303,7 +305,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: error = A - U S U' +* ITYPE=1: error = A - U S U**T * CALL DLASET( 'Full', N, N, ZERO, ZERO, WORK, N ) CALL DCOPY( LAP, AP, 1, WORK, 1 ) @@ -322,7 +324,7 @@ * ELSE IF( ITYPE.EQ.2 ) THEN * -* ITYPE=2: error = V S V' - A +* ITYPE=2: error = V S V**T - A * CALL DLASET( 'Full', N, N, ZERO, ZERO, WORK, N ) * @@ -389,7 +391,7 @@ * ELSE IF( ITYPE.EQ.3 ) THEN * -* ITYPE=3: error = U V' - I +* ITYPE=3: error = U V**T - I * IF( N.LT.2 ) $ RETURN @@ -420,7 +422,7 @@ * * Do Test 2 * -* Compute UU' - I +* Compute U U**T - I * IF( ITYPE.EQ.1 ) THEN CALL DGEMM( 'N', 'C', N, N, N, ONE, U, LDU, U, LDU, ZERO, WORK, diff --git a/lapack-netlib/TESTING/EIG/dsyt21.f b/lapack-netlib/TESTING/EIG/dsyt21.f index 0da3e5882..e00bd0db2 100644 --- a/lapack-netlib/TESTING/EIG/dsyt21.f +++ b/lapack-netlib/TESTING/EIG/dsyt21.f @@ -28,9 +28,9 @@ *> *> DSYT21 generally checks a decomposition of the form *> -*> A = U S U' +*> A = U S U**T *> -*> where ' means transpose, A is symmetric, U is orthogonal, and S is +*> where **T means transpose, A is symmetric, U is orthogonal, and S is *> diagonal (if KBAND=0) or symmetric tridiagonal (if KBAND=1). *> *> If ITYPE=1, then U is represented as a dense matrix; otherwise U is @@ -41,18 +41,19 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> If ITYPE=2, then: *> -*> RESULT(1) = | A - V S V' | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**T | / ( |A| n ulp ) *> *> If ITYPE=3, then: *> -*> RESULT(1) = | I - VU' | / ( n ulp ) +*> RESULT(1) = | I - V U**T | / ( n ulp ) *> *> For ITYPE > 1, the transformation U is expressed as a product -*> V = H(1)...H(n-2), where H(j) = I - tau(j) v(j) v(j)' and each +*> V = H(1)...H(n-2), where H(j) = I - tau(j) v(j) v(j)**T and each *> vector v(j) has its first j elements 0 and the remaining n-j elements *> stored in V(j+1:n,j). *> \endverbatim @@ -65,14 +66,15 @@ *> ITYPE is INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense orthogonal matrix: -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> 2: U expressed as a product V of Housholder transformations: -*> RESULT(1) = | A - V S V' | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**T | / ( |A| n ulp ) *> *> 3: U expressed both as a dense orthogonal matrix and *> as a product of Housholder transformations: -*> RESULT(1) = | I - VU' | / ( n ulp ) +*> RESULT(1) = | I - V U**T | / ( n ulp ) *> \endverbatim *> *> \param[in] UPLO @@ -170,7 +172,7 @@ *> \verbatim *> TAU is DOUBLE PRECISION array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)' in the Householder transformation H(j) of +*> v(j) v(j)**T in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. 
*> \endverbatim @@ -283,7 +285,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: error = A - U S U' +* ITYPE=1: error = A - U S U**T * CALL DLASET( 'Full', N, N, ZERO, ZERO, WORK, N ) CALL DLACPY( CUPLO, N, N, A, LDA, WORK, N ) @@ -302,7 +304,7 @@ * ELSE IF( ITYPE.EQ.2 ) THEN * -* ITYPE=2: error = V S V' - A +* ITYPE=2: error = V S V**T - A * CALL DLASET( 'Full', N, N, ZERO, ZERO, WORK, N ) * @@ -359,7 +361,7 @@ * ELSE IF( ITYPE.EQ.3 ) THEN * -* ITYPE=3: error = U V' - I +* ITYPE=3: error = U V**T - I * IF( N.LT.2 ) $ RETURN @@ -395,7 +397,7 @@ * * Do Test 2 * -* Compute UU' - I +* Compute U U**T - I * IF( ITYPE.EQ.1 ) THEN CALL DGEMM( 'N', 'C', N, N, N, ONE, U, LDU, U, LDU, ZERO, WORK, diff --git a/lapack-netlib/TESTING/EIG/dsyt22.f b/lapack-netlib/TESTING/EIG/dsyt22.f index 479b3ba5e..09e4aeb82 100644 --- a/lapack-netlib/TESTING/EIG/dsyt22.f +++ b/lapack-netlib/TESTING/EIG/dsyt22.f @@ -41,7 +41,8 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | U' A U - S | / ( |A| m ulp ) *andC> RESULT(2) = | I - U'U | / ( m ulp ) +*> RESULT(1) = | U**T A U - S | / ( |A| m ulp ) and +*> RESULT(2) = | I - U**T U | / ( m ulp ) *> \endverbatim * * Arguments: @@ -51,7 +52,8 @@ *> ITYPE INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense orthogonal matrix: -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> UPLO CHARACTER *> If UPLO='U', the upper triangle of A will be used and the @@ -122,7 +124,7 @@ *> *> TAU DOUBLE PRECISION array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)' in the Householder transformation H(j) of +*> v(j) v(j)**T in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. *> Not modified. @@ -207,7 +209,7 @@ * * Compute error matrix: * -* ITYPE=1: error = U' A U - S +* ITYPE=1: error = U**T A U - S * CALL DSYMM( 'L', UPLO, N, M, ONE, A, LDA, U, LDU, ZERO, WORK, N ) NN = N*N @@ -240,7 +242,7 @@ * * Do Test 2 * -* Compute U'U - I +* Compute U**T U - I * IF( ITYPE.EQ.1 ) $ CALL DORT01( 'Columns', N, M, U, LDU, WORK, 2*N*N, diff --git a/lapack-netlib/TESTING/EIG/sbdt05.f b/lapack-netlib/TESTING/EIG/sbdt05.f index 972ff952f..e3e79e91e 100644 --- a/lapack-netlib/TESTING/EIG/sbdt05.f +++ b/lapack-netlib/TESTING/EIG/sbdt05.f @@ -52,6 +52,7 @@ *> \verbatim *> A is REAL array, dimension (LDA,N) *> The m by n matrix A. +*> \endverbatim *> *> \param[in] LDA *> \verbatim diff --git a/lapack-netlib/TESTING/EIG/schksb2stg.f b/lapack-netlib/TESTING/EIG/schksb2stg.f index 07b6fa95c..7308bb690 100644 --- a/lapack-netlib/TESTING/EIG/schksb2stg.f +++ b/lapack-netlib/TESTING/EIG/schksb2stg.f @@ -670,8 +670,8 @@ * the one from above. Compare it with D1 computed * using the SSBTRD. * - CALL SLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL SLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL SLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL SLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL SLACPY( ' ', K+1, N, A, LDA, U, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH diff --git a/lapack-netlib/TESTING/EIG/schkst.f b/lapack-netlib/TESTING/EIG/schkst.f index f4ae46832..a851bbbbf 100644 --- a/lapack-netlib/TESTING/EIG/schkst.f +++ b/lapack-netlib/TESTING/EIG/schkst.f @@ -166,7 +166,7 @@ *> SSTEMR('V', 'I') *> *> Tests 29 through 34 are disable at present because SSTEMR -*> does not handle partial specturm requests. 
+*> does not handle partial spectrum requests. *> *> (29) | S - Z D Z' | / ( |S| n ulp ) SSTEMR('V', 'I') *> @@ -999,8 +999,8 @@ * the one from above. Compare it with D1 computed * using the 1-stage. * - CALL SLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL SLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL SLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL SLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL SLACPY( "U", N, N, A, LDA, V, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH diff --git a/lapack-netlib/TESTING/EIG/sdrgsx.f b/lapack-netlib/TESTING/EIG/sdrgsx.f index bb5af0fd6..58e63e793 100644 --- a/lapack-netlib/TESTING/EIG/sdrgsx.f +++ b/lapack-netlib/TESTING/EIG/sdrgsx.f @@ -770,7 +770,7 @@ c MINWRK = MAX( 10*( NSIZE+1 ), 5*NSIZE*NSIZE / 2-2 ) CALL SLACPY( 'Full', MPLUSN, MPLUSN, AI, LDA, A, LDA ) CALL SLACPY( 'Full', MPLUSN, MPLUSN, BI, LDA, B, LDA ) * -* Compute the Schur factorization while swaping the +* Compute the Schur factorization while swapping the * m-by-m (1,1)-blocks with n-by-n (2,2)-blocks. * CALL SGGESX( 'V', 'V', 'S', SLCTSX, 'B', MPLUSN, AI, LDA, BI, LDA, diff --git a/lapack-netlib/TESTING/EIG/sdrvbd.f b/lapack-netlib/TESTING/EIG/sdrvbd.f index b5d8a9b9a..101c8ba09 100644 --- a/lapack-netlib/TESTING/EIG/sdrvbd.f +++ b/lapack-netlib/TESTING/EIG/sdrvbd.f @@ -32,7 +32,7 @@ *> \verbatim *> *> SDRVBD checks the singular value decomposition (SVD) drivers -*> SGESVD, SGESDD, SGESVJ, and SGEJSV. +*> SGESVD, SGESDD, SGESVDQ, SGESVJ, SGEJSV, and SGESVDX. *> *> Both SGESVD and SGESDD factor A = U diag(S) VT, where U and VT are *> orthogonal and diag(S) is diagonal with the entries of the array S @@ -90,6 +90,17 @@ *> (14) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the *> vector of singular values from the partial SVD *> +*> Test for SGESVDQ: +*> +*> (36) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> +*> (37) | I - U'U | / ( M ulp ) +*> +*> (38) | I - VT VT' | / ( N ulp ) +*> +*> (39) S contains MNMIN nonnegative values in decreasing order. +*> (Return 0 if true, 1/ULP if false.) +*> *> Test for SGESVJ: *> *> (15) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) @@ -359,6 +370,8 @@ * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * June 2016 +* + IMPLICIT NONE * * .. Scalar Arguments .. INTEGER INFO, LDA, LDU, LDVT, LWORK, NOUT, NSIZES, @@ -391,12 +404,18 @@ $ MMAX, MNMAX, MNMIN, MTYPES, N, NFAIL, $ NMAX, NS, NSI, NSV, NTEST REAL ANORM, DIF, DIV, OVFL, RTUNFL, ULP, - $ ULPINV, UNFL, VL, VU + $ ULPINV, UNFL, VL, VU +* .. +* .. Local Scalars for SGESVDQ .. + INTEGER LIWORK, LRWORK, NUMRANK +* .. +* .. Local Arrays for SGESVDQ .. + REAL RWORK( 2 ) * .. * .. Local Arrays .. CHARACTER CJOB( 4 ), CJOBR( 3 ), CJOBV( 2 ) INTEGER IOLDSD( 4 ), ISEED2( 4 ) - REAL RESULT( 40 ) + REAL RESULT( 39 ) * .. * .. External Functions .. REAL SLAMCH, SLARND @@ -404,8 +423,8 @@ * .. * .. External Subroutines ..
EXTERNAL ALASVM, SBDT01, SGEJSV, SGESDD, SGESVD, - $ SGESVDX, SGESVJ, SLABAD, SLACPY, SLASET, - $ SLATMS, SORT01, SORT03, XERBLA + $ SGESVDQ, SGESVDX, SGESVJ, SLABAD, SLACPY, + $ SLASET, SLATMS, SORT01, SORT03, XERBLA * .. * .. Intrinsic Functions .. INTRINSIC ABS, REAL, INT, MAX, MIN @@ -781,8 +800,64 @@ RESULT( 14 ) = MAX( RESULT( 14 ), DIF ) 110 CONTINUE * -* Test SGESVJ: Factorize A -* Note: SGESVJ does not work for M < N +* Test SGESVDQ +* Note: SGESVDQ only works for M >= N +* + RESULT( 36 ) = ZERO + RESULT( 37 ) = ZERO + RESULT( 38 ) = ZERO + RESULT( 39 ) = ZERO +* + IF( M.GE.N ) THEN + IWTMP = 5*MNMIN*MNMIN + 9*MNMIN + MAX( M, N ) + LSWORK = IWTMP + ( IWS-1 )*( LWORK-IWTMP ) / 3 + LSWORK = MIN( LSWORK, LWORK ) + LSWORK = MAX( LSWORK, 1 ) + IF( IWS.EQ.4 ) + $ LSWORK = LWORK +* + CALL SLACPY( 'F', M, N, ASAV, LDA, A, LDA ) + SRNAMT = 'SGESVDQ' +* + LRWORK = 2 + LIWORK = MAX( N, 1 ) + CALL SGESVDQ( 'H', 'N', 'N', 'A', 'A', + $ M, N, A, LDA, SSAV, USAV, LDU, + $ VTSAV, LDVT, NUMRANK, IWORK, LIWORK, + $ WORK, LWORK, RWORK, LRWORK, IINFO ) +* + IF( IINFO.NE.0 ) THEN + WRITE( NOUT, FMT = 9995 )'SGESVDQ', IINFO, M, N, + $ JTYPE, LSWORK, IOLDSD + INFO = ABS( IINFO ) + RETURN + END IF +* +* Do tests 36--39 +* + CALL SBDT01( M, N, 0, ASAV, LDA, USAV, LDU, SSAV, E, + $ VTSAV, LDVT, WORK, RESULT( 36 ) ) + IF( M.NE.0 .AND. N.NE.0 ) THEN + CALL SORT01( 'Columns', M, M, USAV, LDU, WORK, + $ LWORK, RESULT( 37 ) ) + CALL SORT01( 'Rows', N, N, VTSAV, LDVT, WORK, + $ LWORK, RESULT( 38 ) ) + END IF + RESULT( 39 ) = ZERO + DO 199 I = 1, MNMIN - 1 + IF( SSAV( I ).LT.SSAV( I+1 ) ) + $ RESULT( 39 ) = ULPINV + IF( SSAV( I ).LT.ZERO ) + $ RESULT( 39 ) = ULPINV + 199 CONTINUE + IF( MNMIN.GE.1 ) THEN + IF( SSAV( MNMIN ).LT.ZERO ) + $ RESULT( 39 ) = ULPINV + END IF + END IF +* +* Test SGESVJ +* Note: SGESVJ only works for M >= N * RESULT( 15 ) = ZERO RESULT( 16 ) = ZERO @@ -802,8 +877,7 @@ CALL SGESVJ( 'G', 'U', 'V', M, N, USAV, LDA, SSAV, & 0, A, LDVT, WORK, LWORK, INFO ) * -* SGESVJ retuns V not VT, so we transpose to use the same -* test suite. +* SGESVJ returns V not VT * DO J=1,N DO I=1,N @@ -841,8 +915,8 @@ END IF END IF * -* Test SGEJSV: Factorize A -* Note: SGEJSV does not work for M < N +* Test SGEJSV +* Note: SGEJSV only works for M >= N * RESULT( 19 ) = ZERO RESULT( 20 ) = ZERO @@ -862,8 +936,7 @@ & M, N, VTSAV, LDA, SSAV, USAV, LDU, A, LDVT, & WORK, LWORK, IWORK, INFO ) * -* SGEJSV retuns V not VT, so we transpose to use the same -* test suite. 
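
The SGESVDQ block added above drives the QR-preconditioned SVD with the option string ('H','N','N','A','A') and then feeds the factors to SBDT01/SORT01. As a hedged, stand-alone sketch (not part of the patch; the workspace sizes below are simply generous guesses for a tiny problem rather than the documented minimums), a minimal call looks like this:

      PROGRAM TGESVDQ
*     Hedged sketch, not part of the patch: drive SGESVDQ on a small
*     M >= N matrix with the same options the new tests use.
      INTEGER            M, N, LDA, LDU, LDVT, LWORK, LRWORK, LIWORK
      PARAMETER          ( M = 4, N = 3, LDA = M, LDU = M, LDVT = N,
     $                   LWORK = 200, LRWORK = 50, LIWORK = 50 )
      INTEGER            I, J, NUMRANK, INFO
      INTEGER            IWORK( LIWORK )
      REAL               A( LDA, N ), S( N ), U( LDU, M ),
     $                   VT( LDVT, N ), WORK( LWORK ), RWORK( LRWORK )
      INTRINSIC          MIN, REAL
      EXTERNAL           SGESVDQ
*     A(I,J) = min(I,J) has full column rank for M >= N.
      DO 20 J = 1, N
         DO 10 I = 1, M
            A( I, J ) = REAL( MIN( I, J ) )
   10    CONTINUE
   20 CONTINUE
*     'H' = high accuracy, no row pivoting, no transposing trick, and
*     full U and V**T ('A','A'), mirroring the call in the test above.
      CALL SGESVDQ( 'H', 'N', 'N', 'A', 'A', M, N, A, LDA, S, U, LDU,
     $             VT, LDVT, NUMRANK, IWORK, LIWORK, WORK, LWORK,
     $             RWORK, LRWORK, INFO )
      WRITE( *, * ) 'INFO =', INFO, ' NUMRANK =', NUMRANK
      WRITE( *, * ) 'S =', ( S( I ), I = 1, N )
      END

On exit NUMRANK reports the numerical rank SGESVDQ detected, which is why the tests only validate the first min(M,N) singular values for ordering and nonnegativity.
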
+* SGEJSV returns V not VT * DO 140 J=1,N DO 130 I=1,N @@ -872,7 +945,7 @@ 140 END DO * IF( IINFO.NE.0 ) THEN - WRITE( NOUT, FMT = 9995 )'GESVJ', IINFO, M, N, + WRITE( NOUT, FMT = 9995 )'GEJSV', IINFO, M, N, $ JTYPE, LSWORK, IOLDSD INFO = ABS( IINFO ) RETURN @@ -1086,7 +1159,7 @@ * * End of Loop -- Check for RESULT(j) > THRESH * - DO 210 J = 1, 35 + DO 210 J = 1, 39 IF( RESULT( J ).GE.THRESH ) THEN IF( NFAIL.EQ.0 ) THEN WRITE( NOUT, FMT = 9999 ) @@ -1097,7 +1170,7 @@ NFAIL = NFAIL + 1 END IF 210 CONTINUE - NTEST = NTEST + 35 + NTEST = NTEST + 39 220 CONTINUE 230 CONTINUE 240 CONTINUE @@ -1158,6 +1231,12 @@ $ ' SGESVDX(V,V,V) ', $ / '34 = | I - U**T U | / ( M ulp ) ', $ / '35 = | I - VT VT**T | / ( N ulp ) ', + $ ' SGESVDQ(H,N,N,A,A', + $ / '36 = | A - U diag(S) VT | / ( |A| max(M,N) ulp ) ', + $ / '37 = | I - U**T U | / ( M ulp ) ', + $ / '38 = | I - VT VT**T | / ( N ulp ) ', + $ / '39 = 0 if S contains min(M,N) nonnegative values in', + $ ' decreasing order, else 1/ulp', $ / / ) 9997 FORMAT( ' M=', I5, ', N=', I5, ', type ', I1, ', IWS=', I1, $ ', seed=', 4( I4, ',' ), ' test(', I2, ')=', G11.4 ) diff --git a/lapack-netlib/TESTING/EIG/serred.f b/lapack-netlib/TESTING/EIG/serred.f index f478fcdb1..7d3772e84 100644 --- a/lapack-netlib/TESTING/EIG/serred.f +++ b/lapack-netlib/TESTING/EIG/serred.f @@ -36,6 +36,8 @@ *> SGEJSV compute SVD of an M-by-N matrix A where M >= N *> SGESVDX compute SVD of an M-by-N matrix A(by bisection *> and inverse iteration) +*> SGESVDQ compute SVD of an M-by-N matrix A(with a +*> QR-Preconditioned ) *> \endverbatim * * Arguments: @@ -100,7 +102,7 @@ * .. * .. External Subroutines .. EXTERNAL CHKXER, SGEES, SGEESX, SGEEV, SGEEVX, SGEJSV, - $ SGESDD, SGESVD + $ SGESDD, SGESVD, SGESVDX, SGESVDQ * .. * .. External Functions .. 
LOGICAL SSLECT, LSAMEN @@ -486,6 +488,61 @@ ELSE WRITE( NOUT, FMT = 9998 ) END IF +* +* Test SGESVDQ +* + SRNAMT = 'SGESVDQ' + INFOT = 1 + CALL SGESVDQ( 'X', 'P', 'T', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SGESVDQ( 'A', 'X', 'T', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SGESVDQ( 'A', 'P', 'X', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SGESVDQ( 'A', 'P', 'T', 'X', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SGESVDQ( 'A', 'P', 'T', 'A', 'X', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL SGESVDQ( 'A', 'P', 'T', 'A', 'A', -1, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SGESVDQ( 'A', 'P', 'T', 'A', 'A', 0, 1, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 0, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ -1, VT, 0, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 14 + CALL SGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ 1, VT, -1, NS, IW, 1, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 17 + CALL SGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ 1, VT, 1, NS, IW, -5, W, 1, W, 1, INFO ) + CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK ) + NT = 11 + IF( OK ) THEN + WRITE( NOUT, FMT = 9999 )SRNAMT( 1:LEN_TRIM( SRNAMT ) ), + $ NT + ELSE + WRITE( NOUT, FMT = 9998 ) + END IF END IF * * Print a summary line. diff --git a/lapack-netlib/TESTING/EIG/sget39.f b/lapack-netlib/TESTING/EIG/sget39.f index f02c6f856..f6c0f7e7c 100644 --- a/lapack-netlib/TESTING/EIG/sget39.f +++ b/lapack-netlib/TESTING/EIG/sget39.f @@ -194,7 +194,7 @@ VM5( 2 ) = EPS VM5( 3 ) = SQRT( SMLNUM ) * -* Initalization +* Initialization * KNT = 0 RMAX = ZERO diff --git a/lapack-netlib/TESTING/EIG/ssbt21.f b/lapack-netlib/TESTING/EIG/ssbt21.f index 50128ddbb..7ef5ad9b3 100644 --- a/lapack-netlib/TESTING/EIG/ssbt21.f +++ b/lapack-netlib/TESTING/EIG/ssbt21.f @@ -28,15 +28,16 @@ *> *> SSBT21 generally checks a decomposition of the form *> -*> A = U S U' +*> A = U S U**T *> -*> where ' means transpose, A is symmetric banded, U is +*> where **T means transpose, A is symmetric banded, U is *> orthogonal, and S is diagonal (if KS=0) or symmetric *> tridiagonal (if KS=1). *> *> Specifically: *> -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> \endverbatim * * Arguments: @@ -214,7 +215,7 @@ * ANORM = MAX( SLANSB( '1', CUPLO, N, IKA, A, LDA, WORK ), UNFL ) * -* Compute error matrix: Error = A - U S U' +* Compute error matrix: Error = A - U S U**T * * Copy A from SB to SP storage format. 
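
The SGESVDQ error-exit checks above all follow the same LAPACK testing idiom: SRNAMT names the routine under test, INFOT holds the position of the one deliberately invalid argument, and CHKXER verifies that XERBLA was invoked for exactly that argument. A hedged sketch of a single check (TSVDQE is a hypothetical wrapper; CHKXER, the INFOC and SRNAMC common blocks, and the non-aborting XERBLA are assumed to come from the LAPACK TESTING tree):

      SUBROUTINE TSVDQE( NOUT )
*     Hedged sketch, not from the patch: one SGESVDQ error-exit check.
*     Requires the TESTING build of XERBLA, which records the bad
*     argument instead of stopping the program.
      INTEGER            NOUT
      LOGICAL            LERR, OK
      CHARACTER*32       SRNAMT
      INTEGER            INFOT, NUNIT
      COMMON             / INFOC / INFOT, NUNIT, OK, LERR
      COMMON             / SRNAMC / SRNAMT
      INTEGER            NS, INFO, IW( 1 )
      REAL               A( 1, 1 ), S( 1 ), U( 1, 1 ), VT( 1, 1 ),
     $                   W( 1 )
      EXTERNAL           SGESVDQ, CHKXER
      NUNIT = NOUT
      OK = .TRUE.
      LERR = .FALSE.
      SRNAMT = 'SGESVDQ'
*     M = -1 is invalid, so XERBLA should report argument 6.
      INFOT = 6
      CALL SGESVDQ( 'A', 'P', 'T', 'A', 'A', -1, 0, A, 1, S, U,
     $             1, VT, 1, NS, IW, 1, W, 1, W, 1, INFO )
      CALL CHKXER( 'SGESVDQ', INFOT, NOUT, LERR, OK )
      RETURN
      END
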
* @@ -265,7 +266,7 @@ * * Do Test 2 * -* Compute UU' - I +* Compute U U**T - I * CALL SGEMM( 'N', 'C', N, N, N, ONE, U, LDU, U, LDU, ZERO, WORK, $ N ) diff --git a/lapack-netlib/TESTING/EIG/sspt21.f b/lapack-netlib/TESTING/EIG/sspt21.f index 2384c87de..4ecb04c0e 100644 --- a/lapack-netlib/TESTING/EIG/sspt21.f +++ b/lapack-netlib/TESTING/EIG/sspt21.f @@ -28,9 +28,9 @@ *> *> SSPT21 generally checks a decomposition of the form *> -*> A = U S U' +*> A = U S U**T *> -*> where ' means transpose, A is symmetric (stored in packed format), U +*> where **T means transpose, A is symmetric (stored in packed format), U *> is orthogonal, and S is diagonal (if KBAND=0) or symmetric *> tridiagonal (if KBAND=1). If ITYPE=1, then U is represented as a *> dense matrix, otherwise the U is expressed as a product of @@ -41,15 +41,16 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> If ITYPE=2, then: *> -*> RESULT(1) = | A - V S V' | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**T | / ( |A| n ulp ) *> *> If ITYPE=3, then: *> -*> RESULT(1) = | I - VU' | / ( n ulp ) +*> RESULT(1) = | I - V U**T | / ( n ulp ) *> *> Packed storage means that, for example, if UPLO='U', then the columns *> of the upper triangle of A are stored one after another, so that @@ -70,7 +71,7 @@ *> *> If UPLO='U', then V = H(n-1)...H(1), where *> -*> H(j) = I - tau(j) v(j) v(j)' +*> H(j) = I - tau(j) v(j) v(j)**T *> *> and the first j-1 elements of v(j) are stored in V(1:j-1,j+1), *> (i.e., VP( j*(j+1)/2 + 1 : j*(j+1)/2 + j-1 ) ), @@ -78,7 +79,7 @@ *> *> If UPLO='L', then V = H(1)...H(n-1), where *> -*> H(j) = I - tau(j) v(j) v(j)' +*> H(j) = I - tau(j) v(j) v(j)**T *> *> and the first j elements of v(j) are 0, the (j+1)-st is 1, and the *> (j+2)-nd through n-th elements are stored in V(j+2:n,j) (i.e., @@ -93,14 +94,15 @@ *> ITYPE is INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense orthogonal matrix: -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> 2: U expressed as a product V of Housholder transformations: -*> RESULT(1) = | A - V S V' | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**T | / ( |A| n ulp ) *> *> 3: U expressed both as a dense orthogonal matrix and *> as a product of Housholder transformations: -*> RESULT(1) = | I - VU' | / ( n ulp ) +*> RESULT(1) = | I - V U**T | / ( n ulp ) *> \endverbatim *> *> \param[in] UPLO @@ -183,7 +185,7 @@ *> \verbatim *> TAU is REAL array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)' in the Householder transformation H(j) of +*> v(j) v(j)**T in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. 
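
SSBT21's second test, shown just above, reduces to a small amount of level-3 BLAS. Factored out, the residual looks roughly like this (a hedged sketch, not from the patch; SORTRS is a hypothetical name):

      REAL FUNCTION SORTRS( N, U, LDU, WORK, LDW )
*     Hedged sketch, not from the patch: the "test 2" residual
*     | I - U U**T | / ( n ulp ), formed the way SSBT21 forms it.
*     WORK is N-by-N scratch; 'C' and 'T' coincide for real matrices,
*     matching the SGEMM call above.
      INTEGER            N, LDU, LDW
      REAL               U( LDU, * ), WORK( LDW, * )
      INTEGER            J
      REAL               ULP
      REAL               ZERO, ONE
      PARAMETER          ( ZERO = 0.0E+0, ONE = 1.0E+0 )
      REAL               SLAMCH, SLANGE
      EXTERNAL           SLAMCH, SLANGE, SGEMM
      INTRINSIC          MIN, REAL
      IF( N.LE.0 ) THEN
         SORTRS = ZERO
         RETURN
      END IF
*     WORK = U * U**T, then subtract the identity on the diagonal.
      CALL SGEMM( 'N', 'C', N, N, N, ONE, U, LDU, U, LDU, ZERO,
     $            WORK, LDW )
      DO 10 J = 1, N
         WORK( J, J ) = WORK( J, J ) - ONE
   10 CONTINUE
      ULP = SLAMCH( 'Precision' )
*     The norm is capped at N, as the EIG tests conventionally do.
      SORTRS = MIN( SLANGE( '1', N, N, WORK, LDW, WORK ),
     $         REAL( N ) ) / ( REAL( N )*ULP )
      RETURN
      END
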
*> \endverbatim @@ -303,7 +305,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: error = A - U S U' +* ITYPE=1: error = A - U S U**T * CALL SLASET( 'Full', N, N, ZERO, ZERO, WORK, N ) CALL SCOPY( LAP, AP, 1, WORK, 1 ) @@ -322,7 +324,7 @@ * ELSE IF( ITYPE.EQ.2 ) THEN * -* ITYPE=2: error = V S V' - A +* ITYPE=2: error = V S V**T - A * CALL SLASET( 'Full', N, N, ZERO, ZERO, WORK, N ) * @@ -389,7 +391,7 @@ * ELSE IF( ITYPE.EQ.3 ) THEN * -* ITYPE=3: error = U V' - I +* ITYPE=3: error = U V**T - I * IF( N.LT.2 ) $ RETURN @@ -420,7 +422,7 @@ * * Do Test 2 * -* Compute UU' - I +* Compute U U**T - I * IF( ITYPE.EQ.1 ) THEN CALL SGEMM( 'N', 'C', N, N, N, ONE, U, LDU, U, LDU, ZERO, WORK, diff --git a/lapack-netlib/TESTING/EIG/ssyt21.f b/lapack-netlib/TESTING/EIG/ssyt21.f index a7add3418..fc7ca6a2a 100644 --- a/lapack-netlib/TESTING/EIG/ssyt21.f +++ b/lapack-netlib/TESTING/EIG/ssyt21.f @@ -28,9 +28,9 @@ *> *> SSYT21 generally checks a decomposition of the form *> -*> A = U S U' +*> A = U S U**T *> -*> where ' means transpose, A is symmetric, U is orthogonal, and S is +*> where **T means transpose, A is symmetric, U is orthogonal, and S is *> diagonal (if KBAND=0) or symmetric tridiagonal (if KBAND=1). *> *> If ITYPE=1, then U is represented as a dense matrix; otherwise U is @@ -41,18 +41,19 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> If ITYPE=2, then: *> -*> RESULT(1) = | A - V S V' | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**T | / ( |A| n ulp ) *> *> If ITYPE=3, then: *> -*> RESULT(1) = | I - VU' | / ( n ulp ) +*> RESULT(1) = | I - V U**T | / ( n ulp ) *> *> For ITYPE > 1, the transformation U is expressed as a product -*> V = H(1)...H(n-2), where H(j) = I - tau(j) v(j) v(j)' and each +*> V = H(1)...H(n-2), where H(j) = I - tau(j) v(j) v(j)**T and each *> vector v(j) has its first j elements 0 and the remaining n-j elements *> stored in V(j+1:n,j). *> \endverbatim @@ -65,14 +66,15 @@ *> ITYPE is INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense orthogonal matrix: -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> 2: U expressed as a product V of Housholder transformations: -*> RESULT(1) = | A - V S V' | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**T | / ( |A| n ulp ) *> *> 3: U expressed both as a dense orthogonal matrix and *> as a product of Housholder transformations: -*> RESULT(1) = | I - VU' | / ( n ulp ) +*> RESULT(1) = | I - V U**T | / ( n ulp ) *> \endverbatim *> *> \param[in] UPLO @@ -170,7 +172,7 @@ *> \verbatim *> TAU is REAL array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)' in the Householder transformation H(j) of +*> v(j) v(j)**T in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. 
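
For ITYPE > 1 the checks reconstruct U from the factored form just described, one reflector at a time. A hedged sketch (SAPPHH is a hypothetical helper, not from the patch) of applying a single H(j) = I - tau(j) v(j) v(j)**T to a vector:

      SUBROUTINE SAPPHH( N, TAU, V, X )
*     Hedged sketch, not from the patch: apply one Householder
*     transformation H = I - tau * v * v**T to x in place, using
*     H*x = x - tau * (v**T * x) * v.
      INTEGER            N
      REAL               TAU, V( * ), X( * )
      REAL               ALPHA
      REAL               SDOT
      EXTERNAL           SDOT, SAXPY
      ALPHA = SDOT( N, V, 1, X, 1 )
      CALL SAXPY( N, -TAU*ALPHA, V, 1, X, 1 )
      RETURN
      END
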
*> \endverbatim @@ -283,7 +285,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: error = A - U S U' +* ITYPE=1: error = A - U S U**T * CALL SLASET( 'Full', N, N, ZERO, ZERO, WORK, N ) CALL SLACPY( CUPLO, N, N, A, LDA, WORK, N ) @@ -302,7 +304,7 @@ * ELSE IF( ITYPE.EQ.2 ) THEN * -* ITYPE=2: error = V S V' - A +* ITYPE=2: error = V S V**T - A * CALL SLASET( 'Full', N, N, ZERO, ZERO, WORK, N ) * @@ -359,7 +361,7 @@ * ELSE IF( ITYPE.EQ.3 ) THEN * -* ITYPE=3: error = U V' - I +* ITYPE=3: error = U V**T - I * IF( N.LT.2 ) $ RETURN @@ -395,7 +397,7 @@ * * Do Test 2 * -* Compute UU' - I +* Compute U U**T - I * IF( ITYPE.EQ.1 ) THEN CALL SGEMM( 'N', 'C', N, N, N, ONE, U, LDU, U, LDU, ZERO, WORK, diff --git a/lapack-netlib/TESTING/EIG/ssyt22.f b/lapack-netlib/TESTING/EIG/ssyt22.f index 3b748ec7f..38fc3e555 100644 --- a/lapack-netlib/TESTING/EIG/ssyt22.f +++ b/lapack-netlib/TESTING/EIG/ssyt22.f @@ -41,7 +41,8 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | U' A U - S | / ( |A| m ulp ) *andC> RESULT(2) = | I - U'U | / ( m ulp ) +*> RESULT(1) = | U**T A U - S | / ( |A| m ulp ) and +*> RESULT(2) = | I - U**T U | / ( m ulp ) *> \endverbatim * * Arguments: @@ -51,7 +52,8 @@ *> ITYPE INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense orthogonal matrix: -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**T | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**T | / ( n ulp ) *> *> UPLO CHARACTER *> If UPLO='U', the upper triangle of A will be used and the @@ -122,7 +124,7 @@ *> *> TAU REAL array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)' in the Householder transformation H(j) of +*> v(j) v(j)**T in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. *> Not modified. @@ -207,7 +209,7 @@ * * Compute error matrix: * -* ITYPE=1: error = U' A U - S +* ITYPE=1: error = U**T A U - S * CALL SSYMM( 'L', UPLO, N, M, ONE, A, LDA, U, LDU, ZERO, WORK, N ) NN = N*N @@ -240,7 +242,7 @@ * * Do Test 2 * -* Compute U'U - I +* Compute U**T U - I * IF( ITYPE.EQ.1 ) $ CALL SORT01( 'Columns', N, M, U, LDU, WORK, 2*N*N, diff --git a/lapack-netlib/TESTING/EIG/zbdt05.f b/lapack-netlib/TESTING/EIG/zbdt05.f index 7a493292a..bbf0208b7 100644 --- a/lapack-netlib/TESTING/EIG/zbdt05.f +++ b/lapack-netlib/TESTING/EIG/zbdt05.f @@ -52,6 +52,7 @@ *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) *> The m by n matrix A. +*> \endverbatim *> *> \param[in] LDA *> \verbatim diff --git a/lapack-netlib/TESTING/EIG/zchkhb2stg.f b/lapack-netlib/TESTING/EIG/zchkhb2stg.f index dbbb84348..05434e4e3 100644 --- a/lapack-netlib/TESTING/EIG/zchkhb2stg.f +++ b/lapack-netlib/TESTING/EIG/zchkhb2stg.f @@ -680,8 +680,8 @@ * the one from above. Compare it with D1 computed * using the DSBTRD. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL ZLACPY( ' ', K+1, N, A, LDA, U, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH @@ -753,8 +753,8 @@ * the one from above. Compare it with D1 computed * using the DSBTRD. 
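
The DLASET changes above are conformance fixes rather than behavior changes: for an N-by-1 matrix the leading dimension must be at least max(1,N), even though a single-column matrix never uses it to step between columns, so passing 1 violated the documented requirement whenever N > 1. A minimal sketch of the corrected usage:

      PROGRAM TLASET
*     Hedged sketch, not part of the patch: zero an N-vector that
*     DLASET views as an N-by-1 matrix.  The leading dimension N
*     satisfies LDA >= max(1,M) for M = N rows, which is what the
*     fixed calls above now guarantee.
      INTEGER            N
      PARAMETER          ( N = 5 )
      DOUBLE PRECISION   ZERO
      PARAMETER          ( ZERO = 0.0D+0 )
      DOUBLE PRECISION   SD( N )
      EXTERNAL           DLASET
      CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N )
      WRITE( *, * ) SD
      END
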
* - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL ZLACPY( ' ', K+1, N, A, LDA, U, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH diff --git a/lapack-netlib/TESTING/EIG/zchkst.f b/lapack-netlib/TESTING/EIG/zchkst.f index 4a8636ad9..cd45e98e1 100644 --- a/lapack-netlib/TESTING/EIG/zchkst.f +++ b/lapack-netlib/TESTING/EIG/zchkst.f @@ -167,7 +167,7 @@ *> ZSTEMR('V', 'I') *> *> Tests 29 through 34 are disable at present because ZSTEMR -*> does not handle partial specturm requests. +*> does not handle partial spectrum requests. *> *> (29) | S - Z D Z* | / ( |S| n ulp ) ZSTEMR('V', 'I') *> diff --git a/lapack-netlib/TESTING/EIG/zchkst2stg.f b/lapack-netlib/TESTING/EIG/zchkst2stg.f index cd952bc37..4eadca4f3 100644 --- a/lapack-netlib/TESTING/EIG/zchkst2stg.f +++ b/lapack-netlib/TESTING/EIG/zchkst2stg.f @@ -188,7 +188,7 @@ *> ZSTEMR('V', 'I') *> *> Tests 29 through 34 are disable at present because ZSTEMR -*> does not handle partial specturm requests. +*> does not handle partial spectrum requests. *> *> (29) | S - Z D Z* | / ( |S| n ulp ) ZSTEMR('V', 'I') *> @@ -1014,8 +1014,8 @@ * the one from above. Compare it with D1 computed * using the 1-stage. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL ZLACPY( 'U', N, N, A, LDA, V, LDU ) LH = MAX(1, 4*N) LW = LWORK - LH @@ -1048,8 +1048,8 @@ * the one from above. Compare it with D1 computed * using the 1-stage. * - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, 1 ) - CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, 1 ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SD, N ) + CALL DLASET( 'Full', N, 1, ZERO, ZERO, SE, N ) CALL ZLACPY( 'L', N, N, A, LDA, V, LDU ) CALL ZHETRD_2STAGE( 'N', "L", N, V, LDU, SD, SE, TAU, $ WORK, LH, WORK( LH+1 ), LW, IINFO ) diff --git a/lapack-netlib/TESTING/EIG/zdrgev3.f b/lapack-netlib/TESTING/EIG/zdrgev3.f index 62ddf2b56..11e8562d7 100644 --- a/lapack-netlib/TESTING/EIG/zdrgev3.f +++ b/lapack-netlib/TESTING/EIG/zdrgev3.f @@ -389,7 +389,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date Febuary 2015 +*> \date February 2015 * *> \ingroup complex16_eig * diff --git a/lapack-netlib/TESTING/EIG/zdrgsx.f b/lapack-netlib/TESTING/EIG/zdrgsx.f index 51a7d773f..f5821e520 100644 --- a/lapack-netlib/TESTING/EIG/zdrgsx.f +++ b/lapack-netlib/TESTING/EIG/zdrgsx.f @@ -738,7 +738,7 @@ CALL ZLACPY( 'Full', MPLUSN, MPLUSN, AI, LDA, A, LDA ) CALL ZLACPY( 'Full', MPLUSN, MPLUSN, BI, LDA, B, LDA ) * -* Compute the Schur factorization while swaping the +* Compute the Schur factorization while swapping the * m-by-m (1,1)-blocks with n-by-n (2,2)-blocks. * CALL ZGGESX( 'V', 'V', 'S', ZLCTSX, 'B', MPLUSN, AI, LDA, BI, LDA, diff --git a/lapack-netlib/TESTING/EIG/zdrvbd.f b/lapack-netlib/TESTING/EIG/zdrvbd.f index 4bdbdfe2e..105e9dff7 100644 --- a/lapack-netlib/TESTING/EIG/zdrvbd.f +++ b/lapack-netlib/TESTING/EIG/zdrvbd.f @@ -33,8 +33,9 @@ *> *> \verbatim *> -*> ZDRVBD checks the singular value decomposition (SVD) driver ZGESVD -*> and ZGESDD. +*> ZDRVBD checks the singular value decomposition (SVD) driver ZGESVD, +*> ZGESDD, ZGESVJ, ZGEJSV, ZGESVDX, and ZGESVDQ. +*> *> ZGESVD and ZGESDD factors A = U diag(S) VT, where U and VT are *> unitary and diag(S) is diagonal with the entries of the array S on *> its diagonal. 
The entries of S are the singular values, nonnegative @@ -73,81 +74,92 @@ *> *> Test for ZGESDD: *> -*> (1) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> (8) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) *> -*> (2) | I - U'U | / ( M ulp ) +*> (9) | I - U'U | / ( M ulp ) *> -*> (3) | I - VT VT' | / ( N ulp ) +*> (10) | I - VT VT' | / ( N ulp ) *> -*> (4) S contains MNMIN nonnegative values in decreasing order. +*> (11) S contains MNMIN nonnegative values in decreasing order. *> (Return 0 if true, 1/ULP if false.) *> -*> (5) | U - Upartial | / ( M ulp ) where Upartial is a partially +*> (12) | U - Upartial | / ( M ulp ) where Upartial is a partially *> computed U. *> -*> (6) | VT - VTpartial | / ( N ulp ) where VTpartial is a partially +*> (13) | VT - VTpartial | / ( N ulp ) where VTpartial is a partially *> computed VT. *> -*> (7) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the +*> (14) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the *> vector of singular values from the partial SVD *> +*> Test for ZGESVDQ: +*> +*> (36) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> +*> (37) | I - U'U | / ( M ulp ) +*> +*> (38) | I - VT VT' | / ( N ulp ) +*> +*> (39) S contains MNMIN nonnegative values in decreasing order. +*> (Return 0 if true, 1/ULP if false.) +*> *> Test for ZGESVJ: *> -*> (1) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> (15) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) *> -*> (2) | I - U'U | / ( M ulp ) +*> (16) | I - U'U | / ( M ulp ) *> -*> (3) | I - VT VT' | / ( N ulp ) +*> (17) | I - VT VT' | / ( N ulp ) *> -*> (4) S contains MNMIN nonnegative values in decreasing order. +*> (18) S contains MNMIN nonnegative values in decreasing order. *> (Return 0 if true, 1/ULP if false.) *> *> Test for ZGEJSV: *> -*> (1) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> (19) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) *> -*> (2) | I - U'U | / ( M ulp ) +*> (20) | I - U'U | / ( M ulp ) *> -*> (3) | I - VT VT' | / ( N ulp ) +*> (21) | I - VT VT' | / ( N ulp ) *> -*> (4) S contains MNMIN nonnegative values in decreasing order. +*> (22) S contains MNMIN nonnegative values in decreasing order. *> (Return 0 if true, 1/ULP if false.) *> *> Test for ZGESVDX( 'V', 'V', 'A' )/ZGESVDX( 'N', 'N', 'A' ) *> -*> (1) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) +*> (23) | A - U diag(S) VT | / ( |A| max(M,N) ulp ) *> -*> (2) | I - U'U | / ( M ulp ) +*> (24) | I - U'U | / ( M ulp ) *> -*> (3) | I - VT VT' | / ( N ulp ) +*> (25) | I - VT VT' | / ( N ulp ) *> -*> (4) S contains MNMIN nonnegative values in decreasing order. +*> (26) S contains MNMIN nonnegative values in decreasing order. *> (Return 0 if true, 1/ULP if false.) *> -*> (5) | U - Upartial | / ( M ulp ) where Upartial is a partially +*> (27) | U - Upartial | / ( M ulp ) where Upartial is a partially *> computed U. *> -*> (6) | VT - VTpartial | / ( N ulp ) where VTpartial is a partially +*> (28) | VT - VTpartial | / ( N ulp ) where VTpartial is a partially *> computed VT. 
*> -*> (7) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the +*> (29) | S - Spartial | / ( MNMIN ulp |S| ) where Spartial is the *> vector of singular values from the partial SVD *> *> Test for ZGESVDX( 'V', 'V', 'I' ) *> -*> (8) | U' A VT''' - diag(S) | / ( |A| max(M,N) ulp ) +*> (30) | U' A VT''' - diag(S) | / ( |A| max(M,N) ulp ) *> -*> (9) | I - U'U | / ( M ulp ) +*> (31) | I - U'U | / ( M ulp ) *> -*> (10) | I - VT VT' | / ( N ulp ) +*> (32) | I - VT VT' | / ( N ulp ) *> *> Test for ZGESVDX( 'V', 'V', 'V' ) *> -*> (11) | U' A VT''' - diag(S) | / ( |A| max(M,N) ulp ) +*> (33) | U' A VT''' - diag(S) | / ( |A| max(M,N) ulp ) *> -*> (12) | I - U'U | / ( M ulp ) +*> (34) | I - U'U | / ( M ulp ) *> -*> (13) | I - VT VT' | / ( N ulp ) +*> (35) | I - VT VT' | / ( N ulp ) *> *> The "sizes" are specified by the arrays MM(1:NSIZES) and *> NN(1:NSIZES); the value of each element pair (MM(j),NN(j)) @@ -393,6 +405,8 @@ * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * June 2016 +* + IMPLICIT NONE * * .. Scalar Arguments .. INTEGER INFO, LDA, LDU, LDVT, LWORK, NOUNIT, NSIZES, @@ -411,7 +425,7 @@ * ===================================================================== * * .. Parameters .. - DOUBLE PRECISION ZERO, ONE, TWO, HALF + DOUBLE PRECISION ZERO, ONE, TWO, HALF PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0, TWO = 2.0D0, $ HALF = 0.5D0 ) COMPLEX*16 CZERO, CONE @@ -431,10 +445,13 @@ DOUBLE PRECISION ANORM, DIF, DIV, OVFL, RTUNFL, ULP, ULPINV, $ UNFL, VL, VU * .. +* .. Local Scalars for ZGESVDQ .. + INTEGER LIWORK, NUMRANK +* .. * .. Local Arrays .. CHARACTER CJOB( 4 ), CJOBR( 3 ), CJOBV( 2 ) INTEGER IOLDSD( 4 ), ISEED2( 4 ) - DOUBLE PRECISION RESULT( 35 ) + DOUBLE PRECISION RESULT( 39 ) * .. * .. External Functions .. DOUBLE PRECISION DLAMCH, DLARND @@ -442,8 +459,8 @@ * .. * .. External Subroutines .. EXTERNAL ALASVM, XERBLA, ZBDT01, ZBDT05, ZGESDD, - $ ZGESVD, ZGESVJ, ZGEJSV, ZGESVDX, ZLACPY, - $ ZLASET, ZLATMS, ZUNT01, ZUNT03 + $ ZGESVD, ZGESVDQ, ZGESVJ, ZGEJSV, ZGESVDX, + $ ZLACPY, ZLASET, ZLATMS, ZUNT01, ZUNT03 * .. * .. Intrinsic Functions .. INTRINSIC ABS, DBLE, MAX, MIN @@ -836,10 +853,65 @@ 120 CONTINUE RESULT( 14 ) = MAX( RESULT( 14 ), DIF ) 130 CONTINUE - * -* Test ZGESVJ: Factorize A -* Note: ZGESVJ does not work for M < N +* Test ZGESVDQ +* Note: ZGESVDQ only works for M >= N +* + RESULT( 36 ) = ZERO + RESULT( 37 ) = ZERO + RESULT( 38 ) = ZERO + RESULT( 39 ) = ZERO +* + IF( M.GE.N ) THEN + IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) + LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 + LSWORK = MIN( LSWORK, LWORK ) + LSWORK = MAX( LSWORK, 1 ) + IF( IWSPC.EQ.4 ) + $ LSWORK = LWORK +* + CALL ZLACPY( 'F', M, N, ASAV, LDA, A, LDA ) + SRNAMT = 'ZGESVDQ' +* + LRWORK = MAX(2, M, 5*N) + LIWORK = MAX( N, 1 ) + CALL ZGESVDQ( 'H', 'N', 'N', 'A', 'A', + $ M, N, A, LDA, SSAV, USAV, LDU, + $ VTSAV, LDVT, NUMRANK, IWORK, LIWORK, + $ WORK, LWORK, RWORK, LRWORK, IINFO ) +* + IF( IINFO.NE.0 ) THEN + WRITE( NOUNIT, FMT = 9995 )'ZGESVDQ', IINFO, M, N, + $ JTYPE, LSWORK, IOLDSD + INFO = ABS( IINFO ) + RETURN + END IF +* +* Do tests 36--39 +* + CALL ZBDT01( M, N, 0, ASAV, LDA, USAV, LDU, SSAV, E, + $ VTSAV, LDVT, WORK, RWORK, RESULT( 36 ) ) + IF( M.NE.0 .AND. 
N.NE.0 ) THEN + CALL ZUNT01( 'Columns', M, M, USAV, LDU, WORK, + $ LWORK, RWORK, RESULT( 37 ) ) + CALL ZUNT01( 'Rows', N, N, VTSAV, LDVT, WORK, + $ LWORK, RWORK, RESULT( 38 ) ) + END IF + RESULT( 39 ) = ZERO + DO 199 I = 1, MNMIN - 1 + IF( SSAV( I ).LT.SSAV( I+1 ) ) + $ RESULT( 39 ) = ULPINV + IF( SSAV( I ).LT.ZERO ) + $ RESULT( 39 ) = ULPINV + 199 CONTINUE + IF( MNMIN.GE.1 ) THEN + IF( SSAV( MNMIN ).LT.ZERO ) + $ RESULT( 39 ) = ULPINV + END IF + END IF +* +* Test ZGESVJ +* Note: ZGESVJ only works for M >= N * RESULT( 15 ) = ZERO RESULT( 16 ) = ZERO @@ -847,13 +919,13 @@ RESULT( 18 ) = ZERO * IF( M.GE.N ) THEN - IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) - LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 - LSWORK = MIN( LSWORK, LWORK ) - LSWORK = MAX( LSWORK, 1 ) - LRWORK = MAX(6,N) - IF( IWSPC.EQ.4 ) - $ LSWORK = LWORK + IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) + LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 + LSWORK = MIN( LSWORK, LWORK ) + LSWORK = MAX( LSWORK, 1 ) + LRWORK = MAX(6,N) + IF( IWSPC.EQ.4 ) + $ LSWORK = LWORK * CALL ZLACPY( 'F', M, N, ASAV, LDA, USAV, LDA ) SRNAMT = 'ZGESVJ' @@ -861,8 +933,7 @@ & 0, A, LDVT, WORK, LWORK, RWORK, & LRWORK, IINFO ) * -* ZGESVJ retuns V not VT, so we transpose to use the same -* test suite. +* ZGESVJ returns V not VH * DO J=1,N DO I=1,N @@ -900,21 +971,21 @@ END IF END IF * -* Test ZGEJSV: Factorize A -* Note: ZGEJSV does not work for M < N +* Test ZGEJSV +* Note: ZGEJSV only works for M >= N * RESULT( 19 ) = ZERO RESULT( 20 ) = ZERO RESULT( 21 ) = ZERO RESULT( 22 ) = ZERO IF( M.GE.N ) THEN - IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) - LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 - LSWORK = MIN( LSWORK, LWORK ) - LSWORK = MAX( LSWORK, 1 ) - IF( IWSPC.EQ.4 ) - $ LSWORK = LWORK - LRWORK = MAX( 7, N + 2*M) + IWTMP = 2*MNMIN*MNMIN + 2*MNMIN + MAX( M, N ) + LSWORK = IWTMP + ( IWSPC-1 )*( LWORK-IWTMP ) / 3 + LSWORK = MIN( LSWORK, LWORK ) + LSWORK = MAX( LSWORK, 1 ) + IF( IWSPC.EQ.4 ) + $ LSWORK = LWORK + LRWORK = MAX( 7, N + 2*M) * CALL ZLACPY( 'F', M, N, ASAV, LDA, VTSAV, LDA ) SRNAMT = 'ZGEJSV' @@ -923,8 +994,7 @@ & WORK, LWORK, RWORK, & LRWORK, IWORK, IINFO ) * -* ZGEJSV retuns V not VT, so we transpose to use the same -* test suite. 
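
Test 36 above delegates the residual to ZBDT01; written out directly, it can be formed with one column scaling and one ZGEMM. A hedged sketch (ZSVDRS is a hypothetical helper, not from the patch):

      SUBROUTINE ZSVDRS( M, N, A, LDA, U, LDU, S, VT, LDVT,
     $                   B, LDB, C, LDC, RESID )
*     Hedged sketch, not from the patch: the test-36 residual
*     | A - U diag(S) V**H | / ( |A| max(M,N) ulp ), formed directly.
*     B and C are M-by-N scratch arrays.
      INTEGER            M, N, LDA, LDU, LDVT, LDB, LDC
      DOUBLE PRECISION   S( * ), RESID
      COMPLEX*16         A( LDA, * ), U( LDU, * ), VT( LDVT, * ),
     $                   B( LDB, * ), C( LDC, * )
      INTEGER            I, J
      DOUBLE PRECISION   ANORM, ULP, DUM( 1 )
      DOUBLE PRECISION   ZERO
      PARAMETER          ( ZERO = 0.0D+0 )
      COMPLEX*16         CONE
      PARAMETER          ( CONE = ( 1.0D+0, 0.0D+0 ) )
      DOUBLE PRECISION   DLAMCH, ZLANGE
      EXTERNAL           DLAMCH, ZLANGE, ZGEMM, ZLACPY
      INTRINSIC          DBLE, MAX
      IF( M.LE.0 .OR. N.LE.0 ) THEN
         RESID = ZERO
         RETURN
      END IF
*     B = U(:,1:N) * diag(S): scale column J of U by S(J).
      DO 20 J = 1, N
         DO 10 I = 1, M
            B( I, J ) = S( J )*U( I, J )
   10    CONTINUE
   20 CONTINUE
*     C = A, then C := B * V**H - C = U diag(S) V**H - A.
      CALL ZLACPY( 'Full', M, N, A, LDA, C, LDC )
      CALL ZGEMM( 'N', 'N', M, N, N, CONE, B, LDB, VT, LDVT,
     $            -CONE, C, LDC )
      ULP = DLAMCH( 'Precision' )
      ANORM = MAX( ZLANGE( '1', M, N, A, LDA, DUM ),
     $        DLAMCH( 'Safe minimum' ) )
      RESID = ZLANGE( '1', M, N, C, LDC, DUM ) /
     $        ( ANORM*DBLE( MAX( M, N ) )*ULP )
      RETURN
      END
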
+*        ZGEJSV returns V not VH
 *
          DO 133 J=1,N
             DO 132 I=1,N
@@ -933,7 +1003,7 @@
  133     END DO
 *
          IF( IINFO.NE.0 ) THEN
-            WRITE( NOUNIT, FMT = 9995 )'GESVJ', IINFO, M, N,
+            WRITE( NOUNIT, FMT = 9995 )'GEJSV', IINFO, M, N,
     $         JTYPE, LSWORK, IOLDSD
             INFO = ABS( IINFO )
             RETURN
@@ -1160,7 +1230,7 @@
 *
       NTEST = 0
       NFAIL = 0
-      DO 190 J = 1, 35
+      DO 190 J = 1, 39
          IF( RESULT( J ).GE.ZERO )
     $      NTEST = NTEST + 1
          IF( RESULT( J ).GE.THRESH )
@@ -1175,7 +1245,7 @@
          NTESTF = 2
       END IF
 *
-      DO 200 J = 1, 35
+      DO 200 J = 1, 39
          IF( RESULT( J ).GE.THRESH ) THEN
             WRITE( NOUNIT, FMT = 9997 )M, N, JTYPE, IWSPC,
     $         IOLDSD, J, RESULT( J )
@@ -1251,6 +1321,12 @@
     $      / '33 = | U**T A VT**T - diag(S) | / ( |A| max(M,N) ulp )',
     $      / '34 = | I - U**T U | / ( M ulp ) ',
     $      / '35 = | I - VT VT**T | / ( N ulp ) ',
+    $      ' ZGESVDQ(H,N,N,A,A',
+    $      / '36 = | A - U diag(S) VT | / ( |A| max(M,N) ulp ) ',
+    $      / '37 = | I - U**T U | / ( M ulp ) ',
+    $      / '38 = | I - VT VT**T | / ( N ulp ) ',
+    $      / '39 = 0 if S contains min(M,N) nonnegative values in',
+    $      ' decreasing order, else 1/ulp',
     $      / / )
  9997 FORMAT( ' M=', I5, ', N=', I5, ', type ', I1, ', IWS=', I1,
     $      ', seed=', 4( I4, ',' ), ' test(', I2, ')=', G11.4 )
diff --git a/lapack-netlib/TESTING/EIG/zerred.f b/lapack-netlib/TESTING/EIG/zerred.f
index 00bfbf261..013dc16c5 100644
--- a/lapack-netlib/TESTING/EIG/zerred.f
+++ b/lapack-netlib/TESTING/EIG/zerred.f
@@ -36,6 +36,8 @@
 *>       ZGEJSV   compute SVD of an M-by-N matrix A where M >= N
 *>       ZGESVDX  compute SVD of an M-by-N matrix A(by bisection
 *>                and inverse iteration)
+*>       ZGESVDQ  compute SVD of an M-by-N matrix A(with a
+*>                QR-Preconditioned )
 *> \endverbatim
 *
 *  Arguments:
 *  ==========
@@ -101,7 +103,7 @@
 *     ..
 *     .. External Subroutines ..
       EXTERNAL           CHKXER, ZGEES, ZGEESX, ZGEEV, ZGEEVX, ZGESVJ,
-     $                   ZGESDD, ZGESVD
+     $                   ZGESDD, ZGESVD, ZGESVDX, ZGESVDQ
 *     ..
 *     .. External Functions ..
LOGICAL LSAMEN, ZSLECT @@ -495,6 +497,61 @@ ELSE WRITE( NOUT, FMT = 9998 ) END IF +* +* Test ZGESVDQ +* + SRNAMT = 'ZGESVDQ' + INFOT = 1 + CALL ZGESVDQ( 'X', 'P', 'T', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZGESVDQ( 'A', 'X', 'T', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGESVDQ( 'A', 'P', 'X', 'A', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGESVDQ( 'A', 'P', 'T', 'X', 'A', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGESVDQ( 'A', 'P', 'T', 'A', 'X', 0, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZGESVDQ( 'A', 'P', 'T', 'A', 'A', -1, 0, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZGESVDQ( 'A', 'P', 'T', 'A', 'A', 0, 1, A, 1, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 0, S, U, + $ 0, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ -1, VT, 0, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 14 + CALL ZGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ 1, VT, -1, NS, IW, 1, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + INFOT = 17 + CALL ZGESVDQ( 'A', 'P', 'T', 'A', 'A', 1, 1, A, 1, S, U, + $ 1, VT, 1, NS, IW, -5, W, 1, RW, 1, INFO ) + CALL CHKXER( 'ZGESVDQ', INFOT, NOUT, LERR, OK ) + NT = 11 + IF( OK ) THEN + WRITE( NOUT, FMT = 9999 )SRNAMT( 1:LEN_TRIM( SRNAMT ) ), + $ NT + ELSE + WRITE( NOUT, FMT = 9998 ) + END IF END IF * * Print a summary line. diff --git a/lapack-netlib/TESTING/EIG/zget51.f b/lapack-netlib/TESTING/EIG/zget51.f index 96b1dfae4..e019127a3 100644 --- a/lapack-netlib/TESTING/EIG/zget51.f +++ b/lapack-netlib/TESTING/EIG/zget51.f @@ -29,12 +29,13 @@ *> *> ZGET51 generally checks a decomposition of the form *> -*> A = U B VC> -*> where * means conjugate transpose and U and V are unitary. +*> A = U B V**H +*> +*> where **H means conjugate transpose and U and V are unitary. *> *> Specifically, if ITYPE=1 *> -*> RESULT = | A - U B V* | / ( |A| n ulp ) +*> RESULT = | A - U B V**H | / ( |A| n ulp ) *> *> If ITYPE=2, then: *> @@ -42,7 +43,7 @@ *> *> If ITYPE=3, then: *> -*> RESULT = | I - UU* | / ( n ulp ) +*> RESULT = | I - U U**H | / ( n ulp ) *> \endverbatim * * Arguments: @@ -52,9 +53,9 @@ *> \verbatim *> ITYPE is INTEGER *> Specifies the type of tests to be performed. 
-*> =1: RESULT = | A - U B V* | / ( |A| n ulp ) +*> =1: RESULT = | A - U B V**H | / ( |A| n ulp ) *> =2: RESULT = | A - B | / ( |A| n ulp ) -*> =3: RESULT = | I - UU* | / ( n ulp ) +*> =3: RESULT = | I - U U**H | / ( n ulp ) *> \endverbatim *> *> \param[in] N @@ -218,7 +219,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: Compute W = A - UBV' +* ITYPE=1: Compute W = A - U B V**H * CALL ZLACPY( ' ', N, N, A, LDA, WORK, N ) CALL ZGEMM( 'N', 'N', N, N, N, CONE, U, LDU, B, LDB, CZERO, @@ -259,7 +260,7 @@ * * Tests not scaled by norm(A) * -* ITYPE=3: Compute UU' - I +* ITYPE=3: Compute U U**H - I * CALL ZGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, $ WORK, N ) diff --git a/lapack-netlib/TESTING/EIG/zhbt21.f b/lapack-netlib/TESTING/EIG/zhbt21.f index 4cd8ed9f7..68125854c 100644 --- a/lapack-netlib/TESTING/EIG/zhbt21.f +++ b/lapack-netlib/TESTING/EIG/zhbt21.f @@ -28,14 +28,16 @@ *> *> ZHBT21 generally checks a decomposition of the form *> -*> A = U S UC> -*> where * means conjugate transpose, A is hermitian banded, U is +*> A = U S U**H +*> +*> where **H means conjugate transpose, A is hermitian banded, U is *> unitary, and S is diagonal (if KS=0) or symmetric *> tridiagonal (if KS=1). *> *> Specifically: *> -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> \endverbatim * * Arguments: @@ -220,7 +222,7 @@ * ANORM = MAX( ZLANHB( '1', CUPLO, N, IKA, A, LDA, RWORK ), UNFL ) * -* Compute error matrix: Error = A - U S U* +* Compute error matrix: Error = A - U S U**H * * Copy A from SB to SP storage format. * @@ -271,7 +273,7 @@ * * Do Test 2 * -* Compute UU* - I +* Compute U U**H - I * CALL ZGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, WORK, $ N ) diff --git a/lapack-netlib/TESTING/EIG/zhet21.f b/lapack-netlib/TESTING/EIG/zhet21.f index f6cb2d70a..cb854a850 100644 --- a/lapack-netlib/TESTING/EIG/zhet21.f +++ b/lapack-netlib/TESTING/EIG/zhet21.f @@ -29,8 +29,9 @@ *> *> ZHET21 generally checks a decomposition of the form *> -*> A = U S UC> -*> where * means conjugate transpose, A is hermitian, U is unitary, and +*> A = U S U**H +*> +*> where **H means conjugate transpose, A is hermitian, U is unitary, and *> S is diagonal (if KBAND=0) or (real) symmetric tridiagonal (if *> KBAND=1). *> @@ -42,18 +43,19 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> If ITYPE=2, then: *> -*> RESULT(1) = | A - V S V* | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**H | / ( |A| n ulp ) *> *> If ITYPE=3, then: *> -*> RESULT(1) = | I - UV* | / ( n ulp ) +*> RESULT(1) = | I - U V**H | / ( n ulp ) *> *> For ITYPE > 1, the transformation U is expressed as a product -*> V = H(1)...H(n-2), where H(j) = I - tau(j) v(j) v(j)C> and each +*> V = H(1)...H(n-2), where H(j) = I - tau(j) v(j) v(j)**H and each *> vector v(j) has its first j elements 0 and the remaining n-j elements *> stored in V(j+1:n,j). *> \endverbatim @@ -66,14 +68,15 @@ *> ITYPE is INTEGER *> Specifies the type of tests to be performed. 
*> 1: U expressed as a dense unitary matrix: -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> 2: U expressed as a product V of Housholder transformations: -*> RESULT(1) = | A - V S V* | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**H | / ( |A| n ulp ) *> *> 3: U expressed both as a dense unitary matrix and *> as a product of Housholder transformations: -*> RESULT(1) = | I - UV* | / ( n ulp ) +*> RESULT(1) = | I - U V**H | / ( n ulp ) *> \endverbatim *> *> \param[in] UPLO @@ -171,7 +174,7 @@ *> \verbatim *> TAU is COMPLEX*16 array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)* in the Householder transformation H(j) of +*> v(j) v(j)**H in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. *> \endverbatim @@ -294,7 +297,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: error = A - U S U* +* ITYPE=1: error = A - U S U**H * CALL ZLASET( 'Full', N, N, CZERO, CZERO, WORK, N ) CALL ZLACPY( CUPLO, N, N, A, LDA, WORK, N ) @@ -304,7 +307,6 @@ 10 CONTINUE * IF( N.GT.1 .AND. KBAND.EQ.1 ) THEN -CMK DO 20 J = 1, N - 1 DO 20 J = 2, N - 1 CALL ZHER2( CUPLO, N, -DCMPLX( E( J ) ), U( 1, J ), 1, $ U( 1, J-1 ), 1, WORK, N ) @@ -314,7 +316,7 @@ CMK DO 20 J = 1, N - 1 * ELSE IF( ITYPE.EQ.2 ) THEN * -* ITYPE=2: error = V S V* - A +* ITYPE=2: error = V S V**H - A * CALL ZLASET( 'Full', N, N, CZERO, CZERO, WORK, N ) * @@ -371,7 +373,7 @@ CMK DO 20 J = 1, N - 1 * ELSE IF( ITYPE.EQ.3 ) THEN * -* ITYPE=3: error = U V* - I +* ITYPE=3: error = U V**H - I * IF( N.LT.2 ) $ RETURN @@ -407,7 +409,7 @@ CMK DO 20 J = 1, N - 1 * * Do Test 2 * -* Compute UU* - I +* Compute U U**H - I * IF( ITYPE.EQ.1 ) THEN CALL ZGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, diff --git a/lapack-netlib/TESTING/EIG/zhet22.f b/lapack-netlib/TESTING/EIG/zhet22.f index 7237f43f7..8ef73aef3 100644 --- a/lapack-netlib/TESTING/EIG/zhet22.f +++ b/lapack-netlib/TESTING/EIG/zhet22.f @@ -42,7 +42,8 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | U' A U - S | / ( |A| m ulp ) *andC> RESULT(2) = | I - U'U | / ( m ulp ) +*> RESULT(1) = | U**H A U - S | / ( |A| m ulp ) and +*> RESULT(2) = | I - U**H U | / ( m ulp ) *> \endverbatim * * Arguments: @@ -52,7 +53,8 @@ *> ITYPE INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense orthogonal matrix: -*> RESULT(1) = | A - U S U' | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU' | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) *and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> UPLO CHARACTER *> If UPLO='U', the upper triangle of A will be used and the @@ -122,7 +124,7 @@ *> *> TAU COMPLEX*16 array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)' in the Householder transformation H(j) of +*> v(j) v(j)**H in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. *> Not modified. 
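
ZHET22's first check, described above, projects A onto the subspace spanned by the M computed eigenvectors and compares against the diagonal eigenvalue matrix. A hedged sketch of the same computation (ZPRJRS is a hypothetical helper; the KBAND = 1 tridiagonal case handled by the real test is omitted here):

      SUBROUTINE ZPRJRS( UPLO, N, M, A, LDA, U, LDU, D, W1, LDW1,
     $                   W2, LDW2, RWORK, RESID )
*     Hedged sketch, not from the patch: the ZHET22-style residual
*     | U**H A U - S | / ( |A| m ulp ) with S = diag(D).  W1 is N-by-M
*     scratch, W2 is M-by-M scratch, and RWORK needs length at least N
*     (used by ZLANHE).
      CHARACTER          UPLO
      INTEGER            N, M, LDA, LDU, LDW1, LDW2
      DOUBLE PRECISION   D( * ), RWORK( * ), RESID
      COMPLEX*16         A( LDA, * ), U( LDU, * ), W1( LDW1, * ),
     $                   W2( LDW2, * )
      INTEGER            J
      DOUBLE PRECISION   ANORM, ULP, DUM( 1 )
      DOUBLE PRECISION   ZERO
      PARAMETER          ( ZERO = 0.0D+0 )
      COMPLEX*16         CZERO, CONE
      PARAMETER          ( CZERO = ( 0.0D+0, 0.0D+0 ),
     $                   CONE = ( 1.0D+0, 0.0D+0 ) )
      DOUBLE PRECISION   DLAMCH, ZLANGE, ZLANHE
      EXTERNAL           DLAMCH, ZLANGE, ZLANHE, ZGEMM, ZHEMM
      INTRINSIC          DBLE, MAX
      IF( N.LE.0 .OR. M.LE.0 ) THEN
         RESID = ZERO
         RETURN
      END IF
*     W1 = A * U, with the Hermitian A referenced through UPLO.
      CALL ZHEMM( 'L', UPLO, N, M, CONE, A, LDA, U, LDU, CZERO,
     $            W1, LDW1 )
*     W2 = U**H * W1 = U**H A U, then subtract S on the diagonal.
      CALL ZGEMM( 'C', 'N', M, M, N, CONE, U, LDU, W1, LDW1, CZERO,
     $            W2, LDW2 )
      DO 10 J = 1, M
         W2( J, J ) = W2( J, J ) - D( J )
   10 CONTINUE
      ULP = DLAMCH( 'Precision' )
      ANORM = MAX( ZLANHE( '1', UPLO, N, A, LDA, RWORK ),
     $        DLAMCH( 'Safe minimum' ) )
      RESID = ZLANGE( '1', M, M, W2, LDW2, DUM ) /
     $        ( ANORM*DBLE( M )*ULP )
      RETURN
      END
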
@@ -215,7 +217,7 @@ * * Compute error matrix: * -* ITYPE=1: error = U' A U - S +* ITYPE=1: error = U**H A U - S * CALL ZHEMM( 'L', UPLO, N, M, CONE, A, LDA, U, LDU, CZERO, WORK, $ N ) @@ -249,7 +251,7 @@ * * Do Test 2 * -* Compute U'U - I +* Compute U**H U - I * IF( ITYPE.EQ.1 ) $ CALL ZUNT01( 'Columns', N, M, U, LDU, WORK, 2*N*N, RWORK, diff --git a/lapack-netlib/TESTING/EIG/zhpt21.f b/lapack-netlib/TESTING/EIG/zhpt21.f index ef9e4418d..825d387c7 100644 --- a/lapack-netlib/TESTING/EIG/zhpt21.f +++ b/lapack-netlib/TESTING/EIG/zhpt21.f @@ -29,8 +29,9 @@ *> *> ZHPT21 generally checks a decomposition of the form *> -*> A = U S UC> -*> where * means conjugate transpose, A is hermitian, U is +*> A = U S U**H +*> +*> where **H means conjugate transpose, A is hermitian, U is *> unitary, and S is diagonal (if KBAND=0) or (real) symmetric *> tridiagonal (if KBAND=1). If ITYPE=1, then U is represented as *> a dense matrix, otherwise the U is expressed as a product of @@ -41,15 +42,16 @@ *> *> Specifically, if ITYPE=1, then: *> -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> If ITYPE=2, then: *> -*> RESULT(1) = | A - V S V* | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**H | / ( |A| n ulp ) *> *> If ITYPE=3, then: *> -*> RESULT(1) = | I - UV* | / ( n ulp ) +*> RESULT(1) = | I - U V**H | / ( n ulp ) *> *> Packed storage means that, for example, if UPLO='U', then the columns *> of the upper triangle of A are stored one after another, so that @@ -70,14 +72,16 @@ *> *> If UPLO='U', then V = H(n-1)...H(1), where *> -*> H(j) = I - tau(j) v(j) v(j)C> +*> H(j) = I - tau(j) v(j) v(j)**H +*> *> and the first j-1 elements of v(j) are stored in V(1:j-1,j+1), *> (i.e., VP( j*(j+1)/2 + 1 : j*(j+1)/2 + j-1 ) ), *> the j-th element is 1, and the last n-j elements are 0. *> *> If UPLO='L', then V = H(1)...H(n-1), where *> -*> H(j) = I - tau(j) v(j) v(j)C> +*> H(j) = I - tau(j) v(j) v(j)**H +*> *> and the first j elements of v(j) are 0, the (j+1)-st is 1, and the *> (j+2)-nd through n-th elements are stored in V(j+2:n,j) (i.e., *> in VP( (2*n-j)*(j-1)/2 + j+2 : (2*n-j)*(j-1)/2 + n ) .) @@ -91,14 +95,15 @@ *> ITYPE is INTEGER *> Specifies the type of tests to be performed. *> 1: U expressed as a dense unitary matrix: -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) *andC> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) and +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> *> 2: U expressed as a product V of Housholder transformations: -*> RESULT(1) = | A - V S V* | / ( |A| n ulp ) +*> RESULT(1) = | A - V S V**H | / ( |A| n ulp ) *> *> 3: U expressed both as a dense unitary matrix and *> as a product of Housholder transformations: -*> RESULT(1) = | I - UV* | / ( n ulp ) +*> RESULT(1) = | I - U V**H | / ( n ulp ) *> \endverbatim *> *> \param[in] UPLO @@ -181,7 +186,7 @@ *> \verbatim *> TAU is COMPLEX*16 array, dimension (N) *> If ITYPE >= 2, then TAU(j) is the scalar factor of -*> v(j) v(j)* in the Householder transformation H(j) of +*> v(j) v(j)**H in the Householder transformation H(j) of *> the product U = H(1)...H(n-2) *> If ITYPE < 2, then TAU is not referenced. *> \endverbatim @@ -313,7 +318,7 @@ * IF( ITYPE.EQ.1 ) THEN * -* ITYPE=1: error = A - U S U* +* ITYPE=1: error = A - U S U**H * CALL ZLASET( 'Full', N, N, CZERO, CZERO, WORK, N ) CALL ZCOPY( LAP, AP, 1, WORK, 1 ) @@ -323,7 +328,6 @@ 10 CONTINUE * IF( N.GT.1 .AND. 
KBAND.EQ.1 ) THEN -CMK DO 20 J = 1, N - 1 DO 20 J = 2, N - 1 CALL ZHPR2( CUPLO, N, -DCMPLX( E( J ) ), U( 1, J ), 1, $ U( 1, J-1 ), 1, WORK ) @@ -333,7 +337,7 @@ CMK DO 20 J = 1, N - 1 * ELSE IF( ITYPE.EQ.2 ) THEN * -* ITYPE=2: error = V S V* - A +* ITYPE=2: error = V S V**H - A * CALL ZLASET( 'Full', N, N, CZERO, CZERO, WORK, N ) * @@ -401,7 +405,7 @@ CMK DO 20 J = 1, N - 1 * ELSE IF( ITYPE.EQ.3 ) THEN * -* ITYPE=3: error = U V* - I +* ITYPE=3: error = U V**H - I * IF( N.LT.2 ) $ RETURN @@ -432,7 +436,7 @@ CMK DO 20 J = 1, N - 1 * * Do Test 2 * -* Compute UU* - I +* Compute U U**H - I * IF( ITYPE.EQ.1 ) THEN CALL ZGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, diff --git a/lapack-netlib/TESTING/EIG/zstt21.f b/lapack-netlib/TESTING/EIG/zstt21.f index ad1fe5529..f2e32a12e 100644 --- a/lapack-netlib/TESTING/EIG/zstt21.f +++ b/lapack-netlib/TESTING/EIG/zstt21.f @@ -28,14 +28,15 @@ *> *> ZSTT21 checks a decomposition of the form *> -*> A = U S UC> -*> where * means conjugate transpose, A is real symmetric tridiagonal, +*> A = U S U**H +*> +*> where **H means conjugate transpose, A is real symmetric tridiagonal, *> U is unitary, and S is real and diagonal (if KBAND=0) or symmetric *> tridiagonal (if KBAND=1). Two tests are performed: *> -*> RESULT(1) = | A - U S U* | / ( |A| n ulp ) +*> RESULT(1) = | A - U S U**H | / ( |A| n ulp ) *> -*> RESULT(2) = | I - UU* | / ( n ulp ) +*> RESULT(2) = | I - U U**H | / ( n ulp ) *> \endverbatim * * Arguments: @@ -228,7 +229,7 @@ * * Do Test 2 * -* Compute UU* - I +* Compute U U**H - I * CALL ZGEMM( 'N', 'C', N, N, N, CONE, U, LDU, U, LDU, CZERO, WORK, $ N ) diff --git a/lapack-netlib/TESTING/LIN/CMakeLists.txt b/lapack-netlib/TESTING/LIN/CMakeLists.txt index 50ba8fc28..0d0bb5418 100644 --- a/lapack-netlib/TESTING/LIN/CMakeLists.txt +++ b/lapack-netlib/TESTING/LIN/CMakeLists.txt @@ -39,7 +39,8 @@ set(SLINTST schkaa.f strt02.f strt03.f strt05.f strt06.f sgennd.f sqrt04.f sqrt05.f schkqrt.f serrqrt.f schkqrtp.f serrqrtp.f schklqt.f schklqtp.f schktsqr.f - serrlqt.f serrlqtp.f serrtsqr.f stsqr01.f slqt04.f slqt05.f) + serrlqt.f serrlqtp.f serrtsqr.f stsqr01.f slqt04.f slqt05.f + schkorhr_col.f serrorhr_col.f sorhr_col01.f) if(USE_XBLAS) list(APPEND SLINTST sdrvgbx.f sdrvgex.f sdrvsyx.f sdrvpox.f @@ -94,7 +95,8 @@ set(CLINTST cchkaa.f sget06.f cgennd.f cqrt04.f cqrt05.f cchkqrt.f cerrqrt.f cchkqrtp.f cerrqrtp.f cchklqt.f cchklqtp.f cchktsqr.f - cerrlqt.f cerrlqtp.f cerrtsqr.f ctsqr01.f clqt04.f clqt05.f) + cerrlqt.f cerrlqtp.f cerrtsqr.f ctsqr01.f clqt04.f clqt05.f + cchkunhr_col.f cerrunhr_col.f cunhr_col01.f) if(USE_XBLAS) list(APPEND CLINTST cdrvgbx.f cdrvgex.f cdrvhex.f cdrvsyx.f cdrvpox.f @@ -139,7 +141,8 @@ set(DLINTST dchkaa.f dgennd.f dqrt04.f dqrt05.f dchkqrt.f derrqrt.f dchkqrtp.f derrqrtp.f dchklq.f dchklqt.f dchklqtp.f dchktsqr.f - derrlqt.f derrlqtp.f derrtsqr.f dtsqr01.f dlqt04.f dlqt05.f) + derrlqt.f derrlqtp.f derrtsqr.f dtsqr01.f dlqt04.f dlqt05.f + dchkorhr_col.f derrorhr_col.f dorhr_col01.f) if(USE_XBLAS) list(APPEND DLINTST ddrvgbx.f ddrvgex.f ddrvsyx.f ddrvpox.f @@ -194,7 +197,8 @@ set(ZLINTST zchkaa.f dget06.f zgennd.f zqrt04.f zqrt05.f zchkqrt.f zerrqrt.f zchkqrtp.f zerrqrtp.f zchklqt.f zchklqtp.f zchktsqr.f - zerrlqt.f zerrlqtp.f zerrtsqr.f ztsqr01.f zlqt04.f zlqt05.f) + zerrlqt.f zerrlqtp.f zerrtsqr.f ztsqr01.f zlqt04.f zlqt05.f + zchkunhr_col.f zerrunhr_col.f zunhr_col01.f) if(USE_XBLAS) list(APPEND ZLINTST zdrvgbx.f zdrvgex.f zdrvhex.f zdrvsyx.f zdrvpox.f @@ -235,7 +239,8 @@ set(ZLINTSTRFP zchkrfp.f zdrvrfp.f zdrvrf1.f zdrvrf2.f 
zdrvrf3.f zdrvrf4.f zerrr macro(add_lin_executable name) add_executable(${name} ${ARGN}) - target_link_libraries(${name} tmglib ${LAPACK_LIBRARIES} ${BLAS_LIBRARIES}) + target_link_libraries(${name} openblas${SUFFIX64_UNDERSCORE}) +#${TMGLIB} ${LAPACK_LIBRARIES} ${BLAS_LIBRARIES}) endmacro() if(BUILD_SINGLE) diff --git a/lapack-netlib/TESTING/LIN/Makefile b/lapack-netlib/TESTING/LIN/Makefile index 1a332f70b..6e790aa93 100644 --- a/lapack-netlib/TESTING/LIN/Makefile +++ b/lapack-netlib/TESTING/LIN/Makefile @@ -1,5 +1,3 @@ -include ../../make.inc - ####################################################################### # This makefile creates the test programs for the linear equation # routines in LAPACK. The test files are grouped as follows: @@ -33,10 +31,8 @@ include ../../make.inc # ####################################################################### -ifneq ($(strip $(VARLIB)),) - LAPACKLIB := $(VARLIB) ../../$(LAPACKLIB) -endif - +TOPSRCDIR = ../.. +include $(TOPSRCDIR)/make.inc ALINTST = \ aladhd.o alaerh.o alaesm.o alahd.o alareq.o \ @@ -77,7 +73,8 @@ SLINTST = schkaa.o \ strt02.o strt03.o strt05.o strt06.o \ sgennd.o sqrt04.o sqrt05.o schkqrt.o serrqrt.o schkqrtp.o serrqrtp.o \ schklqt.o schklqtp.o schktsqr.o \ - serrlqt.o serrlqtp.o serrtsqr.o stsqr01.o slqt04.o slqt05.o + serrlqt.o serrlqtp.o serrtsqr.o stsqr01.o slqt04.o slqt05.o \ + schkorhr_col.o serrorhr_col.o sorhr_col01.o ifdef USEXBLAS SLINTST += sdrvgbx.o sdrvgex.o sdrvsyx.o sdrvpox.o \ @@ -125,7 +122,8 @@ CLINTST = cchkaa.o \ sget06.o cgennd.o \ cqrt04.o cqrt05.o cchkqrt.o cerrqrt.o cchkqrtp.o cerrqrtp.o \ cchklqt.o cchklqtp.o cchktsqr.o \ - cerrlqt.o cerrlqtp.o cerrtsqr.o ctsqr01.o clqt04.o clqt05.o + cerrlqt.o cerrlqtp.o cerrtsqr.o ctsqr01.o clqt04.o clqt05.o \ + cchkunhr_col.o cerrunhr_col.o cunhr_col01.o ifdef USEXBLAS CLINTST += cdrvgbx.o cdrvgex.o cdrvhex.o cdrvsyx.o cdrvpox.o \ @@ -168,7 +166,8 @@ DLINTST = dchkaa.o \ dgennd.o \ dqrt04.o dqrt05.o dchkqrt.o derrqrt.o dchkqrtp.o derrqrtp.o \ dchklq.o dchklqt.o dchklqtp.o dchktsqr.o \ - derrlqt.o derrlqtp.o derrtsqr.o dtsqr01.o dlqt04.o dlqt05.o + derrlqt.o derrlqtp.o derrtsqr.o dtsqr01.o dlqt04.o dlqt05.o \ + dchkorhr_col.o derrorhr_col.o dorhr_col01.o ifdef USEXBLAS DLINTST += ddrvgbx.o ddrvgex.o ddrvsyx.o ddrvpox.o \ @@ -215,7 +214,8 @@ ZLINTST = zchkaa.o \ dget06.o zgennd.o \ zqrt04.o zqrt05.o zchkqrt.o zerrqrt.o zchkqrtp.o zerrqrtp.o \ zchklqt.o zchklqtp.o zchktsqr.o \ - zerrlqt.o zerrlqtp.o zerrtsqr.o ztsqr01.o zlqt04.o zlqt05.o + zerrlqt.o zerrlqtp.o zerrtsqr.o ztsqr01.o zlqt04.o zlqt05.o \ + zchkunhr_col.o zerrunhr_col.o zunhr_col01.o ifdef USEXBLAS ZLINTST += zdrvgbx.o zdrvgex.o zdrvhex.o zdrvsyx.o zdrvpox.o \ @@ -254,47 +254,50 @@ ZLINTSTRFP = zchkrfp.o zdrvrfp.o zdrvrf1.o zdrvrf2.o zdrvrf3.o zdrvrf4.o zerrrfp zlatb4.o zlaipd.o zlarhs.o zsbmv.o zget04.o zpot01.o zpot03.o zpot02.o \ chkxer.o xerbla.o alaerh.o aladhd.o alahd.o alasvm.o +.PHONY: all all: single double complex complex16 proto-single proto-double proto-complex proto-complex16 +.PHONY: single double complex complex16 single: xlintsts double: xlintstd complex: xlintstc complex16: xlintstz +.PHONY: proto-single proto-double proto-complex proto-complex16 proto-single: xlintstrfs proto-double: xlintstds xlintstrfd proto-complex: xlintstrfc proto-complex16: xlintstzc xlintstrfz -xlintsts: $(ALINTST) $(SLINTST) $(SCLNTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(XBLASLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintsts: $(ALINTST) $(SLINTST) $(SCLNTST) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(XBLASLIB) 
$(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstc: $(ALINTST) $(CLINTST) $(SCLNTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(XBLASLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstc: $(ALINTST) $(CLINTST) $(SCLNTST) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(XBLASLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstd: $(ALINTST) $(DLINTST) $(DZLNTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(XBLASLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstd: $(ALINTST) $(DLINTST) $(DZLNTST) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(XBLASLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstz: $(ALINTST) $(ZLINTST) $(DZLNTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(XBLASLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstz: $(ALINTST) $(ZLINTST) $(DZLNTST) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(XBLASLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstds: $(DSLINTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstds: $(DSLINTST) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstzc: $(ZCLINTST) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstzc: $(ZCLINTST) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstrfs: $(SLINTSTRFP) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstrfs: $(SLINTSTRFP) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstrfd: $(DLINTSTRFP) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstrfd: $(DLINTSTRFP) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstrfc: $(CLINTSTRFP) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstrfc: $(CLINTSTRFP) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ -xlintstrfz: $(ZLINTSTRFP) ../../$(TMGLIB) ../../$(LAPACKLIB) $(BLASLIB) - $(LOADER) $(LOADOPTS) -o $@ $^ +xlintstrfz: $(ZLINTSTRFP) $(TMGLIB) $(VARLIB) ../$(LAPACKLIB) $(BLASLIB) + $(LOADER) $(FFLAGS) $(LDFLAGS) -o $@ $^ $(ALINTST): $(FRC) $(SCLNTST): $(FRC) @@ -307,6 +310,7 @@ $(ZLINTST): $(FRC) FRC: @FRC=$(FRC) +.PHONY: clean cleanobj cleanexe clean: cleanobj cleanexe cleanobj: rm -f *.o @@ -314,15 +318,12 @@ cleanexe: rm -f xlintst* schkaa.o: schkaa.f - $(FORTRAN) $(DRVOPTS) -c -o $@ $< + $(FC) $(FFLAGS_DRV) -c -o $@ $< dchkaa.o: dchkaa.f - $(FORTRAN) $(DRVOPTS) -c -o $@ $< + $(FC) $(FFLAGS_DRV) -c -o $@ $< cchkaa.o: cchkaa.f - $(FORTRAN) $(DRVOPTS) -c -o $@ $< + $(FC) $(FFLAGS_DRV) -c -o $@ $< zchkaa.o: zchkaa.f - $(FORTRAN) $(DRVOPTS) -c -o $@ $< - -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< + $(FC) $(FFLAGS_DRV) -c -o $@ $< .NOTPARALLEL: diff --git a/lapack-netlib/TESTING/LIN/cchkaa.f b/lapack-netlib/TESTING/LIN/cchkaa.f index d8d5060c3..d36770be7 100644 --- a/lapack-netlib/TESTING/LIN/cchkaa.f +++ b/lapack-netlib/TESTING/LIN/cchkaa.f @@ -74,6 +74,8 @@ *> CEQ *> CQT *> CQX +*> CTS +*> CHH *> \endverbatim * * Parameters: @@ -108,14 +110,14 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date November 2017 +*> \date November 2019 * *> \ingroup complex_lin * * ===================================================================== PROGRAM CCHKAA * -* -- LAPACK test routine (version 3.8.0) -- +* -- LAPACK test routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. 
of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2017 @@ -165,15 +167,16 @@ * .. * .. External Subroutines .. EXTERNAL ALAREQ, CCHKEQ, CCHKGB, CCHKGE, CCHKGT, CCHKHE, - $ CCHKHE_ROOK, CCHKHE_RK, CCHKHE_AA, CCHKLQ, - $ CCHKPB,CCHKPO, CCHKPS, CCHKPP, CCHKPT, CCHKQ3, - $ CCHKQL, CCHKQR, CCHKRQ, CCHKSP, CCHKSY, - $ CCHKSY_ROOK, CCHKSY_RK, CCHKSY_AA, CCHKTB, - $ CCHKTP, CCHKTR, CCHKTZ, CDRVGB, CDRVGE, CDRVGT, - $ CDRVHE, CDRVHE_ROOK, CDRVHE_RK, CDRVHE_AA, - $ CDRVHP, CDRVLS, CDRVPB, CDRVPO, CDRVPP, CDRVPT, - $ CDRVSP, CDRVSY, CDRVSY_ROOK, CDRVSY_RK, - $ CDRVSY_AA, ILAVER, CCHKQRT, CCHKQRTP + $ CCHKHE_ROOK, CCHKHE_RK, CCHKHE_AA, CCHKHP, + $ CCHKLQ, CCHKUNHR_COL, CCHKPB, CCHKPO, CCHKPS, + $ CCHKPP, CCHKPT, CCHKQ3, CCHKQL, CCHKQR, CCHKRQ, + $ CCHKSP, CCHKSY, CCHKSY_ROOK, CCHKSY_RK, + $ CCHKSY_AA, CCHKTB, CCHKTP, CCHKTR, CCHKTZ, + $ CDRVGB, CDRVGE, CDRVGT, CDRVHE, CDRVHE_ROOK, + $ CDRVHE_RK, CDRVHE_AA, CDRVHP, CDRVLS, CDRVPB, + $ CDRVPO, CDRVPP, CDRVPT, CDRVSP, CDRVSY, + $ CDRVSY_ROOK, CDRVSY_RK, CDRVSY_AA, ILAVER, + $ CCHKQRT, CCHKQRTP * .. * .. Scalars in Common .. LOGICAL LERR, OK @@ -678,7 +681,7 @@ * * HK: Hermitian indefinite matrices, * with bounded Bunch-Kaufman (rook) pivoting algorithm, -* differnet matrix storage format than HR path version. +* different matrix storage format than HR path version. * NTYPES = 10 CALL ALAREQ( PATH, NMATS, DOTYPE, NTYPES, NIN, NOUT ) @@ -838,7 +841,7 @@ * * SK: symmetric indefinite matrices, * with bounded Bunch-Kaufman (rook) pivoting algorithm, -* differnet matrix storage format than SR path version. +* different matrix storage format than SR path version. * NTYPES = 11 CALL ALAREQ( PATH, NMATS, DOTYPE, NTYPES, NIN, NOUT ) @@ -1165,6 +1168,17 @@ ELSE WRITE( NOUT, FMT = 9989 )PATH END IF +* + ELSE IF( LSAMEN( 2, C2, 'HH' ) ) THEN +* +* HH: Householder reconstruction for tall-skinny matrices +* + IF( TSTCHK ) THEN + CALL CCHKUNHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, + $ NBVAL, NOUT ) + ELSE + WRITE( NOUT, FMT = 9989 ) PATH + END IF * ELSE * diff --git a/lapack-netlib/TESTING/LIN/cchkunhr_col.f b/lapack-netlib/TESTING/LIN/cchkunhr_col.f new file mode 100644 index 000000000..00077ddd9 --- /dev/null +++ b/lapack-netlib/TESTING/LIN/cchkunhr_col.f @@ -0,0 +1,239 @@ +*> \brief \b CCHKUNHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE CCHKUNHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, +* NBVAL, NOUT ) +* +* .. Scalar Arguments .. +* LOGICAL TSTERR +* INTEGER NM, NN, NNB, NOUT +* REAL THRESH +* .. +* .. Array Arguments .. +* INTEGER MVAL( * ), NBVAL( * ), NVAL( * ) +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> CCHKUNHR_COL tests CUNHR_COL using CLATSQR and CGEMQRT. Therefore, CLATSQR +*> (used in CGEQR) and CGEMQRT (used in CGEMQR) have to be tested +*> before this test. +*> +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] THRESH +*> \verbatim +*> THRESH is REAL +*> The threshold value for the test ratios. A result is +*> included in the output file if RESULT >= THRESH. To have +*> every test ratio printed, use THRESH = 0. +*> \endverbatim +*> +*> \param[in] TSTERR +*> \verbatim +*> TSTERR is LOGICAL +*> Flag that indicates whether error exits are to be tested. +*> \endverbatim +*> +*> \param[in] NM +*> \verbatim +*> NM is INTEGER +*> The number of values of M contained in the vector MVAL. 
+*> \endverbatim +*> +*> \param[in] MVAL +*> \verbatim +*> MVAL is INTEGER array, dimension (NM) +*> The values of the matrix row dimension M. +*> \endverbatim +*> +*> \param[in] NN +*> \verbatim +*> NN is INTEGER +*> The number of values of N contained in the vector NVAL. +*> \endverbatim +*> +*> \param[in] NVAL +*> \verbatim +*> NVAL is INTEGER array, dimension (NN) +*> The values of the matrix column dimension N. +*> \endverbatim +*> +*> \param[in] NNB +*> \verbatim +*> NNB is INTEGER +*> The number of values of NB contained in the vector NBVAL. +*> \endverbatim +*> +*> \param[in] NBVAL +*> \verbatim +*> NBVAL is INTEGER array, dimension (NBVAL) +*> The values of the blocksize NB. +*> \endverbatim +*> +*> \param[in] NOUT +*> \verbatim +*> NOUT is INTEGER +*> The unit number for output. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup complex_lin +* +* ===================================================================== + SUBROUTINE CCHKUNHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, + $ NBVAL, NOUT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.7.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* December 2016 +* +* .. Scalar Arguments .. + LOGICAL TSTERR + INTEGER NM, NN, NNB, NOUT + REAL THRESH +* .. +* .. Array Arguments .. + INTEGER MVAL( * ), NBVAL( * ), NVAL( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NTESTS + PARAMETER ( NTESTS = 6 ) +* .. +* .. Local Scalars .. + CHARACTER(LEN=3) PATH + INTEGER I, IMB1, INB1, INB2, J, T, M, N, MB1, NB1, + $ NB2, NFAIL, NERRS, NRUN +* +* .. Local Arrays .. + REAL RESULT( NTESTS ) +* .. +* .. External Subroutines .. + EXTERNAL ALAHD, ALASUM, CERRUNHR_COL, CUNHR_COL01 +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Scalars in Common .. + LOGICAL LERR, OK + CHARACTER(LEN=32) SRNAMT + INTEGER INFOT, NUNIT +* .. +* .. Common blocks .. + COMMON / INFOC / INFOT, NUNIT, OK, LERR + COMMON / SRNAMC / SRNAMT +* .. +* .. Executable Statements .. +* +* Initialize constants +* + PATH( 1: 1 ) = 'C' + PATH( 2: 3 ) = 'HH' + NRUN = 0 + NFAIL = 0 + NERRS = 0 +* +* Test the error exits +* + IF( TSTERR ) CALL CERRUNHR_COL( PATH, NOUT ) + INFOT = 0 +* +* Do for each value of M in MVAL. +* + DO I = 1, NM + M = MVAL( I ) +* +* Do for each value of N in NVAL. +* + DO J = 1, NN + N = NVAL( J ) +* +* Only for M >= N +* + IF ( MIN( M, N ).GT.0 .AND. M.GE.N ) THEN +* +* Do for each possible value of MB1 +* + DO IMB1 = 1, NNB + MB1 = NBVAL( IMB1 ) +* +* Only for MB1 > N +* + IF ( MB1.GT.N ) THEN +* +* Do for each possible value of NB1 +* + DO INB1 = 1, NNB + NB1 = NBVAL( INB1 ) +* +* Do for each possible value of NB2 +* + DO INB2 = 1, NNB + NB2 = NBVAL( INB2 ) +* + IF( NB1.GT.0 .AND. NB2.GT.0 ) THEN +* +* Test CUNHR_COL +* + CALL CUNHR_COL01( M, N, MB1, NB1, NB2, + $ RESULT ) +* +* Print information about the tests that did +* not pass the threshold. +* + DO T = 1, NTESTS + IF( RESULT( T ).GE.THRESH ) THEN + IF( NFAIL.EQ.0 .AND. 
NERRS.EQ.0 ) + $ CALL ALAHD( NOUT, PATH ) + WRITE( NOUT, FMT = 9999 ) M, N, MB1, + $ NB1, NB2, T, RESULT( T ) + NFAIL = NFAIL + 1 + END IF + END DO + NRUN = NRUN + NTESTS + END IF + END DO + END DO + END IF + END DO + END IF + END DO + END DO +* +* Print a summary of the results. +* + CALL ALASUM( PATH, NOUT, NFAIL, NRUN, NERRS ) +* + 9999 FORMAT( 'M=', I5, ', N=', I5, ', MB1=', I5, + $ ', NB1=', I5, ', NB2=', I5,' test(', I2, ')=', G12.5 ) + RETURN +* +* End of CCHKUNHR_COL +* + END \ No newline at end of file diff --git a/lapack-netlib/TESTING/LIN/cdrvls.f b/lapack-netlib/TESTING/LIN/cdrvls.f index 2c2d9abb8..f43c10b72 100644 --- a/lapack-netlib/TESTING/LIN/cdrvls.f +++ b/lapack-netlib/TESTING/LIN/cdrvls.f @@ -237,13 +237,13 @@ REAL EPS, NORMA, NORMB, RCOND * .. * .. Local Arrays .. - INTEGER ISEED( 4 ), ISEEDY( 4 ), IWQ - REAL RESULT( NTESTS ), RWQ - COMPLEX WQ + INTEGER ISEED( 4 ), ISEEDY( 4 ), IWQ( 1 ) + REAL RESULT( NTESTS ), RWQ( 1 ) + COMPLEX WQ( 1 ) * .. * .. Allocatable Arrays .. COMPLEX, ALLOCATABLE :: WORK (:) - REAL, ALLOCATABLE :: RWORK (:) + REAL, ALLOCATABLE :: RWORK (:), WORK2 (:) INTEGER, ALLOCATABLE :: IWORK (:) * .. * .. External Functions .. @@ -363,32 +363,32 @@ * Compute workspace needed for CGELS CALL CGELS( TRANS, M, N, NRHS, A, LDA, $ B, LDB, WQ, -1, INFO ) - LWORK_CGELS = INT( WQ ) + LWORK_CGELS = INT( WQ( 1 ) ) * Compute workspace needed for CGETSLS CALL CGETSLS( TRANS, M, N, NRHS, A, LDA, $ B, LDB, WQ, -1, INFO ) - LWORK_CGETSLS = INT( WQ ) + LWORK_CGETSLS = INT( WQ( 1 ) ) ENDDO END IF * Compute workspace needed for CGELSY CALL CGELSY( M, N, NRHS, A, LDA, B, LDB, - $ IWQ, RCOND, CRANK, WQ, -1, RWORK, + $ IWQ, RCOND, CRANK, WQ, -1, RWQ, $ INFO ) - LWORK_CGELSY = INT( WQ ) + LWORK_CGELSY = INT( WQ( 1 ) ) LRWORK_CGELSY = 2*N * Compute workspace needed for CGELSS CALL CGELSS( M, N, NRHS, A, LDA, B, LDB, S, - $ RCOND, CRANK, WQ, -1, RWORK, INFO ) - LWORK_CGELSS = INT( WQ ) + $ RCOND, CRANK, WQ, -1, RWQ, INFO ) + LWORK_CGELSS = INT( WQ( 1 ) ) LRWORK_CGELSS = 5*MNMIN * Compute workspace needed for CGELSD CALL CGELSD( M, N, NRHS, A, LDA, B, LDB, S, $ RCOND, CRANK, WQ, -1, RWQ, IWQ, $ INFO ) - LWORK_CGELSD = INT( WQ ) - LRWORK_CGELSD = INT( RWQ ) + LWORK_CGELSD = INT( WQ( 1 ) ) + LRWORK_CGELSD = INT( RWQ ( 1 ) ) * Compute LIWORK workspace needed for CGELSY and CGELSD - LIWORK = MAX( LIWORK, N, IWQ ) + LIWORK = MAX( LIWORK, N, IWQ ( 1 ) ) * Compute LRWORK workspace needed for CGELSY, CGELSS and CGELSD LRWORK = MAX( LRWORK, LRWORK_CGELSY, $ LRWORK_CGELSS, LRWORK_CGELSD ) @@ -408,6 +408,7 @@ ALLOCATE( WORK( LWORK ) ) ALLOCATE( IWORK( LIWORK ) ) ALLOCATE( RWORK( LRWORK ) ) + ALLOCATE( WORK2( 2 * LWORK ) ) * DO 140 IM = 1, NM M = MVAL( IM ) @@ -563,7 +564,7 @@ CALL CLARNV( 2, ISEED, NCOLS*NRHS, $ WORK ) CALL CSCAL( NCOLS*NRHS, - $ ONE / REAL( NCOLS ), WORK, + $ CONE / REAL( NCOLS ), WORK, $ 1 ) END IF CALL CGEMM( TRANS, 'No transpose', NROWS, @@ -596,7 +597,7 @@ $ CALL CLACPY( 'Full', NROWS, NRHS, $ COPYB, LDB, C, LDB ) CALL CQRT16( TRANS, M, N, NRHS, COPYA, - $ LDA, B, LDB, C, LDB, WORK, + $ LDA, B, LDB, C, LDB, WORK2, $ RESULT( 15 ) ) * IF( ( ITRAN.EQ.1 .AND. M.GE.N ) .OR. 
diff --git a/lapack-netlib/TESTING/LIN/cdrvsy_rk.f b/lapack-netlib/TESTING/LIN/cdrvsy_rk.f
index ae313c243..d3ed8c0a9 100644
--- a/lapack-netlib/TESTING/LIN/cdrvsy_rk.f
+++ b/lapack-netlib/TESTING/LIN/cdrvsy_rk.f
@@ -98,8 +98,9 @@
 *> \param[out] E
 *> \verbatim
 *> E is COMPLEX array, dimension (NMAX)
-*> \param[out] AINV
+*> \endverbatim
 *>
+*> \param[out] AINV
 *> \verbatim
 *> AINV is COMPLEX array, dimension (NMAX*NMAX)
 *> \endverbatim
diff --git a/lapack-netlib/TESTING/LIN/cerrunhr_col.f b/lapack-netlib/TESTING/LIN/cerrunhr_col.f
new file mode 100644
index 000000000..8fd58a683
--- /dev/null
+++ b/lapack-netlib/TESTING/LIN/cerrunhr_col.f
@@ -0,0 +1,164 @@
+*> \brief \b CERRUNHR_COL
+*
+* =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
+*
+* Definition:
+* ===========
+*
+* SUBROUTINE CERRUNHR_COL( PATH, NUNIT )
+*
+* .. Scalar Arguments ..
+* CHARACTER*3 PATH
+* INTEGER NUNIT
+* ..
+*
+*
+*> \par Purpose:
+* =============
+*>
+*> \verbatim
+*>
+*> CERRUNHR_COL tests the error exits for CUNHR_COL that performs
+*> Householder reconstruction from the output of tall-skinny
+*> factorization CLATSQR.
+*> \endverbatim
+*
+* Arguments:
+* ==========
+*
+*> \param[in] PATH
+*> \verbatim
+*> PATH is CHARACTER*3
+*> The LAPACK path name for the routines to be tested.
+*> \endverbatim
+*>
+*> \param[in] NUNIT
+*> \verbatim
+*> NUNIT is INTEGER
+*> The unit number for output.
+*> \endverbatim
+*
+* Authors:
+* ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+*
+*> \date November 2019
+*
+*> \ingroup complex_lin
+*
+* =====================================================================
+ SUBROUTINE CERRUNHR_COL( PATH, NUNIT )
+ IMPLICIT NONE
+*
+* -- LAPACK test routine (version 3.9.0) --
+* -- LAPACK is a software package provided by Univ. of Tennessee, --
+* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+* November 2019
+*
+* .. Scalar Arguments ..
+ CHARACTER(LEN=3) PATH
+ INTEGER NUNIT
+* ..
+*
+* =====================================================================
+*
+* .. Parameters ..
+ INTEGER NMAX
+ PARAMETER ( NMAX = 2 )
+* ..
+* .. Local Scalars ..
+ INTEGER I, INFO, J
+* ..
+* .. Local Arrays ..
+ COMPLEX A( NMAX, NMAX ), T( NMAX, NMAX ), D(NMAX)
+* ..
+* .. External Subroutines ..
+ EXTERNAL ALAESM, CHKXER, CUNHR_COL
+* ..
+* .. Scalars in Common ..
+ LOGICAL LERR, OK
+ CHARACTER(LEN=32) SRNAMT
+ INTEGER INFOT, NOUT
+* ..
+* .. Common blocks ..
+ COMMON / INFOC / INFOT, NOUT, OK, LERR
+ COMMON / SRNAMC / SRNAMT
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC REAL, CMPLX
+* ..
+* .. Executable Statements ..
+*
+ NOUT = NUNIT
+ WRITE( NOUT, FMT = * )
+*
+* Set the variables to innocuous values.
+*
+ DO J = 1, NMAX
+ DO I = 1, NMAX
+ A( I, J ) = CMPLX( 1.E+0 / REAL( I+J ) )
+ T( I, J ) = CMPLX( 1.E+0 / REAL( I+J ) )
+ END DO
+ D( J ) = ( 0.E+0, 0.E+0 )
+ END DO
+ OK = .TRUE.
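+*
+*     A sketch of the mechanism used throughout this file (the calls
+*     below are the authoritative list): each check sets INFOT to the
+*     position of the argument being invalidated, calls CUNHR_COL with
+*     that argument out of range, and CHKXER verifies that the test
+*     suite's XERBLA stub recorded the matching position. For example,
+*
+*        INFOT = 5
+*        CALL CUNHR_COL( 2, 0, 1, A, 1, T, 1, D, INFO )
+*        CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK )
+*
+*     flags LDA = 1 as invalid for M = 2, since LDA must be at least
+*     MAX( 1, M ).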
+* +* Error exits for Householder reconstruction +* +* CUNHR_COL +* + SRNAMT = 'CUNHR_COL' +* + INFOT = 1 + CALL CUNHR_COL( -1, 0, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 2 + CALL CUNHR_COL( 0, -1, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) + CALL CUNHR_COL( 1, 2, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 3 + CALL CUNHR_COL( 0, 0, -1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL CUNHR_COL( 0, 0, 0, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 5 + CALL CUNHR_COL( 0, 0, 1, A, -1, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL CUNHR_COL( 0, 0, 1, A, 0, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL CUNHR_COL( 2, 0, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 7 + CALL CUNHR_COL( 0, 0, 1, A, 1, T, -1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL CUNHR_COL( 0, 0, 1, A, 1, T, 0, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL CUNHR_COL( 4, 3, 2, A, 4, T, 1, D, INFO ) + CALL CHKXER( 'CUNHR_COL', INFOT, NOUT, LERR, OK ) +* +* Print a summary line. +* + CALL ALAESM( PATH, OK, NOUT ) +* + RETURN +* +* End of CERRUNHR_COL +* + END diff --git a/lapack-netlib/TESTING/LIN/cerrvx.f b/lapack-netlib/TESTING/LIN/cerrvx.f index d2d3d2a85..7f929f07f 100644 --- a/lapack-netlib/TESTING/LIN/cerrvx.f +++ b/lapack-netlib/TESTING/LIN/cerrvx.f @@ -739,7 +739,7 @@ $ W, 1, INFO ) CALL CHKXER( 'CHESV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 11 - CALL CHESV_AA_2STAGE( 'U', 2, 1, A, 2, A, 2, IP, IP, B, 1, + CALL CHESV_AA_2STAGE( 'U', 2, 1, A, 2, A, 8, IP, IP, B, 1, $ W, 1, INFO ) CALL CHKXER( 'CHESV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 7 @@ -769,7 +769,7 @@ $ W, 1, INFO ) CALL CHKXER( 'CSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 11 - CALL CSYSV_AA_2STAGE( 'U', 2, 1, A, 2, A, 2, IP, IP, B, 1, + CALL CSYSV_AA_2STAGE( 'U', 2, 1, A, 2, A, 8, IP, IP, B, 1, $ W, 1, INFO ) CALL CHKXER( 'CSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 7 diff --git a/lapack-netlib/TESTING/LIN/clahilb.f b/lapack-netlib/TESTING/LIN/clahilb.f index f88491a0d..c54884b9f 100644 --- a/lapack-netlib/TESTING/LIN/clahilb.f +++ b/lapack-netlib/TESTING/LIN/clahilb.f @@ -164,7 +164,7 @@ INTEGER NMAX_EXACT, NMAX_APPROX, SIZE_D PARAMETER (NMAX_EXACT = 6, NMAX_APPROX = 11, SIZE_D = 8) * -* d's are generated from random permuation of those eight elements. +* d's are generated from random permutation of those eight elements. COMPLEX D1(8), D2(8), INVD1(8), INVD2(8) DATA D1 /(-1,0),(0,1),(-1,-1),(0,-1),(1,0),(-1,1),(1,1),(1,-1)/ DATA D2 /(-1,0),(0,-1),(-1,1),(0,1),(1,0),(-1,-1),(1,-1),(1,1)/ diff --git a/lapack-netlib/TESTING/LIN/ctsqr01.f b/lapack-netlib/TESTING/LIN/ctsqr01.f index a3bd9ebc9..6d788ba41 100644 --- a/lapack-netlib/TESTING/LIN/ctsqr01.f +++ b/lapack-netlib/TESTING/LIN/ctsqr01.f @@ -114,7 +114,7 @@ * .. * .. Local Arrays .. INTEGER ISEED( 4 ) - COMPLEX TQUERY( 5 ), WORKQUERY + COMPLEX TQUERY( 5 ), WORKQUERY( 1 ) * .. * .. External Functions .. 
REAL SLAMCH, CLANGE, CLANSY @@ -173,22 +173,22 @@ * CALL CGEQR( M, N, AF, M, TQUERY, -1, WORKQUERY, -1, INFO ) TSIZE = INT( TQUERY( 1 ) ) - LWORK = INT( WORKQUERY ) + LWORK = INT( WORKQUERY( 1 ) ) CALL CGEMQR( 'L', 'N', M, M, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL CGEMQR( 'L', 'N', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL CGEMQR( 'L', 'C', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL CGEMQR( 'R', 'N', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL CGEMQR( 'R', 'C', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) ALLOCATE ( T( TSIZE ) ) ALLOCATE ( WORK( LWORK ) ) srnamt = 'CGEQR' @@ -316,22 +316,22 @@ ELSE CALL CGELQ( M, N, AF, M, TQUERY, -1, WORKQUERY, -1, INFO ) TSIZE = INT( TQUERY( 1 ) ) - LWORK = INT( WORKQUERY ) + LWORK = INT( WORKQUERY( 1 ) ) CALL CGEMLQ( 'R', 'N', N, N, K, AF, M, TQUERY, TSIZE, Q, N, $ WORKQUERY, -1, INFO ) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL CGEMLQ( 'L', 'N', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL CGEMLQ( 'L', 'C', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL CGEMLQ( 'R', 'N', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL CGEMLQ( 'R', 'C', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) ALLOCATE ( T( TSIZE ) ) ALLOCATE ( WORK( LWORK ) ) srnamt = 'CGELQ' diff --git a/lapack-netlib/TESTING/LIN/cunhr_col01.f b/lapack-netlib/TESTING/LIN/cunhr_col01.f new file mode 100644 index 000000000..d760caba5 --- /dev/null +++ b/lapack-netlib/TESTING/LIN/cunhr_col01.f @@ -0,0 +1,390 @@ +*> \brief \b CUNHR_COL01 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE CUNHR_COL01( M, N, MB1, NB1, NB2, RESULT ) +* +* .. Scalar Arguments .. +* INTEGER M, N, MB1, NB1, NB2 +* .. Return values .. +* REAL RESULT(6) +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> CUNHR_COL01 tests CUNHR_COL using CLATSQR, CGEMQRT and CUNGTSQR. +*> Therefore, CLATSQR (part of CGEQR), CGEMQRT (part CGEMQR), CUNGTSQR +*> have to be tested before this test. +*> +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> Number of rows in test matrix. +*> \endverbatim +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> Number of columns in test matrix. +*> \endverbatim +*> \param[in] MB1 +*> \verbatim +*> MB1 is INTEGER +*> Number of row in row block in an input test matrix. +*> \endverbatim +*> +*> \param[in] NB1 +*> \verbatim +*> NB1 is INTEGER +*> Number of columns in column block an input test matrix. 
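+*> NB1 is the column blocking passed to CLATSQR and CUNGTSQR
+*> below; it also sets the leading dimension of the T1 array of
+*> block-reflector factors.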
+*> \endverbatim +*> +*> \param[in] NB2 +*> \verbatim +*> NB2 is INTEGER +*> Number of columns in column block in an output test matrix. +*> \endverbatim +*> +*> \param[out] RESULT +*> \verbatim +*> RESULT is REAL array, dimension (6) +*> Results of each of the six tests below. +*> ( C is a M-by-N random matrix, D is a N-by-M random matrix ) +*> +*> RESULT(1) = | A - Q * R | / (eps * m * |A|) +*> RESULT(2) = | I - (Q**H) * Q | / (eps * m ) +*> RESULT(3) = | Q * C - Q * C | / (eps * m * |C|) +*> RESULT(4) = | (Q**H) * C - (Q**H) * C | / (eps * m * |C|) +*> RESULT(5) = | (D * Q) - D * Q | / (eps * m * |D|) +*> RESULT(6) = | D * (Q**H) - D * (Q**H) | / (eps * m * |D|) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup complex16_lin +* +* ===================================================================== + SUBROUTINE CUNHR_COL01( M, N, MB1, NB1, NB2, RESULT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER M, N, MB1, NB1, NB2 +* .. Return values .. + REAL RESULT(6) +* +* ===================================================================== +* +* .. +* .. Local allocatable arrays + COMPLEX, ALLOCATABLE :: A(:,:), AF(:,:), Q(:,:), R(:,:), + $ WORK( : ), T1(:,:), T2(:,:), DIAG(:), + $ C(:,:), CF(:,:), D(:,:), DF(:,:) + REAL, ALLOCATABLE :: RWORK(:) +* +* .. Parameters .. + REAL ZERO + PARAMETER ( ZERO = 0.0E+0 ) + COMPLEX CONE, CZERO + PARAMETER ( CONE = ( 1.0E+0, 0.0E+0 ), + $ CZERO = ( 0.0E+0, 0.0E+0 ) ) +* .. +* .. Local Scalars .. + LOGICAL TESTZEROS + INTEGER INFO, I, J, K, L, LWORK, NB1_UB, NB2_UB, NRB + REAL ANORM, EPS, RESID, CNORM, DNORM +* .. +* .. Local Arrays .. + INTEGER ISEED( 4 ) + COMPLEX WORKQUERY( 1 ) +* .. +* .. External Functions .. + REAL SLAMCH, CLANGE, CLANSY + EXTERNAL SLAMCH, CLANGE, CLANSY +* .. +* .. External Subroutines .. + EXTERNAL CLACPY, CLARNV, CLASET, CLATSQR, CUNHR_COL, + $ CUNGTSQR, CSCAL, CGEMM, CGEMQRT, CHERK +* .. +* .. Intrinsic Functions .. + INTRINSIC CEILING, REAL, MAX, MIN +* .. +* .. Scalars in Common .. + CHARACTER(LEN=32) SRNAMT +* .. +* .. Common blocks .. + COMMON / SRMNAMC / SRNAMT +* .. +* .. Data statements .. + DATA ISEED / 1988, 1989, 1990, 1991 / +* +* TEST MATRICES WITH HALF OF MATRIX BEING ZEROS +* + TESTZEROS = .FALSE. +* + EPS = SLAMCH( 'Epsilon' ) + K = MIN( M, N ) + L = MAX( M, N, 1) +* +* Dynamically allocate local arrays +* + ALLOCATE ( A(M,N), AF(M,N), Q(L,L), R(M,L), RWORK(L), + $ C(M,N), CF(M,N), + $ D(N,M), DF(N,M) ) +* +* Put random numbers into A and copy to AF +* + DO J = 1, N + CALL CLARNV( 2, ISEED, M, A( 1, J ) ) + END DO + IF( TESTZEROS ) THEN + IF( M.GE.4 ) THEN + DO J = 1, N + CALL CLARNV( 2, ISEED, M/2, A( M/4, J ) ) + END DO + END IF + END IF + CALL CLACPY( 'Full', M, N, A, M, AF, M ) +* +* Number of row blocks in CLATSQR +* + NRB = MAX( 1, CEILING( REAL( M - N ) / REAL( MB1 - N ) ) ) +* + ALLOCATE ( T1( NB1, N * NRB ) ) + ALLOCATE ( T2( NB2, N ) ) + ALLOCATE ( DIAG( N ) ) +* +* Begin determine LWORK for the array WORK and allocate memory. +* +* CLATSQR requires NB1 to be bounded by N. +* + NB1_UB = MIN( NB1, N) +* +* CGEMQRT requires NB2 to be bounded by N. 
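+*
+*     The two query calls below use the standard LAPACK workspace
+*     query: invoking a routine with LWORK = -1 performs no
+*     computation and returns the required workspace size in
+*     WORK( 1 ), so a safe LWORK is obtained as, for example,
+*
+*        CALL CLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1,
+*       $              WORKQUERY, -1, INFO )
+*        LWORK = INT( WORKQUERY( 1 ) )
+*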
+* + NB2_UB = MIN( NB2, N) +* + CALL CLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1, + $ WORKQUERY, -1, INFO ) + LWORK = INT( WORKQUERY( 1 ) ) + CALL CUNGTSQR( M, N, MB1, NB1, AF, M, T1, NB1, WORKQUERY, -1, + $ INFO ) + + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) +* +* In CGEMQRT, WORK is N*NB2_UB if SIDE = 'L', +* or M*NB2_UB if SIDE = 'R'. +* + LWORK = MAX( LWORK, NB2_UB * N, NB2_UB * M ) +* + ALLOCATE ( WORK( LWORK ) ) +* +* End allocate memory for WORK. +* +* +* Begin Householder reconstruction routines +* +* Factor the matrix A in the array AF. +* + SRNAMT = 'CLATSQR' + CALL CLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1, WORK, LWORK, + $ INFO ) +* +* Copy the factor R into the array R. +* + SRNAMT = 'CLACPY' + CALL CLACPY( 'U', M, N, AF, M, R, M ) +* +* Reconstruct the orthogonal matrix Q. +* + SRNAMT = 'CUNGTSQR' + CALL CUNGTSQR( M, N, MB1, NB1, AF, M, T1, NB1, WORK, LWORK, + $ INFO ) +* +* Perform the Householder reconstruction, the result is stored +* the arrays AF and T2. +* + SRNAMT = 'CUNHR_COL' + CALL CUNHR_COL( M, N, NB2, AF, M, T2, NB2, DIAG, INFO ) +* +* Compute the factor R_hr corresponding to the Householder +* reconstructed Q_hr and place it in the upper triangle of AF to +* match the Q storage format in CGEQRT. R_hr = R_tsqr * S, +* this means changing the sign of I-th row of the matrix R_tsqr +* according to sign of of I-th diagonal element DIAG(I) of the +* matrix S. +* + SRNAMT = 'CLACPY' + CALL CLACPY( 'U', M, N, R, M, AF, M ) +* + DO I = 1, N + IF( DIAG( I ).EQ.-CONE ) THEN + CALL CSCAL( N+1-I, -CONE, AF( I, I ), M ) + END IF + END DO +* +* End Householder reconstruction routines. +* +* +* Generate the m-by-m matrix Q +* + CALL CLASET( 'Full', M, M, CZERO, CONE, Q, M ) +* + SRNAMT = 'CGEMQRT' + CALL CGEMQRT( 'L', 'N', M, M, K, NB2_UB, AF, M, T2, NB2, Q, M, + $ WORK, INFO ) +* +* Copy R +* + CALL CLASET( 'Full', M, N, CZERO, CZERO, R, M ) +* + CALL CLACPY( 'Upper', M, N, AF, M, R, M ) +* +* TEST 1 +* Compute |R - (Q**H)*A| / ( eps * m * |A| ) and store in RESULT(1) +* + CALL CGEMM( 'C', 'N', M, N, M, -CONE, Q, M, A, M, CONE, R, M ) +* + ANORM = CLANGE( '1', M, N, A, M, RWORK ) + RESID = CLANGE( '1', M, N, R, M, RWORK ) + IF( ANORM.GT.ZERO ) THEN + RESULT( 1 ) = RESID / ( EPS * MAX( 1, M ) * ANORM ) + ELSE + RESULT( 1 ) = ZERO + END IF +* +* TEST 2 +* Compute |I - (Q**H)*Q| / ( eps * m ) and store in RESULT(2) +* + CALL CLASET( 'Full', M, M, CZERO, CONE, R, M ) + CALL CHERK( 'U', 'C', M, M, -CONE, Q, M, CONE, R, M ) + RESID = CLANSY( '1', 'Upper', M, R, M, RWORK ) + RESULT( 2 ) = RESID / ( EPS * MAX( 1, M ) ) +* +* Generate random m-by-n matrix C +* + DO J = 1, N + CALL CLARNV( 2, ISEED, M, C( 1, J ) ) + END DO + CNORM = CLANGE( '1', M, N, C, M, RWORK ) + CALL CLACPY( 'Full', M, N, C, M, CF, M ) +* +* Apply Q to C as Q*C = CF +* + SRNAMT = 'CGEMQRT' + CALL CGEMQRT( 'L', 'N', M, N, K, NB2_UB, AF, M, T2, NB2, CF, M, + $ WORK, INFO ) +* +* TEST 3 +* Compute |CF - Q*C| / ( eps * m * |C| ) +* + CALL CGEMM( 'N', 'N', M, N, M, -CONE, Q, M, C, M, CONE, CF, M ) + RESID = CLANGE( '1', M, N, CF, M, RWORK ) + IF( CNORM.GT.ZERO ) THEN + RESULT( 3 ) = RESID / ( EPS * MAX( 1, M ) * CNORM ) + ELSE + RESULT( 3 ) = ZERO + END IF +* +* Copy C into CF again +* + CALL CLACPY( 'Full', M, N, C, M, CF, M ) +* +* Apply Q to C as (Q**H)*C = CF +* + SRNAMT = 'CGEMQRT' + CALL CGEMQRT( 'L', 'C', M, N, K, NB2_UB, AF, M, T2, NB2, CF, M, + $ WORK, INFO ) +* +* TEST 4 +* Compute |CF - (Q**H)*C| / ( eps * m * |C|) +* + CALL CGEMM( 'C', 'N', M, N, M, -CONE, Q, M, C, M, CONE, CF, M ) + RESID = CLANGE( '1', 
M, N, CF, M, RWORK )
+ IF( CNORM.GT.ZERO ) THEN
+ RESULT( 4 ) = RESID / ( EPS * MAX( 1, M ) * CNORM )
+ ELSE
+ RESULT( 4 ) = ZERO
+ END IF
+*
+* Generate random n-by-m matrix D and a copy DF
+*
+ DO J = 1, M
+ CALL CLARNV( 2, ISEED, N, D( 1, J ) )
+ END DO
+ DNORM = CLANGE( '1', N, M, D, N, RWORK )
+ CALL CLACPY( 'Full', N, M, D, N, DF, N )
+*
+* Apply Q to D as D*Q = DF
+*
+ SRNAMT = 'CGEMQRT'
+ CALL CGEMQRT( 'R', 'N', N, M, K, NB2_UB, AF, M, T2, NB2, DF, N,
+ $ WORK, INFO )
+*
+* TEST 5
+* Compute |DF - D*Q| / ( eps * m * |D| )
+*
+ CALL CGEMM( 'N', 'N', N, M, M, -CONE, D, N, Q, M, CONE, DF, N )
+ RESID = CLANGE( '1', N, M, DF, N, RWORK )
+ IF( DNORM.GT.ZERO ) THEN
+ RESULT( 5 ) = RESID / ( EPS * MAX( 1, M ) * DNORM )
+ ELSE
+ RESULT( 5 ) = ZERO
+ END IF
+*
+* Copy D into DF again
+*
+ CALL CLACPY( 'Full', N, M, D, N, DF, N )
+*
+* Apply Q to D as D*QT = DF
+*
+ SRNAMT = 'CGEMQRT'
+ CALL CGEMQRT( 'R', 'C', N, M, K, NB2_UB, AF, M, T2, NB2, DF, N,
+ $ WORK, INFO )
+*
+* TEST 6
+* Compute |DF - D*(Q**H)| / ( eps * m * |D| )
+*
+ CALL CGEMM( 'N', 'C', N, M, M, -CONE, D, N, Q, M, CONE, DF, N )
+ RESID = CLANGE( '1', N, M, DF, N, RWORK )
+ IF( DNORM.GT.ZERO ) THEN
+ RESULT( 6 ) = RESID / ( EPS * MAX( 1, M ) * DNORM )
+ ELSE
+ RESULT( 6 ) = ZERO
+ END IF
+*
+* Deallocate all arrays
+*
+ DEALLOCATE ( A, AF, Q, R, RWORK, WORK, T1, T2, DIAG,
+ $ C, D, CF, DF )
+*
+ RETURN
+*
+* End of CUNHR_COL01
+*
+ END
diff --git a/lapack-netlib/TESTING/LIN/dchkaa.f b/lapack-netlib/TESTING/LIN/dchkaa.f
index c5fd7afda..03575c4d1 100644
--- a/lapack-netlib/TESTING/LIN/dchkaa.f
+++ b/lapack-netlib/TESTING/LIN/dchkaa.f
@@ -68,6 +68,10 @@
 *> DEQ
 *> DQT
 *> DQX
+*> DTQ
+*> DXQ
+*> DTS
+*> DHH
 *> \endverbatim
 *
 * Parameters:
@@ -102,17 +106,17 @@
 *> \author Univ. of Colorado Denver
 *> \author NAG Ltd.
 *
-*> \date April 2012
+*> \date November 2019
 *
 *> \ingroup double_lin
 *
 * =====================================================================
 PROGRAM DCHKAA
 *
-* -- LAPACK test routine (version 3.8.0) --
+* -- LAPACK test routine (version 3.9.0) --
 * -- LAPACK is a software package provided by Univ. of Tennessee, --
 * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
-* April 2012
+* November 2019
 *
 * =====================================================================
 *
@@ -159,15 +163,14 @@
 * ..
 * .. External Subroutines ..
 EXTERNAL ALAREQ, DCHKEQ, DCHKGB, DCHKGE, DCHKGT, DCHKLQ,
- $ DCHKPB, DCHKPO, DCHKPS, DCHKPP, DCHKPT, DCHKQ3,
- $ DCHKQL, DCHKQR, DCHKRQ, DCHKSP, DCHKSY,
- $ DCHKSY_ROOK, DCHKSY_RK, DCHKSY_AA, DCHKTB,
- $ DCHKTP, DCHKTR, DCHKTZ, DDRVGB, DDRVGE,
- $ DDRVGT, DDRVLS, DDRVPB, DDRVPO, DDRVPP,
- $ DDRVPT, DDRVSP, DDRVSY, DDRVSY_ROOK, DDRVSY_RK,
- $ DDRVSY_AA, ILAVER, DCHKQRT,
- $ DCHKQRTP, DCHKLQTP, DCHKTSQR, DCHKLQT
-
+ $ DCHKORHR_COL, DCHKPB, DCHKPO, DCHKPS, DCHKPP,
+ $ DCHKPT, DCHKQ3, DCHKQL, DCHKQR, DCHKRQ, DCHKSP,
+ $ DCHKSY, DCHKSY_ROOK, DCHKSY_RK, DCHKSY_AA,
+ $ DCHKTB, DCHKTP, DCHKTR, DCHKTZ, DDRVGB, DDRVGE,
+ $ DDRVGT, DDRVLS, DDRVPB, DDRVPO, DDRVPP, DDRVPT,
+ $ DDRVSP, DDRVSY, DDRVSY_ROOK, DDRVSY_RK,
+ $ DDRVSY_AA, ILAVER, DCHKLQTP, DCHKQRT, DCHKQRTP,
+ $ DCHKLQT, DCHKTSQR
 * ..
 * .. Scalars in Common ..
LOGICAL LERR, OK @@ -1007,8 +1010,20 @@ ELSE WRITE( NOUT, FMT = 9989 )PATH END IF +* + ELSE IF( LSAMEN( 2, C2, 'HH' ) ) THEN +* +* HH: Householder reconstruction for tall-skinny matrices +* + IF( TSTCHK ) THEN + CALL DCHKORHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, + $ NBVAL, NOUT ) + ELSE + WRITE( NOUT, FMT = 9989 ) PATH + END IF * ELSE + * WRITE( NOUT, FMT = 9990 )PATH END IF diff --git a/lapack-netlib/TESTING/LIN/dchkorhr_col.f b/lapack-netlib/TESTING/LIN/dchkorhr_col.f new file mode 100644 index 000000000..3b3e421eb --- /dev/null +++ b/lapack-netlib/TESTING/LIN/dchkorhr_col.f @@ -0,0 +1,239 @@ +*> \brief \b DCHKORHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE DCHKORHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, +* NBVAL, NOUT ) +* +* .. Scalar Arguments .. +* LOGICAL TSTERR +* INTEGER NM, NN, NNB, NOUT +* DOUBLE PRECISION THRESH +* .. +* .. Array Arguments .. +* INTEGER MVAL( * ), NBVAL( * ), NVAL( * ) +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> DCHKORHR_COL tests DORHR_COL using DLATSQR and DGEMQRT. Therefore, DLATSQR +*> (used in DGEQR) and DGEMQRT (used in DGEMQR) have to be tested +*> before this test. +*> +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] THRESH +*> \verbatim +*> THRESH is DOUBLE PRECISION +*> The threshold value for the test ratios. A result is +*> included in the output file if RESULT >= THRESH. To have +*> every test ratio printed, use THRESH = 0. +*> \endverbatim +*> +*> \param[in] TSTERR +*> \verbatim +*> TSTERR is LOGICAL +*> Flag that indicates whether error exits are to be tested. +*> \endverbatim +*> +*> \param[in] NM +*> \verbatim +*> NM is INTEGER +*> The number of values of M contained in the vector MVAL. +*> \endverbatim +*> +*> \param[in] MVAL +*> \verbatim +*> MVAL is INTEGER array, dimension (NM) +*> The values of the matrix row dimension M. +*> \endverbatim +*> +*> \param[in] NN +*> \verbatim +*> NN is INTEGER +*> The number of values of N contained in the vector NVAL. +*> \endverbatim +*> +*> \param[in] NVAL +*> \verbatim +*> NVAL is INTEGER array, dimension (NN) +*> The values of the matrix column dimension N. +*> \endverbatim +*> +*> \param[in] NNB +*> \verbatim +*> NNB is INTEGER +*> The number of values of NB contained in the vector NBVAL. +*> \endverbatim +*> +*> \param[in] NBVAL +*> \verbatim +*> NBVAL is INTEGER array, dimension (NBVAL) +*> The values of the blocksize NB. +*> \endverbatim +*> +*> \param[in] NOUT +*> \verbatim +*> NOUT is INTEGER +*> The unit number for output. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup double_lin +* +* ===================================================================== + SUBROUTINE DCHKORHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, + $ NBVAL, NOUT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.7.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* December 2016 +* +* .. Scalar Arguments .. + LOGICAL TSTERR + INTEGER NM, NN, NNB, NOUT + DOUBLE PRECISION THRESH +* .. +* .. Array Arguments .. + INTEGER MVAL( * ), NBVAL( * ), NVAL( * ) +* .. +* +* ===================================================================== +* +* .. 
Parameters .. + INTEGER NTESTS + PARAMETER ( NTESTS = 6 ) +* .. +* .. Local Scalars .. + CHARACTER(LEN=3) PATH + INTEGER I, IMB1, INB1, INB2, J, T, M, N, MB1, NB1, + $ NB2, NFAIL, NERRS, NRUN +* +* .. Local Arrays .. + DOUBLE PRECISION RESULT( NTESTS ) +* .. +* .. External Subroutines .. + EXTERNAL ALAHD, ALASUM, DERRORHR_COL, DORHR_COL01 +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Scalars in Common .. + LOGICAL LERR, OK + CHARACTER(LEN=32) SRNAMT + INTEGER INFOT, NUNIT +* .. +* .. Common blocks .. + COMMON / INFOC / INFOT, NUNIT, OK, LERR + COMMON / SRNAMC / SRNAMT +* .. +* .. Executable Statements .. +* +* Initialize constants +* + PATH( 1: 1 ) = 'D' + PATH( 2: 3 ) = 'HH' + NRUN = 0 + NFAIL = 0 + NERRS = 0 +* +* Test the error exits +* + IF( TSTERR ) CALL DERRORHR_COL( PATH, NOUT ) + INFOT = 0 +* +* Do for each value of M in MVAL. +* + DO I = 1, NM + M = MVAL( I ) +* +* Do for each value of N in NVAL. +* + DO J = 1, NN + N = NVAL( J ) +* +* Only for M >= N +* + IF ( MIN( M, N ).GT.0 .AND. M.GE.N ) THEN +* +* Do for each possible value of MB1 +* + DO IMB1 = 1, NNB + MB1 = NBVAL( IMB1 ) +* +* Only for MB1 > N +* + IF ( MB1.GT.N ) THEN +* +* Do for each possible value of NB1 +* + DO INB1 = 1, NNB + NB1 = NBVAL( INB1 ) +* +* Do for each possible value of NB2 +* + DO INB2 = 1, NNB + NB2 = NBVAL( INB2 ) +* + IF( NB1.GT.0 .AND. NB2.GT.0 ) THEN +* +* Test DORHR_COL +* + CALL DORHR_COL01( M, N, MB1, NB1, NB2, + $ RESULT ) +* +* Print information about the tests that did +* not pass the threshold. +* + DO T = 1, NTESTS + IF( RESULT( T ).GE.THRESH ) THEN + IF( NFAIL.EQ.0 .AND. NERRS.EQ.0 ) + $ CALL ALAHD( NOUT, PATH ) + WRITE( NOUT, FMT = 9999 ) M, N, MB1, + $ NB1, NB2, T, RESULT( T ) + NFAIL = NFAIL + 1 + END IF + END DO + NRUN = NRUN + NTESTS + END IF + END DO + END DO + END IF + END DO + END IF + END DO + END DO +* +* Print a summary of the results. +* + CALL ALASUM( PATH, NOUT, NFAIL, NRUN, NERRS ) +* + 9999 FORMAT( 'M=', I5, ', N=', I5, ', MB1=', I5, + $ ', NB1=', I5, ', NB2=', I5,' test(', I2, ')=', G12.5 ) + RETURN +* +* End of DCHKORHR_COL +* + END \ No newline at end of file diff --git a/lapack-netlib/TESTING/LIN/ddrvls.f b/lapack-netlib/TESTING/LIN/ddrvls.f index 2f4975553..adfd71e09 100644 --- a/lapack-netlib/TESTING/LIN/ddrvls.f +++ b/lapack-netlib/TESTING/LIN/ddrvls.f @@ -233,8 +233,8 @@ DOUBLE PRECISION EPS, NORMA, NORMB, RCOND * .. * .. Local Arrays .. - INTEGER ISEED( 4 ), ISEEDY( 4 ), IWQ - DOUBLE PRECISION RESULT( NTESTS ), WQ + INTEGER ISEED( 4 ), ISEEDY( 4 ), IWQ( 1 ) + DOUBLE PRECISION RESULT( NTESTS ), WQ( 1 ) * .. * .. Allocatable Arrays .. 
DOUBLE PRECISION, ALLOCATABLE :: WORK (:)
@@ -359,27 +359,27 @@
 * Compute workspace needed for DGELS
 CALL DGELS( TRANS, M, N, NRHS, A, LDA,
 $ B, LDB, WQ, -1, INFO )
- LWORK_DGELS = INT ( WQ )
+ LWORK_DGELS = INT ( WQ ( 1 ) )
 * Compute workspace needed for DGETSLS
 CALL DGETSLS( TRANS, M, N, NRHS, A, LDA,
 $ B, LDB, WQ, -1, INFO )
- LWORK_DGETSLS = INT( WQ )
+ LWORK_DGETSLS = INT( WQ ( 1 ) )
 ENDDO
 END IF
 * Compute workspace needed for DGELSY
 CALL DGELSY( M, N, NRHS, A, LDA, B, LDB, IWQ,
 $ RCOND, CRANK, WQ, -1, INFO )
- LWORK_DGELSY = INT( WQ )
+ LWORK_DGELSY = INT( WQ ( 1 ) )
 * Compute workspace needed for DGELSS
 CALL DGELSS( M, N, NRHS, A, LDA, B, LDB, S,
 $ RCOND, CRANK, WQ, -1 , INFO )
- LWORK_DGELSS = INT( WQ )
+ LWORK_DGELSS = INT( WQ ( 1 ) )
 * Compute workspace needed for DGELSD
 CALL DGELSD( M, N, NRHS, A, LDA, B, LDB, S,
 $ RCOND, CRANK, WQ, -1, IWQ, INFO )
- LWORK_DGELSD = INT( WQ )
+ LWORK_DGELSD = INT( WQ ( 1 ) )
 * Compute LIWORK workspace needed for DGELSY and DGELSD
- LIWORK = MAX( LIWORK, N, IWQ )
+ LIWORK = MAX( LIWORK, N, IWQ( 1 ) )
 * Compute LWORK workspace needed for all functions
 LWORK = MAX( LWORK, LWORK_DGELS, LWORK_DGETSLS,
 $ LWORK_DGELSY, LWORK_DGELSS,
diff --git a/lapack-netlib/TESTING/LIN/derrorhr_col.f b/lapack-netlib/TESTING/LIN/derrorhr_col.f
new file mode 100644
index 000000000..6d545bc91
--- /dev/null
+++ b/lapack-netlib/TESTING/LIN/derrorhr_col.f
@@ -0,0 +1,164 @@
+*> \brief \b DERRORHR_COL
+*
+* =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
+*
+* Definition:
+* ===========
+*
+* SUBROUTINE DERRORHR_COL( PATH, NUNIT )
+*
+* .. Scalar Arguments ..
+* CHARACTER*3 PATH
+* INTEGER NUNIT
+* ..
+*
+*
+*> \par Purpose:
+* =============
+*>
+*> \verbatim
+*>
+*> DERRORHR_COL tests the error exits for DORHR_COL that performs
+*> Householder reconstruction from the output of tall-skinny
+*> factorization DLATSQR.
+*> \endverbatim
+*
+* Arguments:
+* ==========
+*
+*> \param[in] PATH
+*> \verbatim
+*> PATH is CHARACTER*3
+*> The LAPACK path name for the routines to be tested.
+*> \endverbatim
+*>
+*> \param[in] NUNIT
+*> \verbatim
+*> NUNIT is INTEGER
+*> The unit number for output.
+*> \endverbatim
+*
+* Authors:
+* ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+*
+*> \date November 2019
+*
+*> \ingroup double_lin
+*
+* =====================================================================
+ SUBROUTINE DERRORHR_COL( PATH, NUNIT )
+ IMPLICIT NONE
+*
+* -- LAPACK test routine (version 3.9.0) --
+* -- LAPACK is a software package provided by Univ. of Tennessee, --
+* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+* November 2019
+*
+* .. Scalar Arguments ..
+ CHARACTER(LEN=3) PATH
+ INTEGER NUNIT
+* ..
+*
+* =====================================================================
+*
+* .. Parameters ..
+ INTEGER NMAX
+ PARAMETER ( NMAX = 2 )
+* ..
+* .. Local Scalars ..
+ INTEGER I, INFO, J
+* ..
+* .. Local Arrays ..
+ DOUBLE PRECISION A( NMAX, NMAX ), T( NMAX, NMAX ), D(NMAX)
+* ..
+* .. External Subroutines ..
+ EXTERNAL ALAESM, CHKXER, DORHR_COL
+* ..
+* .. Scalars in Common ..
+ LOGICAL LERR, OK
+ CHARACTER(LEN=32) SRNAMT
+ INTEGER INFOT, NOUT
+* ..
+* .. Common blocks ..
+ COMMON / INFOC / INFOT, NOUT, OK, LERR
+ COMMON / SRNAMC / SRNAMT
+* ..
+* .. Intrinsic Functions ..
+ INTRINSIC DBLE
+* ..
+* .. Executable Statements ..
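+*
+*     This driver only exercises argument checking; no numerical
+*     output of DORHR_COL is inspected, so A, T and D are simply
+*     filled with harmless nonzero placeholder values below.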
+* + NOUT = NUNIT + WRITE( NOUT, FMT = * ) +* +* Set the variables to innocuous values. +* + DO J = 1, NMAX + DO I = 1, NMAX + A( I, J ) = 1.D+0 / DBLE( I+J ) + T( I, J ) = 1.D+0 / DBLE( I+J ) + END DO + D( J ) = 0.D+0 + END DO + OK = .TRUE. +* +* Error exits for Householder reconstruction +* +* DORHR_COL +* + SRNAMT = 'DORHR_COL' +* + INFOT = 1 + CALL DORHR_COL( -1, 0, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 2 + CALL DORHR_COL( 0, -1, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) + CALL DORHR_COL( 1, 2, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 3 + CALL DORHR_COL( 0, 0, -1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL DORHR_COL( 0, 0, 0, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 5 + CALL DORHR_COL( 0, 0, 1, A, -1, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL DORHR_COL( 0, 0, 1, A, 0, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL DORHR_COL( 2, 0, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 7 + CALL DORHR_COL( 0, 0, 1, A, 1, T, -1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL DORHR_COL( 0, 0, 1, A, 1, T, 0, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL DORHR_COL( 4, 3, 2, A, 4, T, 1, D, INFO ) + CALL CHKXER( 'DORHR_COL', INFOT, NOUT, LERR, OK ) +* +* Print a summary line. +* + CALL ALAESM( PATH, OK, NOUT ) +* + RETURN +* +* End of DERRORHR_COL +* + END diff --git a/lapack-netlib/TESTING/LIN/derrtsqr.f b/lapack-netlib/TESTING/LIN/derrtsqr.f index c8ad30257..d1d0ff02d 100644 --- a/lapack-netlib/TESTING/LIN/derrtsqr.f +++ b/lapack-netlib/TESTING/LIN/derrtsqr.f @@ -77,7 +77,7 @@ * .. * .. Local Arrays .. DOUBLE PRECISION A( NMAX, NMAX ), T( NMAX, NMAX ), W( NMAX ), - $ C( NMAX, NMAX ), TAU(NMAX) + $ C( NMAX, NMAX ), TAU(NMAX*2) * .. * .. External Subroutines .. EXTERNAL ALAESM, CHKXER, DGEQR, @@ -137,6 +137,8 @@ * TAU(1)=1 TAU(2)=1 + TAU(3)=1 + TAU(4)=1 SRNAMT = 'DGEMQR' NB=1 INFOT = 1 diff --git a/lapack-netlib/TESTING/LIN/derrvx.f b/lapack-netlib/TESTING/LIN/derrvx.f index 3a4a6b7fc..fd1d038a6 100644 --- a/lapack-netlib/TESTING/LIN/derrvx.f +++ b/lapack-netlib/TESTING/LIN/derrvx.f @@ -740,7 +740,7 @@ $ W, 1, INFO ) CALL CHKXER( 'DSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 11 - CALL DSYSV_AA_2STAGE( 'U', 2, 1, A, 2, A, 2, IP, IP, B, 1, + CALL DSYSV_AA_2STAGE( 'U', 2, 1, A, 2, A, 8, IP, IP, B, 1, $ W, 1, INFO ) CALL CHKXER( 'DSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 7 diff --git a/lapack-netlib/TESTING/LIN/dorhr_col01.f b/lapack-netlib/TESTING/LIN/dorhr_col01.f new file mode 100644 index 000000000..3e48de37f --- /dev/null +++ b/lapack-netlib/TESTING/LIN/dorhr_col01.f @@ -0,0 +1,386 @@ +*> \brief \b DORHR_COL01 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE DORHR_COL01( M, N, MB1, NB1, NB2, RESULT ) +* +* .. Scalar Arguments .. +* INTEGER M, N, MB1, NB1, NB2 +* .. Return values .. +* DOUBLE PRECISION RESULT(6) +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> DORHR_COL01 tests DORHR_COL using DLATSQR, DGEMQRT and DORGTSQR. +*> Therefore, DLATSQR (part of DGEQR), DGEMQRT (part DGEMQR), DORGTSQR +*> have to be tested before this test. 
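+*>
+*> In outline: A is factored by DLATSQR, the explicit Q is formed
+*> by DORGTSQR, and DORHR_COL rewrites that factorization in the
+*> compact WY storage format of DGEQRT; the residual tests then
+*> compare applications of the reconstructed Q (via DGEMQRT)
+*> against the explicitly formed Q.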
+*> +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> Number of rows in test matrix. +*> \endverbatim +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> Number of columns in test matrix. +*> \endverbatim +*> \param[in] MB1 +*> \verbatim +*> MB1 is INTEGER +*> Number of row in row block in an input test matrix. +*> \endverbatim +*> +*> \param[in] NB1 +*> \verbatim +*> NB1 is INTEGER +*> Number of columns in column block an input test matrix. +*> \endverbatim +*> +*> \param[in] NB2 +*> \verbatim +*> NB2 is INTEGER +*> Number of columns in column block in an output test matrix. +*> \endverbatim +*> +*> \param[out] RESULT +*> \verbatim +*> RESULT is DOUBLE PRECISION array, dimension (6) +*> Results of each of the six tests below. +*> ( C is a M-by-N random matrix, D is a N-by-M random matrix ) +*> +*> RESULT(1) = | A - Q * R | / (eps * m * |A|) +*> RESULT(2) = | I - (Q**H) * Q | / (eps * m ) +*> RESULT(3) = | Q * C - Q * C | / (eps * m * |C|) +*> RESULT(4) = | (Q**H) * C - (Q**H) * C | / (eps * m * |C|) +*> RESULT(5) = | (D * Q) - D * Q | / (eps * m * |D|) +*> RESULT(6) = | D * (Q**H) - D * (Q**H) | / (eps * m * |D|) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup single_lin +* +* ===================================================================== + SUBROUTINE DORHR_COL01( M, N, MB1, NB1, NB2, RESULT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER M, N, MB1, NB1, NB2 +* .. Return values .. + DOUBLE PRECISION RESULT(6) +* +* ===================================================================== +* +* .. +* .. Local allocatable arrays + DOUBLE PRECISION, ALLOCATABLE :: A(:,:), AF(:,:), Q(:,:), R(:,:), + $ RWORK(:), WORK( : ), T1(:,:), T2(:,:), DIAG(:), + $ C(:,:), CF(:,:), D(:,:), DF(:,:) +* +* .. Parameters .. + DOUBLE PRECISION ONE, ZERO + PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) +* .. +* .. Local Scalars .. + LOGICAL TESTZEROS + INTEGER INFO, I, J, K, L, LWORK, NB1_UB, NB2_UB, NRB + DOUBLE PRECISION ANORM, EPS, RESID, CNORM, DNORM +* .. +* .. Local Arrays .. + INTEGER ISEED( 4 ) + DOUBLE PRECISION WORKQUERY( 1 ) +* .. +* .. External Functions .. + DOUBLE PRECISION DLAMCH, DLANGE, DLANSY + EXTERNAL DLAMCH, DLANGE, DLANSY +* .. +* .. External Subroutines .. + EXTERNAL DLACPY, DLARNV, DLASET, DLATSQR, DORHR_COL, + $ DORGTSQR, DSCAL, DGEMM, DGEMQRT, DSYRK +* .. +* .. Intrinsic Functions .. + INTRINSIC CEILING, DBLE, MAX, MIN +* .. +* .. Scalars in Common .. + CHARACTER(LEN=32) SRNAMT +* .. +* .. Common blocks .. + COMMON / SRMNAMC / SRNAMT +* .. +* .. Data statements .. + DATA ISEED / 1988, 1989, 1990, 1991 / +* +* TEST MATRICES WITH HALF OF MATRIX BEING ZEROS +* + TESTZEROS = .FALSE. 
+* + EPS = DLAMCH( 'Epsilon' ) + K = MIN( M, N ) + L = MAX( M, N, 1) +* +* Dynamically allocate local arrays +* + ALLOCATE ( A(M,N), AF(M,N), Q(L,L), R(M,L), RWORK(L), + $ C(M,N), CF(M,N), + $ D(N,M), DF(N,M) ) +* +* Put random numbers into A and copy to AF +* + DO J = 1, N + CALL DLARNV( 2, ISEED, M, A( 1, J ) ) + END DO + IF( TESTZEROS ) THEN + IF( M.GE.4 ) THEN + DO J = 1, N + CALL DLARNV( 2, ISEED, M/2, A( M/4, J ) ) + END DO + END IF + END IF + CALL DLACPY( 'Full', M, N, A, M, AF, M ) +* +* Number of row blocks in DLATSQR +* + NRB = MAX( 1, CEILING( DBLE( M - N ) / DBLE( MB1 - N ) ) ) +* + ALLOCATE ( T1( NB1, N * NRB ) ) + ALLOCATE ( T2( NB2, N ) ) + ALLOCATE ( DIAG( N ) ) +* +* Begin determine LWORK for the array WORK and allocate memory. +* +* DLATSQR requires NB1 to be bounded by N. +* + NB1_UB = MIN( NB1, N) +* +* DGEMQRT requires NB2 to be bounded by N. +* + NB2_UB = MIN( NB2, N) +* + CALL DLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1, + $ WORKQUERY, -1, INFO ) + LWORK = INT( WORKQUERY( 1 ) ) + CALL DORGTSQR( M, N, MB1, NB1, AF, M, T1, NB1, WORKQUERY, -1, + $ INFO ) + + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) +* +* In DGEMQRT, WORK is N*NB2_UB if SIDE = 'L', +* or M*NB2_UB if SIDE = 'R'. +* + LWORK = MAX( LWORK, NB2_UB * N, NB2_UB * M ) +* + ALLOCATE ( WORK( LWORK ) ) +* +* End allocate memory for WORK. +* +* +* Begin Householder reconstruction routines +* +* Factor the matrix A in the array AF. +* + SRNAMT = 'DLATSQR' + CALL DLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1, WORK, LWORK, + $ INFO ) +* +* Copy the factor R into the array R. +* + SRNAMT = 'DLACPY' + CALL DLACPY( 'U', N, N, AF, M, R, M ) +* +* Reconstruct the orthogonal matrix Q. +* + SRNAMT = 'DORGTSQR' + CALL DORGTSQR( M, N, MB1, NB1, AF, M, T1, NB1, WORK, LWORK, + $ INFO ) +* +* Perform the Householder reconstruction, the result is stored +* the arrays AF and T2. +* + SRNAMT = 'DORHR_COL' + CALL DORHR_COL( M, N, NB2, AF, M, T2, NB2, DIAG, INFO ) +* +* Compute the factor R_hr corresponding to the Householder +* reconstructed Q_hr and place it in the upper triangle of AF to +* match the Q storage format in DGEQRT. R_hr = R_tsqr * S, +* this means changing the sign of I-th row of the matrix R_tsqr +* according to sign of of I-th diagonal element DIAG(I) of the +* matrix S. +* + SRNAMT = 'DLACPY' + CALL DLACPY( 'U', N, N, R, M, AF, M ) +* + DO I = 1, N + IF( DIAG( I ).EQ.-ONE ) THEN + CALL DSCAL( N+1-I, -ONE, AF( I, I ), M ) + END IF + END DO +* +* End Householder reconstruction routines. 
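+*
+*     (For orientation, with example numbers only: M = 20, N = 4 and
+*     MB1 = 8 give NRB = CEILING( ( 20 - 4 ) / ( 8 - 4 ) ) = 4 row
+*     blocks above, so T1 holds N*NRB = 16 columns of block-reflector
+*     factors. Note also that the DSCAL in the sign fix uses
+*     increment M, the leading dimension of AF, so it scales along
+*     row I of the upper triangle rather than down column I.)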
+* +* +* Generate the m-by-m matrix Q +* + CALL DLASET( 'Full', M, M, ZERO, ONE, Q, M ) +* + SRNAMT = 'DGEMQRT' + CALL DGEMQRT( 'L', 'N', M, M, K, NB2_UB, AF, M, T2, NB2, Q, M, + $ WORK, INFO ) +* +* Copy R +* + CALL DLASET( 'Full', M, N, ZERO, ZERO, R, M ) +* + CALL DLACPY( 'Upper', M, N, AF, M, R, M ) +* +* TEST 1 +* Compute |R - (Q**T)*A| / ( eps * m * |A| ) and store in RESULT(1) +* + CALL DGEMM( 'T', 'N', M, N, M, -ONE, Q, M, A, M, ONE, R, M ) +* + ANORM = DLANGE( '1', M, N, A, M, RWORK ) + RESID = DLANGE( '1', M, N, R, M, RWORK ) + IF( ANORM.GT.ZERO ) THEN + RESULT( 1 ) = RESID / ( EPS * MAX( 1, M ) * ANORM ) + ELSE + RESULT( 1 ) = ZERO + END IF +* +* TEST 2 +* Compute |I - (Q**T)*Q| / ( eps * m ) and store in RESULT(2) +* + CALL DLASET( 'Full', M, M, ZERO, ONE, R, M ) + CALL DSYRK( 'U', 'T', M, M, -ONE, Q, M, ONE, R, M ) + RESID = DLANSY( '1', 'Upper', M, R, M, RWORK ) + RESULT( 2 ) = RESID / ( EPS * MAX( 1, M ) ) +* +* Generate random m-by-n matrix C +* + DO J = 1, N + CALL DLARNV( 2, ISEED, M, C( 1, J ) ) + END DO + CNORM = DLANGE( '1', M, N, C, M, RWORK ) + CALL DLACPY( 'Full', M, N, C, M, CF, M ) +* +* Apply Q to C as Q*C = CF +* + SRNAMT = 'DGEMQRT' + CALL DGEMQRT( 'L', 'N', M, N, K, NB2_UB, AF, M, T2, NB2, CF, M, + $ WORK, INFO ) +* +* TEST 3 +* Compute |CF - Q*C| / ( eps * m * |C| ) +* + CALL DGEMM( 'N', 'N', M, N, M, -ONE, Q, M, C, M, ONE, CF, M ) + RESID = DLANGE( '1', M, N, CF, M, RWORK ) + IF( CNORM.GT.ZERO ) THEN + RESULT( 3 ) = RESID / ( EPS * MAX( 1, M ) * CNORM ) + ELSE + RESULT( 3 ) = ZERO + END IF +* +* Copy C into CF again +* + CALL DLACPY( 'Full', M, N, C, M, CF, M ) +* +* Apply Q to C as (Q**T)*C = CF +* + SRNAMT = 'DGEMQRT' + CALL DGEMQRT( 'L', 'T', M, N, K, NB2_UB, AF, M, T2, NB2, CF, M, + $ WORK, INFO ) +* +* TEST 4 +* Compute |CF - (Q**T)*C| / ( eps * m * |C|) +* + CALL DGEMM( 'T', 'N', M, N, M, -ONE, Q, M, C, M, ONE, CF, M ) + RESID = DLANGE( '1', M, N, CF, M, RWORK ) + IF( CNORM.GT.ZERO ) THEN + RESULT( 4 ) = RESID / ( EPS * MAX( 1, M ) * CNORM ) + ELSE + RESULT( 4 ) = ZERO + END IF +* +* Generate random n-by-m matrix D and a copy DF +* + DO J = 1, M + CALL DLARNV( 2, ISEED, N, D( 1, J ) ) + END DO + DNORM = DLANGE( '1', N, M, D, N, RWORK ) + CALL DLACPY( 'Full', N, M, D, N, DF, N ) +* +* Apply Q to D as D*Q = DF +* + SRNAMT = 'DGEMQRT' + CALL DGEMQRT( 'R', 'N', N, M, K, NB2_UB, AF, M, T2, NB2, DF, N, + $ WORK, INFO ) +* +* TEST 5 +* Compute |DF - D*Q| / ( eps * m * |D| ) +* + CALL DGEMM( 'N', 'N', N, M, M, -ONE, D, N, Q, M, ONE, DF, N ) + RESID = DLANGE( '1', N, M, DF, N, RWORK ) + IF( DNORM.GT.ZERO ) THEN + RESULT( 5 ) = RESID / ( EPS * MAX( 1, M ) * DNORM ) + ELSE + RESULT( 5 ) = ZERO + END IF +* +* Copy D into DF again +* + CALL DLACPY( 'Full', N, M, D, N, DF, N ) +* +* Apply Q to D as D*QT = DF +* + SRNAMT = 'DGEMQRT' + CALL DGEMQRT( 'R', 'T', N, M, K, NB2_UB, AF, M, T2, NB2, DF, N, + $ WORK, INFO ) +* +* TEST 6 +* Compute |DF - D*(Q**T)| / ( eps * m * |D| ) +* + CALL DGEMM( 'N', 'T', N, M, M, -ONE, D, N, Q, M, ONE, DF, N ) + RESID = DLANGE( '1', N, M, DF, N, RWORK ) + IF( DNORM.GT.ZERO ) THEN + RESULT( 6 ) = RESID / ( EPS * MAX( 1, M ) * DNORM ) + ELSE + RESULT( 6 ) = ZERO + END IF +* +* Deallocate all arrays +* + DEALLOCATE ( A, AF, Q, R, RWORK, WORK, T1, T2, DIAG, + $ C, D, CF, DF ) +* + RETURN +* +* End of DORHR_COL01 +* + END diff --git a/lapack-netlib/TESTING/LIN/dtsqr01.f b/lapack-netlib/TESTING/LIN/dtsqr01.f index 7a50009cc..25bf58a81 100644 --- a/lapack-netlib/TESTING/LIN/dtsqr01.f +++ b/lapack-netlib/TESTING/LIN/dtsqr01.f @@ -115,7 +115,7 @@ 
* .. * .. Local Arrays .. INTEGER ISEED( 4 ) - DOUBLE PRECISION TQUERY( 5 ), WORKQUERY + DOUBLE PRECISION TQUERY( 5 ), WORKQUERY( 1 ) * .. * .. External Functions .. DOUBLE PRECISION DLAMCH, DLANGE, DLANSY @@ -174,22 +174,22 @@ * CALL DGEQR( M, N, AF, M, TQUERY, -1, WORKQUERY, -1, INFO ) TSIZE = INT( TQUERY( 1 ) ) - LWORK = INT( WORKQUERY ) + LWORK = INT( WORKQUERY( 1 ) ) CALL DGEMQR( 'L', 'N', M, M, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL DGEMQR( 'L', 'N', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL DGEMQR( 'L', 'T', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL DGEMQR( 'R', 'N', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL DGEMQR( 'R', 'T', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) ALLOCATE ( T( TSIZE ) ) ALLOCATE ( WORK( LWORK ) ) srnamt = 'DGEQR' @@ -317,22 +317,22 @@ ELSE CALL DGELQ( M, N, AF, M, TQUERY, -1, WORKQUERY, -1, INFO ) TSIZE = INT( TQUERY( 1 ) ) - LWORK = INT( WORKQUERY ) + LWORK = INT( WORKQUERY( 1 ) ) CALL DGEMLQ( 'R', 'N', N, N, K, AF, M, TQUERY, TSIZE, Q, N, $ WORKQUERY, -1, INFO ) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL DGEMLQ( 'L', 'N', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL DGEMLQ( 'L', 'T', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL DGEMLQ( 'R', 'N', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL DGEMLQ( 'R', 'T', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) ALLOCATE ( T( TSIZE ) ) ALLOCATE ( WORK( LWORK ) ) srnamt = 'DGELQ' diff --git a/lapack-netlib/TESTING/LIN/schkaa.f b/lapack-netlib/TESTING/LIN/schkaa.f index 33b109aa7..a9c13e442 100644 --- a/lapack-netlib/TESTING/LIN/schkaa.f +++ b/lapack-netlib/TESTING/LIN/schkaa.f @@ -68,6 +68,8 @@ *> SEQ *> SQT *> SQX +*> STS +*> SHH *> \endverbatim * * Parameters: @@ -102,17 +104,17 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date April 2012 +*> \date November 2019 * *> \ingroup single_lin * * ===================================================================== PROGRAM SCHKAA * -* -- LAPACK test routine (version 3.8.0) -- +* -- LAPACK test routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* April 2012 +* November 2019 * * ===================================================================== * @@ -159,13 +161,13 @@ * .. * .. External Subroutines .. 
EXTERNAL ALAREQ, SCHKEQ, SCHKGB, SCHKGE, SCHKGT, SCHKLQ, - $ SCHKPB, SCHKPO, SCHKPS, SCHKPP, SCHKPT, SCHKQ3, - $ SCHKQL, SCHKQR, SCHKRQ, SCHKSP, SCHKSY, - $ SCHKSY_ROOK, SCHKSY_RK, SCHKSY_AA, SCHKTB, - $ SCHKTP, SCHKTR, SCHKTZ, SDRVGB, SDRVGE, SDRVGT, - $ SDRVLS, SDRVPB, SDRVPO, SDRVPP, SDRVPT, SDRVSP, - $ SDRVSY, SDRVSY_ROOK, SDRVSY_RK, SDRVSY_AA, - $ ILAVER, SCHKLQTP, SCHKQRT, SCHKQRTP, + $ SCHKORHR_COL, SCHKPB, SCHKPO, SCHKPS, SCHKPP, + $ SCHKPT, SCHKQ3, SCHKQL, SCHKQR, SCHKRQ, SCHKSP, + $ SCHKSY, SCHKSY_ROOK, SCHKSY_RK, SCHKSY_AA, + $ SCHKTB, SCHKTP, SCHKTR, SCHKTZ, SDRVGB, SDRVGE, + $ SDRVGT, SDRVLS, SDRVPB, SDRVPO, SDRVPP, SDRVPT, + $ SDRVSP, SDRVSY, SDRVSY_ROOK, SDRVSY_RK, + $ SDRVSY_AA, ILAVER, SCHKLQTP, SCHKQRT, SCHKQRTP, $ SCHKLQT, SCHKTSQR * .. * .. Scalars in Common .. @@ -673,7 +675,7 @@ * * SK: symmetric indefinite matrices, * with bounded Bunch-Kaufman (rook) pivoting algorithm, -* differnet matrix storage format than SR path version. +* different matrix storage format than SR path version. * NTYPES = 10 CALL ALAREQ( PATH, NMATS, DOTYPE, NTYPES, NIN, NOUT ) @@ -1004,6 +1006,17 @@ ELSE WRITE( NOUT, FMT = 9989 )PATH END IF +* + ELSE IF( LSAMEN( 2, C2, 'HH' ) ) THEN +* +* HH: Householder reconstruction for tall-skinny matrices +* + IF( TSTCHK ) THEN + CALL SCHKORHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, + $ NBVAL, NOUT ) + ELSE + WRITE( NOUT, FMT = 9989 ) PATH + END IF * ELSE * diff --git a/lapack-netlib/TESTING/LIN/schkorhr_col.f b/lapack-netlib/TESTING/LIN/schkorhr_col.f new file mode 100644 index 000000000..cf6d2d323 --- /dev/null +++ b/lapack-netlib/TESTING/LIN/schkorhr_col.f @@ -0,0 +1,239 @@ +*> \brief \b SCHKORHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE SCHKORHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, +* NBVAL, NOUT ) +* +* .. Scalar Arguments .. +* LOGICAL TSTERR +* INTEGER NM, NN, NNB, NOUT +* REAL THRESH +* .. +* .. Array Arguments .. +* INTEGER MVAL( * ), NBVAL( * ), NVAL( * ) +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SCHKORHR_COL tests SORHR_COL using SLATSQR, SGEMQRT and SORGTSQR. +*> Therefore, SLATSQR (part of SGEQR), SGEMQRT (part SGEMQR), SORGTSQR +*> have to be tested before this test. +*> +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] THRESH +*> \verbatim +*> THRESH is REAL +*> The threshold value for the test ratios. A result is +*> included in the output file if RESULT >= THRESH. To have +*> every test ratio printed, use THRESH = 0. +*> \endverbatim +*> +*> \param[in] TSTERR +*> \verbatim +*> TSTERR is LOGICAL +*> Flag that indicates whether error exits are to be tested. +*> \endverbatim +*> +*> \param[in] NM +*> \verbatim +*> NM is INTEGER +*> The number of values of M contained in the vector MVAL. +*> \endverbatim +*> +*> \param[in] MVAL +*> \verbatim +*> MVAL is INTEGER array, dimension (NM) +*> The values of the matrix row dimension M. +*> \endverbatim +*> +*> \param[in] NN +*> \verbatim +*> NN is INTEGER +*> The number of values of N contained in the vector NVAL. +*> \endverbatim +*> +*> \param[in] NVAL +*> \verbatim +*> NVAL is INTEGER array, dimension (NN) +*> The values of the matrix column dimension N. +*> \endverbatim +*> +*> \param[in] NNB +*> \verbatim +*> NNB is INTEGER +*> The number of values of NB contained in the vector NBVAL. 
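+*> The same NBVAL entries are cycled through for the row blocking
+*> MB1 and the column blockings NB1 and NB2 in the loops below.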
+*> \endverbatim +*> +*> \param[in] NBVAL +*> \verbatim +*> NBVAL is INTEGER array, dimension (NBVAL) +*> The values of the blocksize NB. +*> \endverbatim +*> +*> \param[in] NOUT +*> \verbatim +*> NOUT is INTEGER +*> The unit number for output. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup sigle_lin +* +* ===================================================================== + SUBROUTINE SCHKORHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, + $ NBVAL, NOUT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* June 2019 +* +* .. Scalar Arguments .. + LOGICAL TSTERR + INTEGER NM, NN, NNB, NOUT + REAL THRESH +* .. +* .. Array Arguments .. + INTEGER MVAL( * ), NBVAL( * ), NVAL( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NTESTS + PARAMETER ( NTESTS = 6 ) +* .. +* .. Local Scalars .. + CHARACTER(LEN=3) PATH + INTEGER I, IMB1, INB1, INB2, J, T, M, N, MB1, NB1, + $ NB2, NFAIL, NERRS, NRUN +* +* .. Local Arrays .. + REAL RESULT( NTESTS ) +* .. +* .. External Subroutines .. + EXTERNAL ALAHD, ALASUM, SERRORHR_COL, SORHR_COL01 +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Scalars in Common .. + LOGICAL LERR, OK + CHARACTER(LEN=32) SRNAMT + INTEGER INFOT, NUNIT +* .. +* .. Common blocks .. + COMMON / INFOC / INFOT, NUNIT, OK, LERR + COMMON / SRNAMC / SRNAMT +* .. +* .. Executable Statements .. +* +* Initialize constants +* + PATH( 1: 1 ) = 'S' + PATH( 2: 3 ) = 'HH' + NRUN = 0 + NFAIL = 0 + NERRS = 0 +* +* Test the error exits +* + IF( TSTERR ) CALL SERRORHR_COL( PATH, NOUT ) + INFOT = 0 +* +* Do for each value of M in MVAL. +* + DO I = 1, NM + M = MVAL( I ) +* +* Do for each value of N in NVAL. +* + DO J = 1, NN + N = NVAL( J ) +* +* Only for M >= N +* + IF ( MIN( M, N ).GT.0 .AND. M.GE.N ) THEN +* +* Do for each possible value of MB1 +* + DO IMB1 = 1, NNB + MB1 = NBVAL( IMB1 ) +* +* Only for MB1 > N +* + IF ( MB1.GT.N ) THEN +* +* Do for each possible value of NB1 +* + DO INB1 = 1, NNB + NB1 = NBVAL( INB1 ) +* +* Do for each possible value of NB2 +* + DO INB2 = 1, NNB + NB2 = NBVAL( INB2 ) +* + IF( NB1.GT.0 .AND. NB2.GT.0 ) THEN +* +* Test SORHR_COL +* + CALL SORHR_COL01( M, N, MB1, NB1, NB2, + $ RESULT ) +* +* Print information about the tests that did +* not pass the threshold. +* + DO T = 1, NTESTS + IF( RESULT( T ).GE.THRESH ) THEN + IF( NFAIL.EQ.0 .AND. NERRS.EQ.0 ) + $ CALL ALAHD( NOUT, PATH ) + WRITE( NOUT, FMT = 9999 ) M, N, MB1, + $ NB1, NB2, T, RESULT( T ) + NFAIL = NFAIL + 1 + END IF + END DO + NRUN = NRUN + NTESTS + END IF + END DO + END DO + END IF + END DO + END IF + END DO + END DO +* +* Print a summary of the results. +* + CALL ALASUM( PATH, NOUT, NFAIL, NRUN, NERRS ) +* + 9999 FORMAT( 'M=', I5, ', N=', I5, ', MB1=', I5, + $ ', NB1=', I5, ', NB2=', I5,' test(', I2, ')=', G12.5 ) + RETURN +* +* End of SCHKORHR_COL +* + END \ No newline at end of file diff --git a/lapack-netlib/TESTING/LIN/sdrvls.f b/lapack-netlib/TESTING/LIN/sdrvls.f index 2cf3439b5..649ca558c 100644 --- a/lapack-netlib/TESTING/LIN/sdrvls.f +++ b/lapack-netlib/TESTING/LIN/sdrvls.f @@ -233,8 +233,8 @@ REAL EPS, NORMA, NORMB, RCOND * .. * .. Local Arrays .. 
- INTEGER ISEED( 4 ), ISEEDY( 4 ), IWQ - REAL RESULT( NTESTS ), WQ + INTEGER ISEED( 4 ), ISEEDY( 4 ), IWQ( 1 ) + REAL RESULT( NTESTS ), WQ( 1 ) * .. * .. Allocatable Arrays .. REAL, ALLOCATABLE :: WORK (:) @@ -358,28 +358,28 @@ * * Compute workspace needed for SGELS CALL SGELS( TRANS, M, N, NRHS, A, LDA, - $ B, LDB, WQ, -1, INFO ) - LWORK_SGELS = INT ( WQ ) + $ B, LDB, WQ( 1 ), -1, INFO ) + LWORK_SGELS = INT ( WQ( 1 ) ) * Compute workspace needed for SGETSLS CALL SGETSLS( TRANS, M, N, NRHS, A, LDA, - $ B, LDB, WQ, -1, INFO ) - LWORK_SGETSLS = INT( WQ ) + $ B, LDB, WQ( 1 ), -1, INFO ) + LWORK_SGETSLS = INT( WQ( 1 ) ) ENDDO END IF * Compute workspace needed for SGELSY CALL SGELSY( M, N, NRHS, A, LDA, B, LDB, IWQ, $ RCOND, CRANK, WQ, -1, INFO ) - LWORK_SGELSY = INT( WQ ) + LWORK_SGELSY = INT( WQ( 1 ) ) * Compute workspace needed for SGELSS CALL SGELSS( M, N, NRHS, A, LDA, B, LDB, S, $ RCOND, CRANK, WQ, -1 , INFO ) - LWORK_SGELSS = INT( WQ ) + LWORK_SGELSS = INT( WQ( 1 ) ) * Compute workspace needed for SGELSD CALL SGELSD( M, N, NRHS, A, LDA, B, LDB, S, $ RCOND, CRANK, WQ, -1, IWQ, INFO ) - LWORK_SGELSD = INT( WQ ) + LWORK_SGELSD = INT( WQ( 1 ) ) * Compute LIWORK workspace needed for SGELSY and SGELSD - LIWORK = MAX( LIWORK, N, IWQ ) + LIWORK = MAX( LIWORK, N, IWQ( 1 ) ) * Compute LWORK workspace needed for all functions LWORK = MAX( LWORK, LWORK_SGELS, LWORK_SGETSLS, $ LWORK_SGELSY, LWORK_SGELSS, diff --git a/lapack-netlib/TESTING/LIN/serrorhr_col.f b/lapack-netlib/TESTING/LIN/serrorhr_col.f new file mode 100644 index 000000000..e8d81a99c --- /dev/null +++ b/lapack-netlib/TESTING/LIN/serrorhr_col.f @@ -0,0 +1,164 @@ +*> \brief \b SERRORHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE SERRORHR_COL( PATH, NUNIT ) +* +* .. Scalar Arguments .. +* CHARACTER*3 PATH +* INTEGER NUNIT +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SERRORHR_COL tests the error exits for SORHR_COL that does +*> Householder reconstruction from the output of the tall-skinny +*> factorization SLATSQR. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] PATH +*> \verbatim +*> PATH is CHARACTER*3 +*> The LAPACK path name for the routines to be tested. +*> \endverbatim +*> +*> \param[in] NUNIT +*> \verbatim +*> NUNIT is INTEGER +*> The unit number for output. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup single_lin +* +* ===================================================================== + SUBROUTINE SERRORHR_COL( PATH, NUNIT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + CHARACTER(LEN=3) PATH + INTEGER NUNIT +* .. +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NMAX + PARAMETER ( NMAX = 2 ) +* .. +* .. Local Scalars .. + INTEGER I, INFO, J +* .. +* .. Local Arrays .. + REAL A( NMAX, NMAX ), T( NMAX, NMAX ), D(NMAX) +* .. +* .. External Subroutines .. + EXTERNAL ALAESM, CHKXER, SORHR_COL +* .. +* .. Scalars in Common .. + LOGICAL LERR, OK + CHARACTER(LEN=32) SRNAMT + INTEGER INFOT, NOUT +* .. +* ..
Common blocks .. + COMMON / INFOC / INFOT, NOUT, OK, LERR + COMMON / SRNAMC / SRNAMT +* .. +* .. Intrinsic Functions .. + INTRINSIC REAL +* .. +* .. Executable Statements .. +* + NOUT = NUNIT + WRITE( NOUT, FMT = * ) +* +* Set the variables to innocuous values. +* + DO J = 1, NMAX + DO I = 1, NMAX + A( I, J ) = 1.E+0 / REAL( I+J ) + T( I, J ) = 1.E+0 / REAL( I+J ) + END DO + D( J ) = 0.E+0 + END DO + OK = .TRUE. +* +* Error exits for Householder reconstruction +* +* SORHR_COL +* + SRNAMT = 'SORHR_COL' +* + INFOT = 1 + CALL SORHR_COL( -1, 0, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 2 + CALL SORHR_COL( 0, -1, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) + CALL SORHR_COL( 1, 2, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 3 + CALL SORHR_COL( 0, 0, -1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL SORHR_COL( 0, 0, 0, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 5 + CALL SORHR_COL( 0, 0, 1, A, -1, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL SORHR_COL( 0, 0, 1, A, 0, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL SORHR_COL( 2, 0, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 7 + CALL SORHR_COL( 0, 0, 1, A, 1, T, -1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL SORHR_COL( 0, 0, 1, A, 1, T, 0, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL SORHR_COL( 4, 3, 2, A, 4, T, 1, D, INFO ) + CALL CHKXER( 'SORHR_COL', INFOT, NOUT, LERR, OK ) +* +* Print a summary line. +* + CALL ALAESM( PATH, OK, NOUT ) +* + RETURN +* +* End of SERRORHR_COL +* + END diff --git a/lapack-netlib/TESTING/LIN/serrtsqr.f b/lapack-netlib/TESTING/LIN/serrtsqr.f index f00f3e14b..7f91a3c39 100644 --- a/lapack-netlib/TESTING/LIN/serrtsqr.f +++ b/lapack-netlib/TESTING/LIN/serrtsqr.f @@ -77,7 +77,7 @@ * .. * .. Local Arrays .. REAL A( NMAX, NMAX ), T( NMAX, NMAX ), W( NMAX ), - $ C( NMAX, NMAX ), TAU(NMAX) + $ C( NMAX, NMAX ), TAU(NMAX*2) * .. * .. External Subroutines .. EXTERNAL ALAESM, CHKXER, SGEQR, @@ -137,6 +137,8 @@ * TAU(1)=1 TAU(2)=1 + TAU(3)=1 + TAU(4)=1 SRNAMT = 'SGEMQR' NB=1 INFOT = 1 diff --git a/lapack-netlib/TESTING/LIN/serrvx.f b/lapack-netlib/TESTING/LIN/serrvx.f index a63ed38d7..910bff1e5 100644 --- a/lapack-netlib/TESTING/LIN/serrvx.f +++ b/lapack-netlib/TESTING/LIN/serrvx.f @@ -735,7 +735,7 @@ $ W, 1, INFO ) CALL CHKXER( 'SSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 11 - CALL SSYSV_AA_2STAGE( 'U', 2, 1, A, 2, A, 2, IP, IP, B, 1, + CALL SSYSV_AA_2STAGE( 'U', 2, 1, A, 2, A, 8, IP, IP, B, 1, $ W, 1, INFO ) CALL CHKXER( 'SSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 7 diff --git a/lapack-netlib/TESTING/LIN/sorhr_col01.f b/lapack-netlib/TESTING/LIN/sorhr_col01.f new file mode 100644 index 000000000..02429041b --- /dev/null +++ b/lapack-netlib/TESTING/LIN/sorhr_col01.f @@ -0,0 +1,386 @@ +*> \brief \b SORHR_COL01 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE SORHR_COL01( M, N, MB1, NB1, NB2, RESULT) +* +* .. Scalar Arguments .. +* INTEGER M, N, MB1, NB1, NB2 +* .. Return values .. 
+* REAL RESULT(6) +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SORHR_COL01 tests SORHR_COL using SLATSQR, SGEMQRT and SORGTSQR. +*> Therefore, SLATSQR (part of SGEQR), SGEMQRT (part of SGEMQR), SORGTSQR +*> have to be tested before this test. +*> +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> Number of rows in test matrix. +*> \endverbatim +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> Number of columns in test matrix. +*> \endverbatim +*> \param[in] MB1 +*> \verbatim +*> MB1 is INTEGER +*> Number of rows in a row block of an input test matrix. +*> \endverbatim +*> +*> \param[in] NB1 +*> \verbatim +*> NB1 is INTEGER +*> Number of columns in a column block of an input test matrix. +*> \endverbatim +*> +*> \param[in] NB2 +*> \verbatim +*> NB2 is INTEGER +*> Number of columns in a column block of an output test matrix. +*> \endverbatim +*> +*> \param[out] RESULT +*> \verbatim +*> RESULT is REAL array, dimension (6) +*> Results of each of the six tests below. +*> ( C is an M-by-N random matrix, D is an N-by-M random matrix ) +*> +*> RESULT(1) = | A - Q * R | / (eps * m * |A|) +*> RESULT(2) = | I - (Q**T) * Q | / (eps * m ) +*> RESULT(3) = | Q * C - Q * C | / (eps * m * |C|) +*> RESULT(4) = | (Q**T) * C - (Q**T) * C | / (eps * m * |C|) +*> RESULT(5) = | (D * Q) - D * Q | / (eps * m * |D|) +*> RESULT(6) = | D * (Q**T) - D * (Q**T) | / (eps * m * |D|) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup single_lin +* +* ===================================================================== + SUBROUTINE SORHR_COL01( M, N, MB1, NB1, NB2, RESULT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER M, N, MB1, NB1, NB2 +* .. Return values .. + REAL RESULT(6) +* +* ===================================================================== +* +* .. +* .. Local allocatable arrays + REAL, ALLOCATABLE :: A(:,:), AF(:,:), Q(:,:), R(:,:), + $ RWORK(:), WORK( : ), T1(:,:), T2(:,:), DIAG(:), + $ C(:,:), CF(:,:), D(:,:), DF(:,:) +* +* .. Parameters .. + REAL ONE, ZERO + PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) +* .. +* .. Local Scalars .. + LOGICAL TESTZEROS + INTEGER INFO, I, J, K, L, LWORK, NB1_UB, NB2_UB, NRB + REAL ANORM, EPS, RESID, CNORM, DNORM +* .. +* .. Local Arrays .. + INTEGER ISEED( 4 ) + REAL WORKQUERY( 1 ) +* .. +* .. External Functions .. + REAL SLAMCH, SLANGE, SLANSY + EXTERNAL SLAMCH, SLANGE, SLANSY +* .. +* .. External Subroutines .. + EXTERNAL SLACPY, SLARNV, SLASET, SLATSQR, SORHR_COL, + $ SORGTSQR, SSCAL, SGEMM, SGEMQRT, SSYRK +* .. +* .. Intrinsic Functions .. + INTRINSIC CEILING, MAX, MIN, REAL +* .. +* .. Scalars in Common .. + CHARACTER(LEN=32) SRNAMT +* .. +* .. Common blocks .. + COMMON / SRNAMC / SRNAMT +* .. +* .. Data statements .. + DATA ISEED / 1988, 1989, 1990, 1991 / +* +* TEST MATRICES WITH HALF OF MATRIX BEING ZEROS +* + TESTZEROS = .FALSE.
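+* +* Note: a small worked example of the row-block count computed below, assuming the SLATSQR blocking convention (the first row block has MB1 rows and each subsequent block covers MB1 - N rows): for M = 10, N = 2, MB1 = 4 the trailing M - N = 8 rows split into CEILING( 8 / 2 ) = 4 blocks, so NRB = 4 and T1 must provide NRB * N = 8 columns of block-reflector storage. +*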
+* + EPS = SLAMCH( 'Epsilon' ) + K = MIN( M, N ) + L = MAX( M, N, 1) +* +* Dynamically allocate local arrays +* + ALLOCATE ( A(M,N), AF(M,N), Q(L,L), R(M,L), RWORK(L), + $ C(M,N), CF(M,N), + $ D(N,M), DF(N,M) ) +* +* Put random numbers into A and copy to AF +* + DO J = 1, N + CALL SLARNV( 2, ISEED, M, A( 1, J ) ) + END DO + IF( TESTZEROS ) THEN + IF( M.GE.4 ) THEN + DO J = 1, N + CALL SLARNV( 2, ISEED, M/2, A( M/4, J ) ) + END DO + END IF + END IF + CALL SLACPY( 'Full', M, N, A, M, AF, M ) +* +* Number of row blocks in SLATSQR +* + NRB = MAX( 1, CEILING( REAL( M - N ) / REAL( MB1 - N ) ) ) +* + ALLOCATE ( T1( NB1, N * NRB ) ) + ALLOCATE ( T2( NB2, N ) ) + ALLOCATE ( DIAG( N ) ) +* +* Begin determine LWORK for the array WORK and allocate memory. +* +* SLATSQR requires NB1 to be bounded by N. +* + NB1_UB = MIN( NB1, N) +* +* SGEMQRT requires NB2 to be bounded by N. +* + NB2_UB = MIN( NB2, N) +* + CALL SLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1, + $ WORKQUERY, -1, INFO ) + LWORK = INT( WORKQUERY( 1 ) ) + CALL SORGTSQR( M, N, MB1, NB1, AF, M, T1, NB1, WORKQUERY, -1, + $ INFO ) + + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) +* +* In SGEMQRT, WORK is N*NB2_UB if SIDE = 'L', +* or M*NB2_UB if SIDE = 'R'. +* + LWORK = MAX( LWORK, NB2_UB * N, NB2_UB * M ) +* + ALLOCATE ( WORK( LWORK ) ) +* +* End allocate memory for WORK. +* +* +* Begin Householder reconstruction routines +* +* Factor the matrix A in the array AF. +* + SRNAMT = 'SLATSQR' + CALL SLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1, WORK, LWORK, + $ INFO ) +* +* Copy the factor R into the array R. +* + SRNAMT = 'SLACPY' + CALL SLACPY( 'U', N, N, AF, M, R, M ) +* +* Reconstruct the orthogonal matrix Q. +* + SRNAMT = 'SORGTSQR' + CALL SORGTSQR( M, N, MB1, NB1, AF, M, T1, NB1, WORK, LWORK, + $ INFO ) +* +* Perform the Householder reconstruction, the result is stored +* in the arrays AF and T2. +* + SRNAMT = 'SORHR_COL' + CALL SORHR_COL( M, N, NB2, AF, M, T2, NB2, DIAG, INFO ) +* +* Compute the factor R_hr corresponding to the Householder +* reconstructed Q_hr and place it in the upper triangle of AF to +* match the Q storage format in SGEQRT. R_hr = S * R_tsqr, +* this means changing the sign of the I-th row of the matrix R_tsqr +* according to the sign of the I-th diagonal element DIAG(I) of the +* matrix S. +* + SRNAMT = 'SLACPY' + CALL SLACPY( 'U', N, N, R, M, AF, M ) +* + DO I = 1, N + IF( DIAG( I ).EQ.-ONE ) THEN + CALL SSCAL( N+1-I, -ONE, AF( I, I ), M ) + END IF + END DO +* +* End Householder reconstruction routines.
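+* +* Note: a minimal sketch of why the row sign flip above suffices, assuming DIAG holds the +-1 signs returned by SORHR_COL. With S = diag( DIAG(1), ..., DIAG(N) ), S is its own inverse, so +* +* A = Q_tsqr * R_tsqr = ( Q_tsqr * S ) * ( S * R_tsqr ) = Q_hr * R_hr, +* +* and negating row I of R_tsqr exactly when DIAG( I ) = -ONE yields the R factor that pairs with the reconstructed Q_hr. +*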
+* +* +* Generate the m-by-m matrix Q +* + CALL SLASET( 'Full', M, M, ZERO, ONE, Q, M ) +* + SRNAMT = 'SGEMQRT' + CALL SGEMQRT( 'L', 'N', M, M, K, NB2_UB, AF, M, T2, NB2, Q, M, + $ WORK, INFO ) +* +* Copy R +* + CALL SLASET( 'Full', M, N, ZERO, ZERO, R, M ) +* + CALL SLACPY( 'Upper', M, N, AF, M, R, M ) +* +* TEST 1 +* Compute |R - (Q**T)*A| / ( eps * m * |A| ) and store in RESULT(1) +* + CALL SGEMM( 'T', 'N', M, N, M, -ONE, Q, M, A, M, ONE, R, M ) +* + ANORM = SLANGE( '1', M, N, A, M, RWORK ) + RESID = SLANGE( '1', M, N, R, M, RWORK ) + IF( ANORM.GT.ZERO ) THEN + RESULT( 1 ) = RESID / ( EPS * MAX( 1, M ) * ANORM ) + ELSE + RESULT( 1 ) = ZERO + END IF +* +* TEST 2 +* Compute |I - (Q**T)*Q| / ( eps * m ) and store in RESULT(2) +* + CALL SLASET( 'Full', M, M, ZERO, ONE, R, M ) + CALL SSYRK( 'U', 'T', M, M, -ONE, Q, M, ONE, R, M ) + RESID = SLANSY( '1', 'Upper', M, R, M, RWORK ) + RESULT( 2 ) = RESID / ( EPS * MAX( 1, M ) ) +* +* Generate random m-by-n matrix C +* + DO J = 1, N + CALL SLARNV( 2, ISEED, M, C( 1, J ) ) + END DO + CNORM = SLANGE( '1', M, N, C, M, RWORK ) + CALL SLACPY( 'Full', M, N, C, M, CF, M ) +* +* Apply Q to C as Q*C = CF +* + SRNAMT = 'SGEMQRT' + CALL SGEMQRT( 'L', 'N', M, N, K, NB2_UB, AF, M, T2, NB2, CF, M, + $ WORK, INFO ) +* +* TEST 3 +* Compute |CF - Q*C| / ( eps * m * |C| ) +* + CALL SGEMM( 'N', 'N', M, N, M, -ONE, Q, M, C, M, ONE, CF, M ) + RESID = SLANGE( '1', M, N, CF, M, RWORK ) + IF( CNORM.GT.ZERO ) THEN + RESULT( 3 ) = RESID / ( EPS * MAX( 1, M ) * CNORM ) + ELSE + RESULT( 3 ) = ZERO + END IF +* +* Copy C into CF again +* + CALL SLACPY( 'Full', M, N, C, M, CF, M ) +* +* Apply Q to C as (Q**T)*C = CF +* + SRNAMT = 'SGEMQRT' + CALL SGEMQRT( 'L', 'T', M, N, K, NB2_UB, AF, M, T2, NB2, CF, M, + $ WORK, INFO ) +* +* TEST 4 +* Compute |CF - (Q**T)*C| / ( eps * m * |C|) +* + CALL SGEMM( 'T', 'N', M, N, M, -ONE, Q, M, C, M, ONE, CF, M ) + RESID = SLANGE( '1', M, N, CF, M, RWORK ) + IF( CNORM.GT.ZERO ) THEN + RESULT( 4 ) = RESID / ( EPS * MAX( 1, M ) * CNORM ) + ELSE + RESULT( 4 ) = ZERO + END IF +* +* Generate random n-by-m matrix D and a copy DF +* + DO J = 1, M + CALL SLARNV( 2, ISEED, N, D( 1, J ) ) + END DO + DNORM = SLANGE( '1', N, M, D, N, RWORK ) + CALL SLACPY( 'Full', N, M, D, N, DF, N ) +* +* Apply Q to D as D*Q = DF +* + SRNAMT = 'SGEMQRT' + CALL SGEMQRT( 'R', 'N', N, M, K, NB2_UB, AF, M, T2, NB2, DF, N, + $ WORK, INFO ) +* +* TEST 5 +* Compute |DF - D*Q| / ( eps * m * |D| ) +* + CALL SGEMM( 'N', 'N', N, M, M, -ONE, D, N, Q, M, ONE, DF, N ) + RESID = SLANGE( '1', N, M, DF, N, RWORK ) + IF( DNORM.GT.ZERO ) THEN + RESULT( 5 ) = RESID / ( EPS * MAX( 1, M ) * DNORM ) + ELSE + RESULT( 5 ) = ZERO + END IF +* +* Copy D into DF again +* + CALL SLACPY( 'Full', N, M, D, N, DF, N ) +* +* Apply Q to D as D*QT = DF +* + SRNAMT = 'SGEMQRT' + CALL SGEMQRT( 'R', 'T', N, M, K, NB2_UB, AF, M, T2, NB2, DF, N, + $ WORK, INFO ) +* +* TEST 6 +* Compute |DF - D*(Q**T)| / ( eps * m * |D| ) +* + CALL SGEMM( 'N', 'T', N, M, M, -ONE, D, N, Q, M, ONE, DF, N ) + RESID = SLANGE( '1', N, M, DF, N, RWORK ) + IF( DNORM.GT.ZERO ) THEN + RESULT( 6 ) = RESID / ( EPS * MAX( 1, M ) * DNORM ) + ELSE + RESULT( 6 ) = ZERO + END IF +* +* Deallocate all arrays +* + DEALLOCATE ( A, AF, Q, R, RWORK, WORK, T1, T2, DIAG, + $ C, D, CF, DF ) +* + RETURN +* +* End of SORHR_COL01 +* + END diff --git a/lapack-netlib/TESTING/LIN/stsqr01.f b/lapack-netlib/TESTING/LIN/stsqr01.f index b661d61f4..8eb69eae7 100644 --- a/lapack-netlib/TESTING/LIN/stsqr01.f +++ b/lapack-netlib/TESTING/LIN/stsqr01.f @@ -115,7 +115,7 @@ 
* .. * .. Local Arrays .. INTEGER ISEED( 4 ) - REAL TQUERY( 5 ), WORKQUERY + REAL TQUERY( 5 ), WORKQUERY( 1 ) * .. * .. External Functions .. REAL SLAMCH, SLANGE, SLANSY @@ -174,22 +174,22 @@ * CALL SGEQR( M, N, AF, M, TQUERY, -1, WORKQUERY, -1, INFO ) TSIZE = INT( TQUERY( 1 ) ) - LWORK = INT( WORKQUERY ) + LWORK = INT( WORKQUERY( 1 ) ) CALL SGEMQR( 'L', 'N', M, M, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL SGEMQR( 'L', 'N', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL SGEMQR( 'L', 'T', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL SGEMQR( 'R', 'N', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL SGEMQR( 'R', 'T', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) ALLOCATE ( T( TSIZE ) ) ALLOCATE ( WORK( LWORK ) ) srnamt = 'SGEQR' @@ -317,22 +317,22 @@ ELSE CALL SGELQ( M, N, AF, M, TQUERY, -1, WORKQUERY, -1, INFO ) TSIZE = INT( TQUERY( 1 ) ) - LWORK = INT( WORKQUERY ) + LWORK = INT( WORKQUERY( 1 )) CALL SGEMLQ( 'R', 'N', N, N, K, AF, M, TQUERY, TSIZE, Q, N, $ WORKQUERY, -1, INFO ) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL SGEMLQ( 'L', 'N', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL SGEMLQ( 'L', 'T', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL SGEMLQ( 'R', 'N', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL SGEMLQ( 'R', 'T', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) ALLOCATE ( T( TSIZE ) ) ALLOCATE ( WORK( LWORK ) ) srnamt = 'SGELQ' diff --git a/lapack-netlib/TESTING/LIN/zchkaa.f b/lapack-netlib/TESTING/LIN/zchkaa.f index d2be2525d..30d2a084a 100644 --- a/lapack-netlib/TESTING/LIN/zchkaa.f +++ b/lapack-netlib/TESTING/LIN/zchkaa.f @@ -74,6 +74,8 @@ *> ZEQ *> ZQT *> ZQX +*> ZTS +*> ZHH *> \endverbatim * * Parameters: @@ -108,17 +110,17 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \date November 2017 +*> \date November 2019 * *> \ingroup complex16_lin * * ===================================================================== PROGRAM ZCHKAA * -* -- LAPACK test routine (version 3.8.0) -- +* -- LAPACK test routine (version 3.9.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- -* November 2017 +* November 2019 * * ===================================================================== * @@ -166,16 +168,16 @@ * .. External Subroutines .. 
EXTERNAL ALAREQ, ZCHKEQ, ZCHKGB, ZCHKGE, ZCHKGT, ZCHKHE, $ ZCHKHE_ROOK, ZCHKHE_RK, ZCHKHE_AA, ZCHKHP, - $ ZCHKLQ, ZCHKPB, ZCHKPO, ZCHKPS, ZCHKPP, ZCHKPT, - $ ZCHKQ3, ZCHKQL, ZCHKQR, ZCHKRQ, ZCHKSP, ZCHKSY, - $ ZCHKSY_ROOK, ZCHKSY_RK, ZCHKSY_AA, ZCHKTB, - $ ZCHKTP, ZCHKTR, ZCHKTZ, ZDRVGB, ZDRVGE, ZDRVGT, - $ ZDRVHE, ZDRVHE_ROOK, ZDRVHE_RK, ZDRVHE_AA, - $ ZDRVHE_AA_2STAGE, ZDRVHP, ZDRVLS, ZDRVPB, - $ ZDRVPO, ZDRVPP, ZDRVPT, ZDRVSP, ZDRVSY, - $ ZDRVSY_ROOK, ZDRVSY_RK, ZDRVSY_AA, - $ ZDRVSY_AA_2STAGE, ILAVER, ZCHKQRT, ZCHKQRTP, - $ ZCHKLQT, ZCHKLQTP, ZCHKTSQR + $ ZCHKLQ, ZCHKUNHR_COL, ZCHKPB, ZCHKPO, ZCHKPS, + $ ZCHKPP, ZCHKPT, ZCHKQ3, ZCHKQL, ZCHKQR, ZCHKRQ, + $ ZCHKSP, ZCHKSY, ZCHKSY_ROOK, ZCHKSY_RK, + $ ZCHKSY_AA, ZCHKTB, ZCHKTP, ZCHKTR, ZCHKTZ, + $ ZDRVGB, ZDRVGE, ZDRVGT, ZDRVHE, ZDRVHE_ROOK, + $ ZDRVHE_RK, ZDRVHE_AA, ZDRVHE_AA_2STAGE, ZDRVHP, + $ ZDRVLS, ZDRVPB, ZDRVPO, ZDRVPP, ZDRVPT, + $ ZDRVSP, ZDRVSY, ZDRVSY_ROOK, ZDRVSY_RK, + $ ZDRVSY_AA, ZDRVSY_AA_2STAGE, ILAVER, ZCHKQRT, + $ ZCHKQRTP, ZCHKLQT, ZCHKLQTP, ZCHKTSQR * .. * .. Scalars in Common .. LOGICAL LERR, OK @@ -679,7 +681,7 @@ * * HK: Hermitian indefinite matrices, * with bounded Bunch-Kaufman (rook) pivoting algorithm, -* differnet matrix storage format than HR path version. +* different matrix storage format than HR path version. * NTYPES = 10 CALL ALAREQ( PATH, NMATS, DOTYPE, NTYPES, NIN, NOUT ) @@ -839,7 +841,7 @@ * * SK: symmetric indefinite matrices, * with bounded Bunch-Kaufman (rook) pivoting algorithm, -* differnet matrix storage format than SR path version. +* different matrix storage format than SR path version. * NTYPES = 11 CALL ALAREQ( PATH, NMATS, DOTYPE, NTYPES, NIN, NOUT ) @@ -1201,6 +1203,17 @@ ELSE WRITE( NOUT, FMT = 9989 )PATH END IF +* + ELSE IF( LSAMEN( 2, C2, 'HH' ) ) THEN +* +* HH: Householder reconstruction for tall-skinny matrices +* + IF( TSTCHK ) THEN + CALL ZCHKUNHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, + $ NBVAL, NOUT ) + ELSE + WRITE( NOUT, FMT = 9989 ) PATH + END IF * ELSE * diff --git a/lapack-netlib/TESTING/LIN/zchkunhr_col.f b/lapack-netlib/TESTING/LIN/zchkunhr_col.f new file mode 100644 index 000000000..ef8f8bcc4 --- /dev/null +++ b/lapack-netlib/TESTING/LIN/zchkunhr_col.f @@ -0,0 +1,239 @@ +*> \brief \b ZCHKUNHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE ZCHKUNHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, +* NBVAL, NOUT ) +* +* .. Scalar Arguments .. +* LOGICAL TSTERR +* INTEGER NM, NN, NNB, NOUT +* DOUBLE PRECISION THRESH +* .. +* .. Array Arguments .. +* INTEGER MVAL( * ), NBVAL( * ), NVAL( * ) +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> ZCHKUNHR_COL tests ZUNHR_COL using ZLATSQR and ZGEMQRT. Therefore, ZLATSQR +*> (used in ZGEQR) and ZGEMQRT (used in ZGEMQR) have to be tested +*> before this test. +*> +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] THRESH +*> \verbatim +*> THRESH is DOUBLE PRECISION +*> The threshold value for the test ratios. A result is +*> included in the output file if RESULT >= THRESH. To have +*> every test ratio printed, use THRESH = 0. +*> \endverbatim +*> +*> \param[in] TSTERR +*> \verbatim +*> TSTERR is LOGICAL +*> Flag that indicates whether error exits are to be tested. +*> \endverbatim +*> +*> \param[in] NM +*> \verbatim +*> NM is INTEGER +*> The number of values of M contained in the vector MVAL. 
+*> \endverbatim +*> +*> \param[in] MVAL +*> \verbatim +*> MVAL is INTEGER array, dimension (NM) +*> The values of the matrix row dimension M. +*> \endverbatim +*> +*> \param[in] NN +*> \verbatim +*> NN is INTEGER +*> The number of values of N contained in the vector NVAL. +*> \endverbatim +*> +*> \param[in] NVAL +*> \verbatim +*> NVAL is INTEGER array, dimension (NN) +*> The values of the matrix column dimension N. +*> \endverbatim +*> +*> \param[in] NNB +*> \verbatim +*> NNB is INTEGER +*> The number of values of NB contained in the vector NBVAL. +*> \endverbatim +*> +*> \param[in] NBVAL +*> \verbatim +*> NBVAL is INTEGER array, dimension (NNB) +*> The values of the blocksize NB. +*> \endverbatim +*> +*> \param[in] NOUT +*> \verbatim +*> NOUT is INTEGER +*> The unit number for output. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup complex16_lin +* +* ===================================================================== + SUBROUTINE ZCHKUNHR_COL( THRESH, TSTERR, NM, MVAL, NN, NVAL, NNB, + $ NBVAL, NOUT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + LOGICAL TSTERR + INTEGER NM, NN, NNB, NOUT + DOUBLE PRECISION THRESH +* .. +* .. Array Arguments .. + INTEGER MVAL( * ), NBVAL( * ), NVAL( * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NTESTS + PARAMETER ( NTESTS = 6 ) +* .. +* .. Local Scalars .. + CHARACTER(LEN=3) PATH + INTEGER I, IMB1, INB1, INB2, J, T, M, N, MB1, NB1, + $ NB2, NFAIL, NERRS, NRUN +* +* .. Local Arrays .. + DOUBLE PRECISION RESULT( NTESTS ) +* .. +* .. External Subroutines .. + EXTERNAL ALAHD, ALASUM, ZERRUNHR_COL, ZUNHR_COL01 +* .. +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. +* .. Scalars in Common .. + LOGICAL LERR, OK + CHARACTER(LEN=32) SRNAMT + INTEGER INFOT, NUNIT +* .. +* .. Common blocks .. + COMMON / INFOC / INFOT, NUNIT, OK, LERR + COMMON / SRNAMC / SRNAMT +* .. +* .. Executable Statements .. +* +* Initialize constants +* + PATH( 1: 1 ) = 'Z' + PATH( 2: 3 ) = 'HH' + NRUN = 0 + NFAIL = 0 + NERRS = 0 +* +* Test the error exits +* + IF( TSTERR ) CALL ZERRUNHR_COL( PATH, NOUT ) + INFOT = 0 +* +* Do for each value of M in MVAL. +* + DO I = 1, NM + M = MVAL( I ) +* +* Do for each value of N in NVAL. +* + DO J = 1, NN + N = NVAL( J ) +* +* Only for M >= N +* + IF ( MIN( M, N ).GT.0 .AND. M.GE.N ) THEN +* +* Do for each possible value of MB1 +* + DO IMB1 = 1, NNB + MB1 = NBVAL( IMB1 ) +* +* Only for MB1 > N +* + IF ( MB1.GT.N ) THEN +* +* Do for each possible value of NB1 +* + DO INB1 = 1, NNB + NB1 = NBVAL( INB1 ) +* +* Do for each possible value of NB2 +* + DO INB2 = 1, NNB + NB2 = NBVAL( INB2 ) +* + IF( NB1.GT.0 .AND. NB2.GT.0 ) THEN +* +* Test ZUNHR_COL +* + CALL ZUNHR_COL01( M, N, MB1, NB1, NB2, + $ RESULT ) +* +* Print information about the tests that did +* not pass the threshold. +* + DO T = 1, NTESTS + IF( RESULT( T ).GE.THRESH ) THEN + IF( NFAIL.EQ.0 .AND.
NERRS.EQ.0 ) + $ CALL ALAHD( NOUT, PATH ) + WRITE( NOUT, FMT = 9999 ) M, N, MB1, + $ NB1, NB2, T, RESULT( T ) + NFAIL = NFAIL + 1 + END IF + END DO + NRUN = NRUN + NTESTS + END IF + END DO + END DO + END IF + END DO + END IF + END DO + END DO +* +* Print a summary of the results. +* + CALL ALASUM( PATH, NOUT, NFAIL, NRUN, NERRS ) +* + 9999 FORMAT( 'M=', I5, ', N=', I5, ', MB1=', I5, + $ ', NB1=', I5, ', NB2=', I5,' test(', I2, ')=', G12.5 ) + RETURN +* +* End of ZCHKUNHR_COL +* + END \ No newline at end of file diff --git a/lapack-netlib/TESTING/LIN/zdrvhe_rk.f b/lapack-netlib/TESTING/LIN/zdrvhe_rk.f index 93c3fe61d..355260aad 100644 --- a/lapack-netlib/TESTING/LIN/zdrvhe_rk.f +++ b/lapack-netlib/TESTING/LIN/zdrvhe_rk.f @@ -98,6 +98,7 @@ *> \param[out] E *> \verbatim *> E is COMPLEX*16 array, dimension (NMAX) +*> \endverbatim *> *> \param[out] AINV *> \verbatim diff --git a/lapack-netlib/TESTING/LIN/zdrvls.f b/lapack-netlib/TESTING/LIN/zdrvls.f index 681852bc2..1313c853b 100644 --- a/lapack-netlib/TESTING/LIN/zdrvls.f +++ b/lapack-netlib/TESTING/LIN/zdrvls.f @@ -237,13 +237,13 @@ DOUBLE PRECISION EPS, NORMA, NORMB, RCOND * .. * .. Local Arrays .. - INTEGER ISEED( 4 ), ISEEDY( 4 ), IWQ - DOUBLE PRECISION RESULT( NTESTS ), RWQ - COMPLEX*16 WQ + INTEGER ISEED( 4 ), ISEEDY( 4 ), IWQ( 1 ) + DOUBLE PRECISION RESULT( NTESTS ), RWQ( 1 ) + COMPLEX*16 WQ( 1 ) * .. * .. Allocatable Arrays .. COMPLEX*16, ALLOCATABLE :: WORK (:) - DOUBLE PRECISION, ALLOCATABLE :: RWORK (:) + DOUBLE PRECISION, ALLOCATABLE :: RWORK (:), WORK2 (:) INTEGER, ALLOCATABLE :: IWORK (:) * .. * .. External Functions .. @@ -363,32 +363,32 @@ * Compute workspace needed for ZGELS CALL ZGELS( TRANS, M, N, NRHS, A, LDA, $ B, LDB, WQ, -1, INFO ) - LWORK_ZGELS = INT ( WQ ) + LWORK_ZGELS = INT ( WQ( 1 ) ) * Compute workspace needed for ZGETSLS CALL ZGETSLS( TRANS, M, N, NRHS, A, LDA, $ B, LDB, WQ, -1, INFO ) - LWORK_ZGETSLS = INT( WQ ) + LWORK_ZGETSLS = INT( WQ( 1 ) ) ENDDO END IF * Compute workspace needed for ZGELSY CALL ZGELSY( M, N, NRHS, A, LDA, B, LDB, IWQ, - $ RCOND, CRANK, WQ, -1, RWORK, INFO ) - LWORK_ZGELSY = INT( WQ ) + $ RCOND, CRANK, WQ, -1, RWQ, INFO ) + LWORK_ZGELSY = INT( WQ( 1 ) ) LRWORK_ZGELSY = 2*N * Compute workspace needed for ZGELSS CALL ZGELSS( M, N, NRHS, A, LDA, B, LDB, S, - $ RCOND, CRANK, WQ, -1 , RWORK, + $ RCOND, CRANK, WQ, -1 , RWQ, $ INFO ) - LWORK_ZGELSS = INT( WQ ) + LWORK_ZGELSS = INT( WQ( 1 ) ) LRWORK_ZGELSS = 5*MNMIN * Compute workspace needed for ZGELSD CALL ZGELSD( M, N, NRHS, A, LDA, B, LDB, S, $ RCOND, CRANK, WQ, -1, RWQ, IWQ, $ INFO ) - LWORK_ZGELSD = INT( WQ ) - LRWORK_ZGELSD = INT( RWQ ) + LWORK_ZGELSD = INT( WQ( 1 ) ) + LRWORK_ZGELSD = INT( RWQ ( 1 ) ) * Compute LIWORK workspace needed for ZGELSY and ZGELSD - LIWORK = MAX( LIWORK, N, IWQ ) + LIWORK = MAX( LIWORK, N, IWQ( 1 ) ) * Compute LRWORK workspace needed for ZGELSY, ZGELSS and ZGELSD LRWORK = MAX( LRWORK, LRWORK_ZGELSY, $ LRWORK_ZGELSS, LRWORK_ZGELSD ) @@ -406,6 +406,7 @@ LWLSY = LWORK * ALLOCATE( WORK( LWORK ) ) + ALLOCATE( WORK2( 2 * LWORK ) ) ALLOCATE( IWORK( LIWORK ) ) ALLOCATE( RWORK( LRWORK ) ) * @@ -563,7 +564,7 @@ CALL ZLARNV( 2, ISEED, NCOLS*NRHS, $ WORK ) CALL ZSCAL( NCOLS*NRHS, - $ ONE / DBLE( NCOLS ), WORK, + $ CONE / DBLE( NCOLS ), WORK, $ 1 ) END IF CALL ZGEMM( TRANS, 'No transpose', NROWS, @@ -596,7 +597,7 @@ $ CALL ZLACPY( 'Full', NROWS, NRHS, $ COPYB, LDB, C, LDB ) CALL ZQRT16( TRANS, M, N, NRHS, COPYA, - $ LDA, B, LDB, C, LDB, WORK, + $ LDA, B, LDB, C, LDB, WORK2, $ RESULT( 15 ) ) * IF( ( ITRAN.EQ.1 .AND. M.GE.N ) .OR. 
diff --git a/lapack-netlib/TESTING/LIN/zerrunhr_col.f b/lapack-netlib/TESTING/LIN/zerrunhr_col.f new file mode 100644 index 000000000..4fb62734d --- /dev/null +++ b/lapack-netlib/TESTING/LIN/zerrunhr_col.f @@ -0,0 +1,164 @@ +*> \brief \b ZERRUNHR_COL +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE ZERRUNHR_COL( PATH, NUNIT ) +* +* .. Scalar Arguments .. +* CHARACTER*3 PATH +* INTEGER NUNIT +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> ZERRUNHR_COL tests the error exits for ZUNHR_COL that does +*> Householder reconstruction from the output of the tall-skinny +*> factorization ZLATSQR. +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] PATH +*> \verbatim +*> PATH is CHARACTER*3 +*> The LAPACK path name for the routines to be tested. +*> \endverbatim +*> +*> \param[in] NUNIT +*> \verbatim +*> NUNIT is INTEGER +*> The unit number for output. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup complex16_lin +* +* ===================================================================== + SUBROUTINE ZERRUNHR_COL( PATH, NUNIT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + CHARACTER(LEN=3) PATH + INTEGER NUNIT +* .. +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NMAX + PARAMETER ( NMAX = 2 ) +* .. +* .. Local Scalars .. + INTEGER I, INFO, J +* .. +* .. Local Arrays .. + COMPLEX*16 A( NMAX, NMAX ), T( NMAX, NMAX ), D(NMAX) +* .. +* .. External Subroutines .. + EXTERNAL ALAESM, CHKXER, ZUNHR_COL +* .. +* .. Scalars in Common .. + LOGICAL LERR, OK + CHARACTER(LEN=32) SRNAMT + INTEGER INFOT, NOUT +* .. +* .. Common blocks .. + COMMON / INFOC / INFOT, NOUT, OK, LERR + COMMON / SRNAMC / SRNAMT +* .. +* .. Intrinsic Functions .. + INTRINSIC DBLE, DCMPLX +* .. +* .. Executable Statements .. +* + NOUT = NUNIT + WRITE( NOUT, FMT = * ) +* +* Set the variables to innocuous values. +* + DO J = 1, NMAX + DO I = 1, NMAX + A( I, J ) = DCMPLX( 1.D+0 / DBLE( I+J ) ) + T( I, J ) = DCMPLX( 1.D+0 / DBLE( I+J ) ) + END DO + D( J ) = ( 0.D+0, 0.D+0 ) + END DO + OK = .TRUE.
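+* +* Note on the convention used below, assuming the standard LIN error-exit harness: INFOT holds the position of the argument that the next call is expected to reject. The routine under test calls XERBLA; the test suite's XERBLA compares the reported position against INFOT and the routine name against SRNAMT, records the outcome in LERR and OK of the INFOC common block, and CHKXER then verifies and resets that state. +*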
+* +* Error exits for Householder reconstruction +* +* ZUNHR_COL +* + SRNAMT = 'ZUNHR_COL' +* + INFOT = 1 + CALL ZUNHR_COL( -1, 0, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 2 + CALL ZUNHR_COL( 0, -1, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) + CALL ZUNHR_COL( 1, 2, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 3 + CALL ZUNHR_COL( 0, 0, -1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL ZUNHR_COL( 0, 0, 0, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 5 + CALL ZUNHR_COL( 0, 0, 1, A, -1, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL ZUNHR_COL( 0, 0, 1, A, 0, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL ZUNHR_COL( 2, 0, 1, A, 1, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + INFOT = 7 + CALL ZUNHR_COL( 0, 0, 1, A, 1, T, -1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL ZUNHR_COL( 0, 0, 1, A, 1, T, 0, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* + CALL ZUNHR_COL( 4, 3, 2, A, 4, T, 1, D, INFO ) + CALL CHKXER( 'ZUNHR_COL', INFOT, NOUT, LERR, OK ) +* +* Print a summary line. +* + CALL ALAESM( PATH, OK, NOUT ) +* + RETURN +* +* End of ZERRUNHR_COL +* + END diff --git a/lapack-netlib/TESTING/LIN/zerrvx.f b/lapack-netlib/TESTING/LIN/zerrvx.f index 29ba744ed..7759384e6 100644 --- a/lapack-netlib/TESTING/LIN/zerrvx.f +++ b/lapack-netlib/TESTING/LIN/zerrvx.f @@ -94,7 +94,7 @@ $ ZHPSV, ZHPSVX, ZPBSV, ZPBSVX, ZPOSV, ZPOSVX, $ ZPPSV, ZPPSVX, ZPTSV, ZPTSVX, ZSPSV, ZSPSVX, $ ZSYSV, ZSYSV_AA, ZSYSV_RK, ZSYSV_ROOK, - $ ZSYSVX, ZSYSV_AA_2STAGE + $ ZSYSVX, ZHESV_AA_2STAGE * .. * .. Scalars in Common .. 
LOGICAL LERR, OK @@ -721,7 +721,7 @@ * ELSE IF( LSAMEN( 2, C2, 'H2' ) ) THEN * -* CHESV_AASEN_2STAGE +* ZHESV_AASEN_2STAGE * SRNAMT = 'ZHESV_AA_2STAGE' INFOT = 1 @@ -741,7 +741,7 @@ $ W, 1, INFO ) CALL CHKXER( 'ZHESV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 11 - CALL ZHESV_AA_2STAGE( 'U', 2, 1, A, 2, A, 2, IP, IP, B, 1, + CALL ZHESV_AA_2STAGE( 'U', 2, 1, A, 2, A, 8, IP, IP, B, 1, $ W, 1, INFO ) CALL CHKXER( 'ZHESV_AA_2STAGE', INFOT, NOUT, LERR, OK ) INFOT = 7 @@ -749,6 +749,36 @@ $ W, 1, INFO ) CALL CHKXER( 'ZHESV_AA_2STAGE', INFOT, NOUT, LERR, OK ) * + ELSE IF( LSAMEN( 2, C2, 'S2' ) ) THEN +* +* ZSYSV_AASEN_2STAGE +* + SRNAMT = 'ZSYSV_AA_2STAGE' + INFOT = 1 + CALL ZSYSV_AA_2STAGE( '/', 0, 0, A, 1, A, 1, IP, IP, B, 1, + $ W, 1, INFO ) + CALL CHKXER( 'ZSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZSYSV_AA_2STAGE( 'U', -1, 0, A, 1, A, 1, IP, IP, B, 1, + $ W, 1, INFO ) + CALL CHKXER( 'ZSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYSV_AA_2STAGE( 'U', 0, -1, A, 1, A, 1, IP, IP, B, 1, + $ W, 1, INFO ) + CALL CHKXER( 'ZSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZSYSV_AA_2STAGE( 'U', 2, 1, A, 1, A, 1, IP, IP, B, 1, + $ W, 1, INFO ) + CALL CHKXER( 'ZSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZSYSV_AA_2STAGE( 'U', 2, 1, A, 2, A, 8, IP, IP, B, 1, + $ W, 1, INFO ) + CALL CHKXER( 'ZSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYSV_AA_2STAGE( 'U', 2, 1, A, 2, A, 1, IP, IP, B, 2, + $ W, 1, INFO ) + CALL CHKXER( 'ZSYSV_AA_2STAGE', INFOT, NOUT, LERR, OK ) +** ELSE IF( LSAMEN( 2, C2, 'HP' ) ) THEN * * ZHPSV diff --git a/lapack-netlib/TESTING/LIN/zlahilb.f b/lapack-netlib/TESTING/LIN/zlahilb.f index a6dc79b20..ba83af825 100644 --- a/lapack-netlib/TESTING/LIN/zlahilb.f +++ b/lapack-netlib/TESTING/LIN/zlahilb.f @@ -164,7 +164,7 @@ INTEGER NMAX_EXACT, NMAX_APPROX, SIZE_D PARAMETER (NMAX_EXACT = 6, NMAX_APPROX = 11, SIZE_D = 8) * -* d's are generated from random permuation of those eight elements. +* d's are generated from random permutation of those eight elements. COMPLEX*16 d1(8), d2(8), invd1(8), invd2(8) DATA D1 /(-1,0),(0,1),(-1,-1),(0,-1),(1,0),(-1,1),(1,1),(1,-1)/ DATA D2 /(-1,0),(0,-1),(-1,1),(0,1),(1,0),(-1,-1),(1,-1),(1,1)/ diff --git a/lapack-netlib/TESTING/LIN/ztsqr01.f b/lapack-netlib/TESTING/LIN/ztsqr01.f index 094473888..81d7fdb44 100644 --- a/lapack-netlib/TESTING/LIN/ztsqr01.f +++ b/lapack-netlib/TESTING/LIN/ztsqr01.f @@ -114,7 +114,7 @@ * .. * .. Local Arrays .. INTEGER ISEED( 4 ) - COMPLEX*16 TQUERY( 5 ), WORKQUERY + COMPLEX*16 TQUERY( 5 ), WORKQUERY( 1 ) * .. * .. External Functions .. 
DOUBLE PRECISION DLAMCH, ZLANGE, ZLANSY @@ -173,22 +173,22 @@ * CALL ZGEQR( M, N, AF, M, TQUERY, -1, WORKQUERY, -1, INFO ) TSIZE = INT( TQUERY( 1 ) ) - LWORK = INT( WORKQUERY ) + LWORK = INT( WORKQUERY( 1 ) ) CALL ZGEMQR( 'L', 'N', M, M, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL ZGEMQR( 'L', 'N', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL ZGEMQR( 'L', 'C', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL ZGEMQR( 'R', 'N', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL ZGEMQR( 'R', 'C', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) ALLOCATE ( T( TSIZE ) ) ALLOCATE ( WORK( LWORK ) ) srnamt = 'ZGEQR' @@ -316,22 +316,22 @@ ELSE CALL ZGELQ( M, N, AF, M, TQUERY, -1, WORKQUERY, -1, INFO ) TSIZE = INT( TQUERY( 1 ) ) - LWORK = INT( WORKQUERY ) + LWORK = INT( WORKQUERY( 1 ) ) CALL ZGEMLQ( 'R', 'N', N, N, K, AF, M, TQUERY, TSIZE, Q, N, $ WORKQUERY, -1, INFO ) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL ZGEMLQ( 'L', 'N', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL ZGEMLQ( 'L', 'C', N, M, K, AF, M, TQUERY, TSIZE, DF, N, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL ZGEMLQ( 'R', 'N', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) CALL ZGEMLQ( 'R', 'C', M, N, K, AF, M, TQUERY, TSIZE, CF, M, $ WORKQUERY, -1, INFO) - LWORK = MAX( LWORK, INT( WORKQUERY ) ) + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) ALLOCATE ( T( TSIZE ) ) ALLOCATE ( WORK( LWORK ) ) srnamt = 'ZGELQ' diff --git a/lapack-netlib/TESTING/LIN/zunhr_col01.f b/lapack-netlib/TESTING/LIN/zunhr_col01.f new file mode 100644 index 000000000..9fb3bf352 --- /dev/null +++ b/lapack-netlib/TESTING/LIN/zunhr_col01.f @@ -0,0 +1,390 @@ +*> \brief \b ZUNHR_COL01 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* SUBROUTINE ZUNHR_COL01( M, N, MB1, NB1, NB2, RESULT ) +* +* .. Scalar Arguments .. +* INTEGER M, N, MB1, NB1, NB2 +* .. Return values .. +* DOUBLE PRECISION RESULT(6) +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> ZUNHR_COL01 tests ZUNHR_COL using ZLATSQR, ZGEMQRT and ZUNGTSQR. +*> Therefore, ZLATSQR (part of ZGEQR), ZGEMQRT (part of ZGEMQR), ZUNGTSQR +*> have to be tested before this test. +*> +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] M +*> \verbatim +*> M is INTEGER +*> Number of rows in test matrix. +*> \endverbatim +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> Number of columns in test matrix. +*> \endverbatim +*> \param[in] MB1 +*> \verbatim +*> MB1 is INTEGER +*> Number of rows in a row block of an input test matrix.
+*> \endverbatim +*> +*> \param[in] NB1 +*> \verbatim +*> NB1 is INTEGER +*> Number of columns in a column block of an input test matrix. +*> \endverbatim +*> +*> \param[in] NB2 +*> \verbatim +*> NB2 is INTEGER +*> Number of columns in a column block of an output test matrix. +*> \endverbatim +*> +*> \param[out] RESULT +*> \verbatim +*> RESULT is DOUBLE PRECISION array, dimension (6) +*> Results of each of the six tests below. +*> ( C is an M-by-N random matrix, D is an N-by-M random matrix ) +*> +*> RESULT(1) = | A - Q * R | / (eps * m * |A|) +*> RESULT(2) = | I - (Q**H) * Q | / (eps * m ) +*> RESULT(3) = | Q * C - Q * C | / (eps * m * |C|) +*> RESULT(4) = | (Q**H) * C - (Q**H) * C | / (eps * m * |C|) +*> RESULT(5) = | (D * Q) - D * Q | / (eps * m * |D|) +*> RESULT(6) = | D * (Q**H) - D * (Q**H) | / (eps * m * |D|) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date November 2019 +* +*> \ingroup complex16_lin +* +* ===================================================================== + SUBROUTINE ZUNHR_COL01( M, N, MB1, NB1, NB2, RESULT ) + IMPLICIT NONE +* +* -- LAPACK test routine (version 3.9.0) -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* November 2019 +* +* .. Scalar Arguments .. + INTEGER M, N, MB1, NB1, NB2 +* .. Return values .. + DOUBLE PRECISION RESULT(6) +* +* ===================================================================== +* +* .. +* .. Local allocatable arrays + COMPLEX*16, ALLOCATABLE :: A(:,:), AF(:,:), Q(:,:), R(:,:), + $ WORK( : ), T1(:,:), T2(:,:), DIAG(:), + $ C(:,:), CF(:,:), D(:,:), DF(:,:) + DOUBLE PRECISION, ALLOCATABLE :: RWORK(:) +* +* .. Parameters .. + DOUBLE PRECISION ZERO + PARAMETER ( ZERO = 0.0D+0 ) + COMPLEX*16 CONE, CZERO + PARAMETER ( CONE = ( 1.0D+0, 0.0D+0 ), + $ CZERO = ( 0.0D+0, 0.0D+0 ) ) +* .. +* .. Local Scalars .. + LOGICAL TESTZEROS + INTEGER INFO, I, J, K, L, LWORK, NB1_UB, NB2_UB, NRB + DOUBLE PRECISION ANORM, EPS, RESID, CNORM, DNORM +* .. +* .. Local Arrays .. + INTEGER ISEED( 4 ) + COMPLEX*16 WORKQUERY( 1 ) +* .. +* .. External Functions .. + DOUBLE PRECISION DLAMCH, ZLANGE, ZLANSY + EXTERNAL DLAMCH, ZLANGE, ZLANSY +* .. +* .. External Subroutines .. + EXTERNAL ZLACPY, ZLARNV, ZLASET, ZLATSQR, ZUNHR_COL, + $ ZUNGTSQR, ZSCAL, ZGEMM, ZGEMQRT, ZHERK +* .. +* .. Intrinsic Functions .. + INTRINSIC CEILING, DBLE, MAX, MIN +* .. +* .. Scalars in Common .. + CHARACTER(LEN=32) SRNAMT +* .. +* .. Common blocks .. + COMMON / SRNAMC / SRNAMT +* .. +* .. Data statements .. + DATA ISEED / 1988, 1989, 1990, 1991 / +* +* TEST MATRICES WITH HALF OF MATRIX BEING ZEROS +* + TESTZEROS = .FALSE.
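+* +* Note on the workspace queries performed below, assuming the usual LAPACK convention: a call with LWORK = -1 does no computation and returns the required workspace size in WORK( 1 ). The query buffer WORKQUERY is therefore declared as an array of length 1 rather than as a scalar, so that the rank of the actual argument matches the callee's assumed-size dummy array. +*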
+* + EPS = DLAMCH( 'Epsilon' ) + K = MIN( M, N ) + L = MAX( M, N, 1) +* +* Dynamically allocate local arrays +* + ALLOCATE ( A(M,N), AF(M,N), Q(L,L), R(M,L), RWORK(L), + $ C(M,N), CF(M,N), + $ D(N,M), DF(N,M) ) +* +* Put random numbers into A and copy to AF +* + DO J = 1, N + CALL ZLARNV( 2, ISEED, M, A( 1, J ) ) + END DO + IF( TESTZEROS ) THEN + IF( M.GE.4 ) THEN + DO J = 1, N + CALL ZLARNV( 2, ISEED, M/2, A( M/4, J ) ) + END DO + END IF + END IF + CALL ZLACPY( 'Full', M, N, A, M, AF, M ) +* +* Number of row blocks in ZLATSQR +* + NRB = MAX( 1, CEILING( DBLE( M - N ) / DBLE( MB1 - N ) ) ) +* + ALLOCATE ( T1( NB1, N * NRB ) ) + ALLOCATE ( T2( NB2, N ) ) + ALLOCATE ( DIAG( N ) ) +* +* Begin determine LWORK for the array WORK and allocate memory. +* +* ZLATSQR requires NB1 to be bounded by N. +* + NB1_UB = MIN( NB1, N) +* +* ZGEMQRT requires NB2 to be bounded by N. +* + NB2_UB = MIN( NB2, N) +* + CALL ZLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1, + $ WORKQUERY, -1, INFO ) + LWORK = INT( WORKQUERY( 1 ) ) + CALL ZUNGTSQR( M, N, MB1, NB1, AF, M, T1, NB1, WORKQUERY, -1, + $ INFO ) + + LWORK = MAX( LWORK, INT( WORKQUERY( 1 ) ) ) +* +* In ZGEMQRT, WORK is N*NB2_UB if SIDE = 'L', +* or M*NB2_UB if SIDE = 'R'. +* + LWORK = MAX( LWORK, NB2_UB * N, NB2_UB * M ) +* + ALLOCATE ( WORK( LWORK ) ) +* +* End allocate memory for WORK. +* +* +* Begin Householder reconstruction routines +* +* Factor the matrix A in the array AF. +* + SRNAMT = 'ZLATSQR' + CALL ZLATSQR( M, N, MB1, NB1_UB, AF, M, T1, NB1, WORK, LWORK, + $ INFO ) +* +* Copy the factor R into the array R. +* + SRNAMT = 'ZLACPY' + CALL ZLACPY( 'U', M, N, AF, M, R, M ) +* +* Reconstruct the orthogonal matrix Q. +* + SRNAMT = 'ZUNGTSQR' + CALL ZUNGTSQR( M, N, MB1, NB1, AF, M, T1, NB1, WORK, LWORK, + $ INFO ) +* +* Perform the Householder reconstruction, the result is stored +* in the arrays AF and T2. +* + SRNAMT = 'ZUNHR_COL' + CALL ZUNHR_COL( M, N, NB2, AF, M, T2, NB2, DIAG, INFO ) +* +* Compute the factor R_hr corresponding to the Householder +* reconstructed Q_hr and place it in the upper triangle of AF to +* match the Q storage format in ZGEQRT. R_hr = S * R_tsqr, +* this means changing the sign of the I-th row of the matrix R_tsqr +* according to the sign of the I-th diagonal element DIAG(I) of the +* matrix S. +* + SRNAMT = 'ZLACPY' + CALL ZLACPY( 'U', M, N, R, M, AF, M ) +* + DO I = 1, N + IF( DIAG( I ).EQ.-CONE ) THEN + CALL ZSCAL( N+1-I, -CONE, AF( I, I ), M ) + END IF + END DO +* +* End Householder reconstruction routines.
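+* +* Note: in TEST 2 below, R is preloaded with the identity and the Hermitian rank-M update R := -Q**H * Q + R leaves exactly I - Q**H * Q, so the reported ratio is the norm of that residual over eps * m. +*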
+* +* +* Generate the m-by-m matrix Q +* + CALL ZLASET( 'Full', M, M, CZERO, CONE, Q, M ) +* + SRNAMT = 'ZGEMQRT' + CALL ZGEMQRT( 'L', 'N', M, M, K, NB2_UB, AF, M, T2, NB2, Q, M, + $ WORK, INFO ) +* +* Copy R +* + CALL ZLASET( 'Full', M, N, CZERO, CZERO, R, M ) +* + CALL ZLACPY( 'Upper', M, N, AF, M, R, M ) +* +* TEST 1 +* Compute |R - (Q**H)*A| / ( eps * m * |A| ) and store in RESULT(1) +* + CALL ZGEMM( 'C', 'N', M, N, M, -CONE, Q, M, A, M, CONE, R, M ) +* + ANORM = ZLANGE( '1', M, N, A, M, RWORK ) + RESID = ZLANGE( '1', M, N, R, M, RWORK ) + IF( ANORM.GT.ZERO ) THEN + RESULT( 1 ) = RESID / ( EPS * MAX( 1, M ) * ANORM ) + ELSE + RESULT( 1 ) = ZERO + END IF +* +* TEST 2 +* Compute |I - (Q**H)*Q| / ( eps * m ) and store in RESULT(2) +* + CALL ZLASET( 'Full', M, M, CZERO, CONE, R, M ) + CALL ZHERK( 'U', 'C', M, M, -CONE, Q, M, CONE, R, M ) + RESID = ZLANSY( '1', 'Upper', M, R, M, RWORK ) + RESULT( 2 ) = RESID / ( EPS * MAX( 1, M ) ) +* +* Generate random m-by-n matrix C +* + DO J = 1, N + CALL ZLARNV( 2, ISEED, M, C( 1, J ) ) + END DO + CNORM = ZLANGE( '1', M, N, C, M, RWORK ) + CALL ZLACPY( 'Full', M, N, C, M, CF, M ) +* +* Apply Q to C as Q*C = CF +* + SRNAMT = 'ZGEMQRT' + CALL ZGEMQRT( 'L', 'N', M, N, K, NB2_UB, AF, M, T2, NB2, CF, M, + $ WORK, INFO ) +* +* TEST 3 +* Compute |CF - Q*C| / ( eps * m * |C| ) +* + CALL ZGEMM( 'N', 'N', M, N, M, -CONE, Q, M, C, M, CONE, CF, M ) + RESID = ZLANGE( '1', M, N, CF, M, RWORK ) + IF( CNORM.GT.ZERO ) THEN + RESULT( 3 ) = RESID / ( EPS * MAX( 1, M ) * CNORM ) + ELSE + RESULT( 3 ) = ZERO + END IF +* +* Copy C into CF again +* + CALL ZLACPY( 'Full', M, N, C, M, CF, M ) +* +* Apply Q to C as (Q**H)*C = CF +* + SRNAMT = 'ZGEMQRT' + CALL ZGEMQRT( 'L', 'C', M, N, K, NB2_UB, AF, M, T2, NB2, CF, M, + $ WORK, INFO ) +* +* TEST 4 +* Compute |CF - (Q**H)*C| / ( eps * m * |C|) +* + CALL ZGEMM( 'C', 'N', M, N, M, -CONE, Q, M, C, M, CONE, CF, M ) + RESID = ZLANGE( '1', M, N, CF, M, RWORK ) + IF( CNORM.GT.ZERO ) THEN + RESULT( 4 ) = RESID / ( EPS * MAX( 1, M ) * CNORM ) + ELSE + RESULT( 4 ) = ZERO + END IF +* +* Generate random n-by-m matrix D and a copy DF +* + DO J = 1, M + CALL ZLARNV( 2, ISEED, N, D( 1, J ) ) + END DO + DNORM = ZLANGE( '1', N, M, D, N, RWORK ) + CALL ZLACPY( 'Full', N, M, D, N, DF, N ) +* +* Apply Q to D as D*Q = DF +* + SRNAMT = 'ZGEMQRT' + CALL ZGEMQRT( 'R', 'N', N, M, K, NB2_UB, AF, M, T2, NB2, DF, N, + $ WORK, INFO ) +* +* TEST 5 +* Compute |DF - D*Q| / ( eps * m * |D| ) +* + CALL ZGEMM( 'N', 'N', N, M, M, -CONE, D, N, Q, M, CONE, DF, N ) + RESID = ZLANGE( '1', N, M, DF, N, RWORK ) + IF( DNORM.GT.ZERO ) THEN + RESULT( 5 ) = RESID / ( EPS * MAX( 1, M ) * DNORM ) + ELSE + RESULT( 5 ) = ZERO + END IF +* +* Copy D into DF again +* + CALL ZLACPY( 'Full', N, M, D, N, DF, N ) +* +* Apply Q to D as D*QT = DF +* + SRNAMT = 'ZGEMQRT' + CALL ZGEMQRT( 'R', 'C', N, M, K, NB2_UB, AF, M, T2, NB2, DF, N, + $ WORK, INFO ) +* +* TEST 6 +* Compute |DF - D*(Q**H)| / ( eps * m * |D| ) +* + CALL ZGEMM( 'N', 'C', N, M, M, -CONE, D, N, Q, M, CONE, DF, N ) + RESID = ZLANGE( '1', N, M, DF, N, RWORK ) + IF( DNORM.GT.ZERO ) THEN + RESULT( 6 ) = RESID / ( EPS * MAX( 1, M ) * DNORM ) + ELSE + RESULT( 6 ) = ZERO + END IF +* +* Deallocate all arrays +* + DEALLOCATE ( A, AF, Q, R, RWORK, WORK, T1, T2, DIAG, + $ C, D, CF, DF ) +* + RETURN +* +* End of ZUNHR_COL01 +* + END diff --git a/lapack-netlib/TESTING/MATGEN/Makefile b/lapack-netlib/TESTING/MATGEN/Makefile index a1d784fa5..e21ebd6c3 100644 --- a/lapack-netlib/TESTING/MATGEN/Makefile +++ 
b/lapack-netlib/TESTING/MATGEN/Makefile @@ -1,5 +1,3 @@ -include ../../make.inc - ####################################################################### # This is the makefile to create a library of the test matrix # generators used in LAPACK. The files are organized as follows: @@ -32,52 +30,67 @@ include ../../make.inc # ####################################################################### +TOPSRCDIR = ../.. +include $(TOPSRCDIR)/make.inc + +ifneq "$(or $(BUILD_SINGLE),$(BUILD_COMPLEX))" "" SCATGEN = slatm1.o slatm7.o slaran.o slarnd.o +endif +ifeq ($(BUILD_SINGLE),1) SMATGEN = slatms.o slatme.o slatmr.o slatmt.o \ slagge.o slagsy.o slakf2.o slarge.o slaror.o slarot.o slatm2.o \ slatm3.o slatm5.o slatm6.o slahilb.o +endif +ifeq ($(BUILD_COMPLEX),1) CMATGEN = clatms.o clatme.o clatmr.o clatmt.o \ clagge.o claghe.o clagsy.o clakf2.o clarge.o claror.o clarot.o \ clatm1.o clarnd.o clatm2.o clatm3.o clatm5.o clatm6.o clahilb.o +endif +ifneq "$(or $(BUILD_DOUBLE),$(BUILD_COMPLEX16))" "" DZATGEN = dlatm1.o dlatm7.o dlaran.o dlarnd.o +endif +ifeq ($(BUILD_DOUBLE),1) DMATGEN = dlatms.o dlatme.o dlatmr.o dlatmt.o \ dlagge.o dlagsy.o dlakf2.o dlarge.o dlaror.o dlarot.o dlatm2.o \ dlatm3.o dlatm5.o dlatm6.o dlahilb.o +endif +ifeq ($(BUILD_COMPLEX16),1) ZMATGEN = zlatms.o zlatme.o zlatmr.o zlatmt.o \ zlagge.o zlaghe.o zlagsy.o zlakf2.o zlarge.o zlaror.o zlarot.o \ zlatm1.o zlarnd.o zlatm2.o zlatm3.o zlatm5.o zlatm6.o zlahilb.o +endif -all: ../../$(TMGLIB) +.PHONY: all +all: $(TMGLIB) ALLOBJ = $(SMATGEN) $(CMATGEN) $(SCATGEN) $(DMATGEN) $(ZMATGEN) \ $(DZATGEN) -.PHONY: ../../$(TMGLIB) - -../../$(TMGLIB): $(ALLOBJ) - $(ARCH) $(ARCHFLAGS) $@ $^ +$(TMGLIB): $(ALLOBJ) + $(AR) $(ARFLAGS) $@ $^ $(RANLIB) $@ +.PHONY: single complex double complex16 single: $(SMATGEN) $(SCATGEN) - $(ARCH) $(ARCHFLAGS) ../../$(TMGLIB) $^ - $(RANLIB) ../../$(TMGLIB) + $(AR) $(ARFLAGS) $(TMGLIB) $^ + $(RANLIB) $(TMGLIB) complex: $(CMATGEN) $(SCATGEN) - $(ARCH) $(ARCHFLAGS) ../../$(TMGLIB) $^ - $(RANLIB) ../../$(TMGLIB) + $(AR) $(ARFLAGS) $(TMGLIB) $^ + $(RANLIB) $(TMGLIB) double: $(DMATGEN) $(DZATGEN) - $(ARCH) $(ARCHFLAGS) ../../$(TMGLIB) $^ - $(RANLIB) ../../$(TMGLIB) + $(AR) $(ARFLAGS) $(TMGLIB) $^ + $(RANLIB) $(TMGLIB) complex16: $(ZMATGEN) $(DZATGEN) - $(ARCH) $(ARCHFLAGS) ../../$(TMGLIB) $^ - $(RANLIB) ../../$(TMGLIB) + $(AR) $(ARFLAGS) $(TMGLIB) $^ + $(RANLIB) $(TMGLIB) $(SCATGEN): $(FRC) $(SMATGEN): $(FRC) @@ -89,14 +102,16 @@ $(ZMATGEN): $(FRC) FRC: @FRC=$(FRC) -clean: cleanobj #cleanlib +.PHONY: clean cleanobj cleanlib +clean: cleanobj cleanlib cleanobj: rm -f *.o cleanlib: - rm -f ../../$(TMGLIB) - -.f.o: - $(FORTRAN) $(OPTS) -c -o $@ $< - -slaran.o: slaran.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< -dlaran.o: dlaran.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< + rm -f $(TMGLIB) + +ifeq ($(filter $(BUILD_SINGLE) $(BUILD_COMPLEX),1),) +slaran.o: slaran.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< +endif +ifeq ($(filter $(BUILD_DOUBLE) $(BUILD_COMPLEX16),1),) +dlaran.o: dlaran.f ; $(FC) $(FFLAGS_NOOPT) -c -o $@ $< +endif diff --git a/lapack-netlib/TESTING/MATGEN/clahilb.f b/lapack-netlib/TESTING/MATGEN/clahilb.f index 13902872c..f4481fc78 100644 --- a/lapack-netlib/TESTING/MATGEN/clahilb.f +++ b/lapack-netlib/TESTING/MATGEN/clahilb.f @@ -164,7 +164,7 @@ INTEGER NMAX_EXACT, NMAX_APPROX, SIZE_D PARAMETER (NMAX_EXACT = 6, NMAX_APPROX = 11, SIZE_D = 8) * -* d's are generated from random permuation of those eight elements. +* d's are generated from random permutation of those eight elements. 
COMPLEX D1(8), D2(8), INVD1(8), INVD2(8) DATA D1 /(-1,0),(0,1),(-1,-1),(0,-1),(1,0),(-1,1),(1,1),(1,-1)/ DATA D2 /(-1,0),(0,-1),(-1,1),(0,1),(1,0),(-1,-1),(1,-1),(1,1)/ diff --git a/lapack-netlib/TESTING/MATGEN/clatm2.f b/lapack-netlib/TESTING/MATGEN/clatm2.f index 01221e0cc..5bd6b9dc8 100644 --- a/lapack-netlib/TESTING/MATGEN/clatm2.f +++ b/lapack-netlib/TESTING/MATGEN/clatm2.f @@ -186,7 +186,7 @@ *> SPARSE is REAL *> Value between 0. and 1. *> On entry specifies the sparsity of the matrix -*> if sparse matix is to be generated. +*> if sparse matrix is to be generated. *> SPARSE should lie between 0 and 1. *> A uniform ( 0, 1 ) random number x is generated and *> compared to SPARSE; if x is larger the matrix entry diff --git a/lapack-netlib/TESTING/MATGEN/clatm3.f b/lapack-netlib/TESTING/MATGEN/clatm3.f index 3e07f3ec0..42b453553 100644 --- a/lapack-netlib/TESTING/MATGEN/clatm3.f +++ b/lapack-netlib/TESTING/MATGEN/clatm3.f @@ -202,7 +202,7 @@ *> \verbatim *> SPARSE is REAL between 0. and 1. *> On entry specifies the sparsity of the matrix -*> if sparse matix is to be generated. +*> if sparse matrix is to be generated. *> SPARSE should lie between 0 and 1. *> A uniform ( 0, 1 ) random number x is generated and *> compared to SPARSE; if x is larger the matrix entry diff --git a/lapack-netlib/TESTING/MATGEN/clatmr.f b/lapack-netlib/TESTING/MATGEN/clatmr.f index 11d29a3d0..e80c4a514 100644 --- a/lapack-netlib/TESTING/MATGEN/clatmr.f +++ b/lapack-netlib/TESTING/MATGEN/clatmr.f @@ -316,20 +316,6 @@ *> Not referenced if PIVTNG = 'N'. Not modified. *> \endverbatim *> -*> \param[in] SPARSE -*> \verbatim -*> SPARSE is REAL -*> On entry specifies the sparsity of the matrix if a sparse -*> matrix is to be generated. SPARSE should lie between -*> 0 and 1. To generate a sparse matrix, for each matrix entry -*> a uniform ( 0, 1 ) random number x is generated and -*> compared to SPARSE; if x is larger the matrix entry -*> is unchanged and if x is smaller the entry is set -*> to zero. Thus on the average a fraction SPARSE of the -*> entries will be set to zero. -*> Not modified. -*> \endverbatim -*> *> \param[in] KL *> \verbatim *> KL is INTEGER @@ -350,6 +336,20 @@ *> Not modified. *> \endverbatim *> +*> \param[in] SPARSE +*> \verbatim +*> SPARSE is REAL +*> On entry specifies the sparsity of the matrix if a sparse +*> matrix is to be generated. SPARSE should lie between +*> 0 and 1. To generate a sparse matrix, for each matrix entry +*> a uniform ( 0, 1 ) random number x is generated and +*> compared to SPARSE; if x is larger the matrix entry +*> is unchanged and if x is smaller the entry is set +*> to zero. Thus on the average a fraction SPARSE of the +*> entries will be set to zero. +*> Not modified. +*> \endverbatim +*> *> \param[in] ANORM *> \verbatim *> ANORM is REAL @@ -416,7 +416,7 @@ *> If PACK='C' or 'R', LDA must be at least 1. *> If PACK='B', or 'Q', LDA must be MIN ( KU+1, N ) *> If PACK='Z', LDA must be at least KUU+KLL+1, where -*> KUU = MIN ( KU, N-1 ) and KLL = MIN ( KL, N-1 ) +*> KUU = MIN ( KU, N-1 ) and KLL = MIN ( KL, M-1 ) *> Not modified. *> \endverbatim *> diff --git a/lapack-netlib/TESTING/MATGEN/dlatm2.f b/lapack-netlib/TESTING/MATGEN/dlatm2.f index 446f5a801..d7a6d19f3 100644 --- a/lapack-netlib/TESTING/MATGEN/dlatm2.f +++ b/lapack-netlib/TESTING/MATGEN/dlatm2.f @@ -182,7 +182,7 @@ *> \verbatim *> SPARSE is DOUBLE PRECISION between 0. and 1. *> On entry specifies the sparsity of the matrix -*> if sparse matix is to be generated. +*> if sparse matrix is to be generated. 
+*> if sparse matrix is to be generated.
*> SPARSE should lie between 0 and 1. *> A uniform ( 0, 1 ) random number x is generated and *> compared to SPARSE; if x is larger the matrix entry diff --git a/lapack-netlib/TESTING/MATGEN/dlatm3.f b/lapack-netlib/TESTING/MATGEN/dlatm3.f index cf6da10f8..15f5ac080 100644 --- a/lapack-netlib/TESTING/MATGEN/dlatm3.f +++ b/lapack-netlib/TESTING/MATGEN/dlatm3.f @@ -199,7 +199,7 @@ *> \verbatim *> SPARSE is DOUBLE PRECISION between 0. and 1. *> On entry specifies the sparsity of the matrix -*> if sparse matix is to be generated. +*> if sparse matrix is to be generated. *> SPARSE should lie between 0 and 1. *> A uniform ( 0, 1 ) random number x is generated and *> compared to SPARSE; if x is larger the matrix entry diff --git a/lapack-netlib/TESTING/MATGEN/dlatmr.f b/lapack-netlib/TESTING/MATGEN/dlatmr.f index e7ea41907..a914481f7 100644 --- a/lapack-netlib/TESTING/MATGEN/dlatmr.f +++ b/lapack-netlib/TESTING/MATGEN/dlatmr.f @@ -303,20 +303,6 @@ *> Not referenced if PIVTNG = 'N'. Not modified. *> \endverbatim *> -*> \param[in] SPARSE -*> \verbatim -*> SPARSE is DOUBLE PRECISION -*> On entry specifies the sparsity of the matrix if a sparse -*> matrix is to be generated. SPARSE should lie between -*> 0 and 1. To generate a sparse matrix, for each matrix entry -*> a uniform ( 0, 1 ) random number x is generated and -*> compared to SPARSE; if x is larger the matrix entry -*> is unchanged and if x is smaller the entry is set -*> to zero. Thus on the average a fraction SPARSE of the -*> entries will be set to zero. -*> Not modified. -*> \endverbatim -*> *> \param[in] KL *> \verbatim *> KL is INTEGER @@ -337,6 +323,20 @@ *> Not modified. *> \endverbatim *> +*> \param[in] SPARSE +*> \verbatim +*> SPARSE is DOUBLE PRECISION +*> On entry specifies the sparsity of the matrix if a sparse +*> matrix is to be generated. SPARSE should lie between +*> 0 and 1. To generate a sparse matrix, for each matrix entry +*> a uniform ( 0, 1 ) random number x is generated and +*> compared to SPARSE; if x is larger the matrix entry +*> is unchanged and if x is smaller the entry is set +*> to zero. Thus on the average a fraction SPARSE of the +*> entries will be set to zero. +*> Not modified. +*> \endverbatim +*> *> \param[in] ANORM *> \verbatim *> ANORM is DOUBLE PRECISION @@ -398,7 +398,7 @@ *> If PACK='C' or 'R', LDA must be at least 1. *> If PACK='B', or 'Q', LDA must be MIN ( KU+1, N ) *> If PACK='Z', LDA must be at least KUU+KLL+1, where -*> KUU = MIN ( KU, N-1 ) and KLL = MIN ( KL, N-1 ) +*> KUU = MIN ( KU, N-1 ) and KLL = MIN ( KL, M-1 ) *> Not modified. *> \endverbatim *> diff --git a/lapack-netlib/TESTING/MATGEN/slatm2.f b/lapack-netlib/TESTING/MATGEN/slatm2.f index fc7e78126..2473f1f44 100644 --- a/lapack-netlib/TESTING/MATGEN/slatm2.f +++ b/lapack-netlib/TESTING/MATGEN/slatm2.f @@ -182,7 +182,7 @@ *> \verbatim *> SPARSE is REAL between 0. and 1. *> On entry specifies the sparsity of the matrix -*> if sparse matix is to be generated. +*> if sparse matrix is to be generated. *> SPARSE should lie between 0 and 1. *> A uniform ( 0, 1 ) random number x is generated and *> compared to SPARSE; if x is larger the matrix entry diff --git a/lapack-netlib/TESTING/MATGEN/slatm3.f b/lapack-netlib/TESTING/MATGEN/slatm3.f index e61c954bd..18c2c07d5 100644 --- a/lapack-netlib/TESTING/MATGEN/slatm3.f +++ b/lapack-netlib/TESTING/MATGEN/slatm3.f @@ -199,7 +199,7 @@ *> \verbatim *> SPARSE is REAL between 0. and 1. *> On entry specifies the sparsity of the matrix -*> if sparse matix is to be generated. 
+*> if sparse matrix is to be generated. *> SPARSE should lie between 0 and 1. *> A uniform ( 0, 1 ) random number x is generated and *> compared to SPARSE; if x is larger the matrix entry diff --git a/lapack-netlib/TESTING/MATGEN/slatmr.f b/lapack-netlib/TESTING/MATGEN/slatmr.f index e4705994a..c2cedd21c 100644 --- a/lapack-netlib/TESTING/MATGEN/slatmr.f +++ b/lapack-netlib/TESTING/MATGEN/slatmr.f @@ -303,20 +303,6 @@ *> Not referenced if PIVTNG = 'N'. Not modified. *> \endverbatim *> -*> \param[in] SPARSE -*> \verbatim -*> SPARSE is REAL -*> On entry specifies the sparsity of the matrix if a sparse -*> matrix is to be generated. SPARSE should lie between -*> 0 and 1. To generate a sparse matrix, for each matrix entry -*> a uniform ( 0, 1 ) random number x is generated and -*> compared to SPARSE; if x is larger the matrix entry -*> is unchanged and if x is smaller the entry is set -*> to zero. Thus on the average a fraction SPARSE of the -*> entries will be set to zero. -*> Not modified. -*> \endverbatim -*> *> \param[in] KL *> \verbatim *> KL is INTEGER @@ -337,6 +323,20 @@ *> Not modified. *> \endverbatim *> +*> \param[in] SPARSE +*> \verbatim +*> SPARSE is REAL +*> On entry specifies the sparsity of the matrix if a sparse +*> matrix is to be generated. SPARSE should lie between +*> 0 and 1. To generate a sparse matrix, for each matrix entry +*> a uniform ( 0, 1 ) random number x is generated and +*> compared to SPARSE; if x is larger the matrix entry +*> is unchanged and if x is smaller the entry is set +*> to zero. Thus on the average a fraction SPARSE of the +*> entries will be set to zero. +*> Not modified. +*> \endverbatim +*> *> \param[in] ANORM *> \verbatim *> ANORM is REAL @@ -398,7 +398,7 @@ *> If PACK='C' or 'R', LDA must be at least 1. *> If PACK='B', or 'Q', LDA must be MIN ( KU+1, N ) *> If PACK='Z', LDA must be at least KUU+KLL+1, where -*> KUU = MIN ( KU, N-1 ) and KLL = MIN ( KL, N-1 ) +*> KUU = MIN ( KU, N-1 ) and KLL = MIN ( KL, M-1 ) *> Not modified. *> \endverbatim *> diff --git a/lapack-netlib/TESTING/MATGEN/zlahilb.f b/lapack-netlib/TESTING/MATGEN/zlahilb.f index 43057931d..e5a317821 100644 --- a/lapack-netlib/TESTING/MATGEN/zlahilb.f +++ b/lapack-netlib/TESTING/MATGEN/zlahilb.f @@ -164,7 +164,7 @@ INTEGER NMAX_EXACT, NMAX_APPROX, SIZE_D PARAMETER (NMAX_EXACT = 6, NMAX_APPROX = 11, SIZE_D = 8) * -* d's are generated from random permuation of those eight elements. +* d's are generated from random permutation of those eight elements. COMPLEX*16 d1(8), d2(8), invd1(8), invd2(8) DATA D1 /(-1,0),(0,1),(-1,-1),(0,-1),(1,0),(-1,1),(1,1),(1,-1)/ DATA D2 /(-1,0),(0,-1),(-1,1),(0,1),(1,0),(-1,-1),(1,-1),(1,1)/ diff --git a/lapack-netlib/TESTING/MATGEN/zlatm2.f b/lapack-netlib/TESTING/MATGEN/zlatm2.f index 2de69eeca..ea93431e7 100644 --- a/lapack-netlib/TESTING/MATGEN/zlatm2.f +++ b/lapack-netlib/TESTING/MATGEN/zlatm2.f @@ -185,7 +185,7 @@ *> \verbatim *> SPARSE is DOUBLE PRECISION between 0. and 1. *> On entry specifies the sparsity of the matrix -*> if sparse matix is to be generated. +*> if sparse matrix is to be generated. *> SPARSE should lie between 0 and 1. *> A uniform ( 0, 1 ) random number x is generated and *> compared to SPARSE; if x is larger the matrix entry diff --git a/lapack-netlib/TESTING/MATGEN/zlatm3.f b/lapack-netlib/TESTING/MATGEN/zlatm3.f index 42d58c853..25d6233f3 100644 --- a/lapack-netlib/TESTING/MATGEN/zlatm3.f +++ b/lapack-netlib/TESTING/MATGEN/zlatm3.f @@ -202,7 +202,7 @@ *> \verbatim *> SPARSE is DOUBLE PRECISION between 0. and 1. 
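The PACK='Z' correction repeated in the *LATMR headers, KLL = MIN( KL, M-1 ) rather than MIN( KL, N-1 ), follows directly from band storage: an M-by-N matrix can have at most N-1 superdiagonals but only M-1 subdiagonals, and the stored band is KUU+KLL+1 rows tall. As a worked example, with M = 3, N = 10, KL = 5, KU = 1 the corrected bound is LDA >= 1 + MIN(5,2) + 1 = 4, while the old formula demanded LDA >= 1 + MIN(5,9) + 1 = 7 rows for subdiagonals that cannot exist in a 3-row matrix.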
*> On entry specifies the sparsity of the matrix -*> if sparse matix is to be generated. +*> if sparse matrix is to be generated. *> SPARSE should lie between 0 and 1. *> A uniform ( 0, 1 ) random number x is generated and *> compared to SPARSE; if x is larger the matrix entry diff --git a/lapack-netlib/TESTING/MATGEN/zlatmr.f b/lapack-netlib/TESTING/MATGEN/zlatmr.f index 6685a3570..56285e1f4 100644 --- a/lapack-netlib/TESTING/MATGEN/zlatmr.f +++ b/lapack-netlib/TESTING/MATGEN/zlatmr.f @@ -316,20 +316,6 @@ *> Not referenced if PIVTNG = 'N'. Not modified. *> \endverbatim *> -*> \param[in] SPARSE -*> \verbatim -*> SPARSE is DOUBLE PRECISION -*> On entry specifies the sparsity of the matrix if a sparse -*> matrix is to be generated. SPARSE should lie between -*> 0 and 1. To generate a sparse matrix, for each matrix entry -*> a uniform ( 0, 1 ) random number x is generated and -*> compared to SPARSE; if x is larger the matrix entry -*> is unchanged and if x is smaller the entry is set -*> to zero. Thus on the average a fraction SPARSE of the -*> entries will be set to zero. -*> Not modified. -*> \endverbatim -*> *> \param[in] KL *> \verbatim *> KL is INTEGER @@ -350,6 +336,20 @@ *> Not modified. *> \endverbatim *> +*> \param[in] SPARSE +*> \verbatim +*> SPARSE is DOUBLE PRECISION +*> On entry specifies the sparsity of the matrix if a sparse +*> matrix is to be generated. SPARSE should lie between +*> 0 and 1. To generate a sparse matrix, for each matrix entry +*> a uniform ( 0, 1 ) random number x is generated and +*> compared to SPARSE; if x is larger the matrix entry +*> is unchanged and if x is smaller the entry is set +*> to zero. Thus on the average a fraction SPARSE of the +*> entries will be set to zero. +*> Not modified. +*> \endverbatim +*> *> \param[in] ANORM *> \verbatim *> ANORM is DOUBLE PRECISION @@ -416,7 +416,7 @@ *> If PACK='C' or 'R', LDA must be at least 1. *> If PACK='B', or 'Q', LDA must be MIN ( KU+1, N ) *> If PACK='Z', LDA must be at least KUU+KLL+1, where -*> KUU = MIN ( KU, N-1 ) and KLL = MIN ( KL, N-1 ) +*> KUU = MIN ( KU, N-1 ) and KLL = MIN ( KL, M-1 ) *> Not modified. *> \endverbatim *> diff --git a/lapack-netlib/TESTING/Makefile b/lapack-netlib/TESTING/Makefile index 8b883c0fa..bdea2bfaa 100644 --- a/lapack-netlib/TESTING/Makefile +++ b/lapack-netlib/TESTING/Makefile @@ -34,8 +34,10 @@ # ####################################################################### -include ../make.inc +TOPSRCDIR = .. 
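Two recurring edits in the TESTING/Makefile hunks that follow are worth spelling out. First, declaring the aggregate targets .PHONY keeps a stray file named, say, clean or single from satisfying its target and silently skipping the recipe. Second, replacing the automatic variable $< with the explicit input file is a portability measure: POSIX only guarantees $< inside inference (suffix) rules, so naming stest.in directly also works with non-GNU make implementations. A hedged sketch of the resulting pattern, using names that do appear in this Makefile (recipe lines begin with a tab):

.PHONY: clean
stest.out: stest.in LIN/xlintsts
	./LIN/xlintsts < stest.in > stest.out 2>&1
clean:
	rm -f *.out core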
+include $(TOPSRCDIR)/make.inc +.PHONY: all all: single complex double complex16 singleproto doubleproto complexproto complex16proto SEIGTST= snep.out \ @@ -139,10 +141,13 @@ ZLINTST= ztest.out ZLINTSTPROTO= zctest.out ztest_rfp.out +.PHONY: single complex double complex16 single: $(SLINTST) $(SEIGTST) complex: $(CLINTST) $(CEIGTST) double: $(DLINTST) $(DEIGTST) complex16: $(ZLINTST) $(ZEIGTST) + +.PHONY: singleproto complexproto doubleproto complex16proto singleproto: $(SLINTSTPROTO) complexproto: $(CLINTSTPROTO) doubleproto: $(DLINTSTPROTO) @@ -153,61 +158,61 @@ complex16proto: $(ZLINTSTPROTO) stest.out: stest.in LIN/xlintsts @echo Testing REAL LAPACK linear equation routines - ./LIN/xlintsts < $< > $@ 2>&1 + ./LIN/xlintsts < stest.in > $@ 2>&1 # # ======== COMPLEX LIN TESTS ========================== ctest.out: ctest.in LIN/xlintstc @echo Testing COMPLEX LAPACK linear equation routines - ./LIN/xlintstc < $< > $@ 2>&1 + ./LIN/xlintstc < ctest.in > $@ 2>&1 # # ======== DOUBLE LIN TESTS =========================== dtest.out: dtest.in LIN/xlintstd @echo Testing DOUBLE PRECISION LAPACK linear equation routines - ./LIN/xlintstd < $< > $@ 2>&1 + ./LIN/xlintstd < dtest.in > $@ 2>&1 # # ======== COMPLEX16 LIN TESTS ======================== ztest.out: ztest.in LIN/xlintstz @echo Testing COMPLEX16 LAPACK linear equation routines - ./LIN/xlintstz < $< > $@ 2>&1 + ./LIN/xlintstz < ztest.in > $@ 2>&1 # # ======== SINGLE-DOUBLE PROTO LIN TESTS ============== dstest.out: dstest.in LIN/xlintstds @echo Testing SINGLE-DOUBLE PRECISION LAPACK prototype linear equation routines - ./LIN/xlintstds < $< > $@ 2>&1 + ./LIN/xlintstds < dstest.in > $@ 2>&1 # # ======== COMPLEX-COMPLEX16 LIN TESTS ======================== zctest.out: zctest.in LIN/xlintstzc @echo Testing COMPLEX-COMPLEX16 LAPACK prototype linear equation routines - ./LIN/xlintstzc < $< > $@ 2>&1 + ./LIN/xlintstzc < zctest.in > $@ 2>&1 # # ======== SINGLE RFP LIN TESTS ======================== stest_rfp.out: stest_rfp.in LIN/xlintstrfs @echo Testing REAL LAPACK RFP prototype linear equation routines - ./LIN/xlintstrfs < $< > $@ 2>&1 + ./LIN/xlintstrfs < stest_rfp.in > $@ 2>&1 # # ======== COMPLEX16 RFP LIN TESTS ======================== dtest_rfp.out: dtest_rfp.in LIN/xlintstrfd @echo Testing DOUBLE PRECISION LAPACK RFP prototype linear equation routines - ./LIN/xlintstrfd < $< > $@ 2>&1 + ./LIN/xlintstrfd < dtest_rfp.in > $@ 2>&1 # # ======== COMPLEX16 RFP LIN TESTS ======================== ctest_rfp.out: ctest_rfp.in LIN/xlintstrfc @echo Testing COMPLEX LAPACK RFP prototype linear equation routines - ./LIN/xlintstrfc < $< > $@ 2>&1 + ./LIN/xlintstrfc < ctest_rfp.in > $@ 2>&1 # # ======== COMPLEX16 RFP LIN TESTS ======================== ztest_rfp.out: ztest_rfp.in LIN/xlintstrfz @echo Testing COMPLEX16 LAPACK RFP prototype linear equation routines - ./LIN/xlintstrfz < $< > $@ 2>&1 + ./LIN/xlintstrfz < ztest_rfp.in > $@ 2>&1 # # # ======== SINGLE EIG TESTS =========================== @@ -215,329 +220,329 @@ ztest_rfp.out: ztest_rfp.in LIN/xlintstrfz snep.out: nep.in EIG/xeigtsts @echo NEP: Testing Nonsymmetric Eigenvalue Problem routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < nep.in > $@ 2>&1 ssep.out: sep.in EIG/xeigtsts @echo SEP: Testing Symmetric Eigenvalue Problem routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sep.in > $@ 2>&1 sse2.out: se2.in EIG/xeigtsts @echo SEP: Testing Symmetric Eigenvalue Problem routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < se2.in > $@ 2>&1 ssvd.out: svd.in EIG/xeigtsts @echo SVD: 
Testing Singular Value Decomposition routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < svd.in > $@ 2>&1 sec.out: sec.in EIG/xeigtsts @echo SEC: Testing REAL Eigen Condition Routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sec.in > $@ 2>&1 sed.out: sed.in EIG/xeigtsts @echo SEV: Testing REAL Nonsymmetric Eigenvalue Driver - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sed.in > $@ 2>&1 sgg.out: sgg.in EIG/xeigtsts @echo SGG: Testing REAL Nonsymmetric Generalized Eigenvalue Problem routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sgg.in > $@ 2>&1 sgd.out: sgd.in EIG/xeigtsts @echo SGD: Testing REAL Nonsymmetric Generalized Eigenvalue Problem driver routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sgd.in > $@ 2>&1 ssb.out: ssb.in EIG/xeigtsts @echo SSB: Testing REAL Symmetric Eigenvalue Problem routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < ssb.in > $@ 2>&1 ssg.out: ssg.in EIG/xeigtsts @echo SSG: Testing REAL Symmetric Generalized Eigenvalue Problem routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < ssg.in > $@ 2>&1 sbal.out: sbal.in EIG/xeigtsts @echo SGEBAL: Testing the balancing of a REAL general matrix - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sbal.in > $@ 2>&1 sbak.out: sbak.in EIG/xeigtsts @echo SGEBAK: Testing the back transformation of a REAL balanced matrix - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sbak.in > $@ 2>&1 sgbal.out: sgbal.in EIG/xeigtsts @echo SGGBAL: Testing the balancing of a pair of REAL general matrices - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sgbal.in > $@ 2>&1 sgbak.out: sgbak.in EIG/xeigtsts @echo SGGBAK: Testing the back transformation of a pair of REAL balanced matrices - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sgbak.in > $@ 2>&1 sbb.out: sbb.in EIG/xeigtsts @echo SBB: Testing banded Singular Value Decomposition routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < sbb.in > $@ 2>&1 sglm.out: glm.in EIG/xeigtsts @echo GLM: Testing Generalized Linear Regression Model routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < glm.in > $@ 2>&1 sgqr.out: gqr.in EIG/xeigtsts @echo GQR: Testing Generalized QR and RQ factorization routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < gqr.in > $@ 2>&1 sgsv.out: gsv.in EIG/xeigtsts @echo GSV: Testing Generalized Singular Value Decomposition routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < gsv.in > $@ 2>&1 scsd.out: csd.in EIG/xeigtsts @echo CSD: Testing CS Decomposition routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < csd.in > $@ 2>&1 slse.out: lse.in EIG/xeigtsts @echo LSE: Testing Constrained Linear Least Squares routines - ./EIG/xeigtsts < $< > $@ 2>&1 + ./EIG/xeigtsts < lse.in > $@ 2>&1 # # ======== COMPLEX EIG TESTS =========================== cnep.out: nep.in EIG/xeigtstc @echo NEP: Testing Nonsymmetric Eigenvalue Problem routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < nep.in > $@ 2>&1 csep.out: sep.in EIG/xeigtstc @echo SEP: Testing Symmetric Eigenvalue Problem routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < sep.in > $@ 2>&1 cse2.out: se2.in EIG/xeigtstc @echo SEP: Testing Symmetric Eigenvalue Problem routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < se2.in > $@ 2>&1 csvd.out: svd.in EIG/xeigtstc @echo SVD: Testing Singular Value Decomposition routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < svd.in > $@ 2>&1 cec.out: cec.in EIG/xeigtstc @echo CEC: Testing COMPLEX Eigen Condition Routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc 
< cec.in > $@ 2>&1 ced.out: ced.in EIG/xeigtstc @echo CES: Testing COMPLEX Nonsymmetric Schur Form Driver - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < ced.in > $@ 2>&1 cgg.out: cgg.in EIG/xeigtstc @echo CGG: Testing COMPLEX Nonsymmetric Generalized Eigenvalue Problem routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < cgg.in > $@ 2>&1 cgd.out: cgd.in EIG/xeigtstc @echo CGD: Testing COMPLEX Nonsymmetric Generalized Eigenvalue Problem driver routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < cgd.in > $@ 2>&1 csb.out: csb.in EIG/xeigtstc @echo CHB: Testing Hermitian Eigenvalue Problem routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < csb.in > $@ 2>&1 csg.out: csg.in EIG/xeigtstc @echo CSG: Testing Symmetric Generalized Eigenvalue Problem routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < csg.in > $@ 2>&1 cbal.out: cbal.in EIG/xeigtstc @echo CGEBAL: Testing the balancing of a COMPLEX general matrix - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < cbal.in > $@ 2>&1 cbak.out: cbak.in EIG/xeigtstc @echo CGEBAK: Testing the back transformation of a COMPLEX balanced matrix - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < cbak.in > $@ 2>&1 cgbal.out: cgbal.in EIG/xeigtstc @echo CGGBAL: Testing the balancing of a pair of COMPLEX general matrices - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < cgbal.in > $@ 2>&1 cgbak.out: cgbak.in EIG/xeigtstc @echo CGGBAK: Testing the back transformation of a pair of COMPLEX balanced matrices - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < cgbak.in > $@ 2>&1 cbb.out: cbb.in EIG/xeigtstc @echo CBB: Testing banded Singular Value Decomposition routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < cbb.in > $@ 2>&1 cglm.out: glm.in EIG/xeigtstc @echo GLM: Testing Generalized Linear Regression Model routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < glm.in > $@ 2>&1 cgqr.out: gqr.in EIG/xeigtstc @echo GQR: Testing Generalized QR and RQ factorization routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < gqr.in > $@ 2>&1 cgsv.out: gsv.in EIG/xeigtstc @echo GSV: Testing Generalized Singular Value Decomposition routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < gsv.in > $@ 2>&1 ccsd.out: csd.in EIG/xeigtstc @echo CSD: Testing CS Decomposition routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < csd.in > $@ 2>&1 clse.out: lse.in EIG/xeigtstc @echo LSE: Testing Constrained Linear Least Squares routines - ./EIG/xeigtstc < $< > $@ 2>&1 + ./EIG/xeigtstc < lse.in > $@ 2>&1 # # ======== DOUBLE EIG TESTS =========================== dnep.out: nep.in EIG/xeigtstd @echo NEP: Testing Nonsymmetric Eigenvalue Problem routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < nep.in > $@ 2>&1 dsep.out: sep.in EIG/xeigtstd @echo SEP: Testing Symmetric Eigenvalue Problem routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < sep.in > $@ 2>&1 dse2.out: se2.in EIG/xeigtstd @echo SEP: Testing Symmetric Eigenvalue Problem routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < se2.in > $@ 2>&1 dsvd.out: svd.in EIG/xeigtstd @echo SVD: Testing Singular Value Decomposition routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < svd.in > $@ 2>&1 dec.out: dec.in EIG/xeigtstd @echo DEC: Testing DOUBLE PRECISION Eigen Condition Routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dec.in > $@ 2>&1 ded.out: ded.in EIG/xeigtstd @echo DEV: Testing DOUBLE PRECISION Nonsymmetric Eigenvalue Driver - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < ded.in > $@ 2>&1 dgg.out: dgg.in EIG/xeigtstd @echo 
DGG: Testing DOUBLE PRECISION Nonsymmetric Generalized Eigenvalue Problem routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dgg.in > $@ 2>&1 dgd.out: dgd.in EIG/xeigtstd @echo DGD: Testing DOUBLE PRECISION Nonsymmetric Generalized Eigenvalue Problem driver routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dgd.in > $@ 2>&1 dsb.out: dsb.in EIG/xeigtstd @echo DSB: Testing DOUBLE PRECISION Symmetric Eigenvalue Problem routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dsb.in > $@ 2>&1 dsg.out: dsg.in EIG/xeigtstd @echo DSG: Testing DOUBLE PRECISION Symmetric Generalized Eigenvalue Problem routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dsg.in > $@ 2>&1 dbal.out: dbal.in EIG/xeigtstd @echo DGEBAL: Testing the balancing of a DOUBLE PRECISION general matrix - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dbal.in > $@ 2>&1 dbak.out: dbak.in EIG/xeigtstd @echo DGEBAK: Testing the back transformation of a DOUBLE PRECISION balanced matrix - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dbak.in > $@ 2>&1 dgbal.out: dgbal.in EIG/xeigtstd @echo DGGBAL: Testing the balancing of a pair of DOUBLE PRECISION general matrices - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dgbal.in > $@ 2>&1 dgbak.out: dgbak.in EIG/xeigtstd @echo DGGBAK: Testing the back transformation of a pair of DOUBLE PRECISION balanced matrices - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dgbak.in > $@ 2>&1 dbb.out: dbb.in EIG/xeigtstd @echo DBB: Testing banded Singular Value Decomposition routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < dbb.in > $@ 2>&1 dglm.out: glm.in EIG/xeigtstd @echo GLM: Testing Generalized Linear Regression Model routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < glm.in > $@ 2>&1 dgqr.out: gqr.in EIG/xeigtstd @echo GQR: Testing Generalized QR and RQ factorization routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < gqr.in > $@ 2>&1 dgsv.out: gsv.in EIG/xeigtstd @echo GSV: Testing Generalized Singular Value Decomposition routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < gsv.in > $@ 2>&1 dcsd.out: csd.in EIG/xeigtstd @echo CSD: Testing CS Decomposition routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < csd.in > $@ 2>&1 dlse.out: lse.in EIG/xeigtstd @echo LSE: Testing Constrained Linear Least Squares routines - ./EIG/xeigtstd < $< > $@ 2>&1 + ./EIG/xeigtstd < lse.in > $@ 2>&1 # # ======== COMPLEX16 EIG TESTS =========================== znep.out: nep.in EIG/xeigtstz @echo NEP: Testing Nonsymmetric Eigenvalue Problem routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < nep.in > $@ 2>&1 zsep.out: sep.in EIG/xeigtstz @echo SEP: Testing Symmetric Eigenvalue Problem routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < sep.in > $@ 2>&1 zse2.out: se2.in EIG/xeigtstz @echo SEP: Testing Symmetric Eigenvalue Problem routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < se2.in > $@ 2>&1 zsvd.out: svd.in EIG/xeigtstz @echo SVD: Testing Singular Value Decomposition routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < svd.in > $@ 2>&1 zec.out: zec.in EIG/xeigtstz @echo ZEC: Testing COMPLEX16 Eigen Condition Routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zec.in > $@ 2>&1 zed.out: zed.in EIG/xeigtstz @echo ZES: Testing COMPLEX16 Nonsymmetric Schur Form Driver - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zed.in > $@ 2>&1 zgg.out: zgg.in EIG/xeigtstz @echo ZGG: Testing COMPLEX16 Nonsymmetric Generalized Eigenvalue Problem routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zgg.in > 
$@ 2>&1 zgd.out: zgd.in EIG/xeigtstz @echo ZGD: Testing COMPLEX16 Nonsymmetric Generalized Eigenvalue Problem driver routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zgd.in > $@ 2>&1 zsb.out: zsb.in EIG/xeigtstz @echo ZHB: Testing Hermitian Eigenvalue Problem routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zsb.in > $@ 2>&1 zsg.out: zsg.in EIG/xeigtstz @echo ZSG: Testing Symmetric Generalized Eigenvalue Problem routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zsg.in > $@ 2>&1 zbal.out: zbal.in EIG/xeigtstz @echo ZGEBAL: Testing the balancing of a COMPLEX16 general matrix - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zbal.in > $@ 2>&1 zbak.out: zbak.in EIG/xeigtstz @echo ZGEBAK: Testing the back transformation of a COMPLEX16 balanced matrix - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zbak.in > $@ 2>&1 zgbal.out: zgbal.in EIG/xeigtstz @echo ZGGBAL: Testing the balancing of a pair of COMPLEX general matrices - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zgbal.in > $@ 2>&1 zgbak.out: zgbak.in EIG/xeigtstz @echo ZGGBAK: Testing the back transformation of a pair of COMPLEX16 balanced matrices - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zgbak.in > $@ 2>&1 zbb.out: zbb.in EIG/xeigtstz @echo ZBB: Testing banded Singular Value Decomposition routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < zbb.in > $@ 2>&1 zglm.out: glm.in EIG/xeigtstz @echo GLM: Testing Generalized Linear Regression Model routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < glm.in > $@ 2>&1 zgqr.out: gqr.in EIG/xeigtstz @echo GQR: Testing Generalized QR and RQ factorization routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < gqr.in > $@ 2>&1 zgsv.out: gsv.in EIG/xeigtstz @echo GSV: Testing Generalized Singular Value Decomposition routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < gsv.in > $@ 2>&1 zcsd.out: csd.in EIG/xeigtstz @echo CSD: Testing CS Decomposition routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < csd.in > $@ 2>&1 zlse.out: lse.in EIG/xeigtstz @echo LSE: Testing Constrained Linear Least Squares routines - ./EIG/xeigtstz < $< > $@ 2>&1 + ./EIG/xeigtstz < lse.in > $@ 2>&1 # ============================================================================== LIN/xlintsts: $(FRCLIN) $(FRC) @@ -582,6 +587,7 @@ EIG/xeigtstd: $(FRCEIG) $(FRC) EIG/xeigtstz: $(FRCEIG) $(FRC) $(MAKE) -C EIG xeigtstz +.PHONY: clean cleantest clean: cleantest cleantest: rm -f *.out core diff --git a/lapack-netlib/TESTING/ctest.in b/lapack-netlib/TESTING/ctest.in index 2f3853a03..a3588b4a1 100644 --- a/lapack-netlib/TESTING/ctest.in +++ b/lapack-netlib/TESTING/ctest.in @@ -50,3 +50,4 @@ CQX CXQ CTQ CTS +CHH diff --git a/lapack-netlib/TESTING/dtest.in b/lapack-netlib/TESTING/dtest.in index a7a16ee41..29bb8b92e 100644 --- a/lapack-netlib/TESTING/dtest.in +++ b/lapack-netlib/TESTING/dtest.in @@ -44,3 +44,4 @@ DQX DXQ DTQ DTS +DHH diff --git a/lapack-netlib/TESTING/stest.in b/lapack-netlib/TESTING/stest.in index d32047047..27ac30040 100644 --- a/lapack-netlib/TESTING/stest.in +++ b/lapack-netlib/TESTING/stest.in @@ -44,3 +44,4 @@ SQX SXQ STQ STS +SHH diff --git a/lapack-netlib/TESTING/ztest.in b/lapack-netlib/TESTING/ztest.in index 520253941..58da33d60 100644 --- a/lapack-netlib/TESTING/ztest.in +++ b/lapack-netlib/TESTING/ztest.in @@ -50,3 +50,4 @@ ZQX ZXQ ZTQ ZTS +ZHH diff --git a/lapack-netlib/appveyor.yml b/lapack-netlib/appveyor.yml deleted file mode 100644 index 7fc3fbdd7..000000000 --- a/lapack-netlib/appveyor.yml +++ /dev/null @@ -1,64 +0,0 @@ -# 
Windows testing. -# Syntax for this file: -# http://www.appveyor.com/docs/appveyor-yml - -shallow_clone: true - -platform: x64 - -cache: - - x86_64-4.9.2-release-win32-seh-rt_v4-rev4.7z - - i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z - -environment: - CTEST_OUTPUT_ON_FAILURE: 1 - matrix: - - MINGW_DIR: mingw64 - MINGW_URL: https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/4.9.2/threads-win32/seh/x86_64-4.9.2-release-win32-seh-rt_v4-rev4.7z/download - MINGW_ARCHIVE: x86_64-4.9.2-release-win32-seh-rt_v4-rev4.7z - - MINGW_DIR: mingw32 - MINGW_URL: https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/4.9.2/threads-win32/dwarf/i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z/download - MINGW_ARCHIVE: i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z - -install: - - if not exist "%MINGW_ARCHIVE%" appveyor DownloadFile "%MINGW_URL%" -FileName "%MINGW_ARCHIVE%" - - 7z x -y "%MINGW_ARCHIVE%" > nul - # CMake refuses to generate MinGW Makefiles if sh.exe is in the Path - - ps: Get-Command sh.exe -All | Remove-Item - -build_script: - - echo "NUMBER_OF_PROCESSORS=%NUMBER_OF_PROCESSORS%" - - set PATH=%CD%\%MINGW_DIR%\bin;%PATH% - - g++ --version - - mingw32-make --version - - cmake --version - - if "%APPVEYOR_REPO_TAG%"=="true" (set CMAKE_BUILD_TYPE=Release) else (set CMAKE_BUILD_TYPE=Debug) - - set SRC_DIR=%CD% - - echo %SRC_DIR% - - set BLD_DIR=%SRC_DIR%\..\lapack-appveyor-bld - - set INST_DIR=%SRC_DIR%\..\lapack-appveyor-install - - mkdir -p %BLD_DIR% - - cd %BLD_DIR% - # See issue #17 on github dashboard. Once resolved, use -DCBLAS=ON - # - cmake -DCMAKE_INSTALL_PREFIX=${INST_DIR} -DLAPACKE=ON ${SRC_DIR} - - cmake - -G "MinGW Makefiles" - -DBUILDNAME:STRING="appveyor-%MINGW_DIR%-%APPVEYOR_REPO_BRANCH%" - -DCMAKE_BUILD_TYPE=%CMAKE_BUILD_TYPE% - -DCMAKE_INSTALL_PREFIX=%INST_DIR% - -DCBLAS:BOOL=ON - -DLAPACKE:BOOL=ON - -DBUILD_TESTING=ON - -DLAPACKE_WITH_TMG:BOOL=ON - %SRC_DIR% - - mingw32-make -j%NUMBER_OF_PROCESSORS% - -test_script: - - ctest -D ExperimentalStart - - ctest -D ExperimentalConfigure - - ctest -D ExperimentalBuild -j%NUMBER_OF_PROCESSORS% - - ctest -D ExperimentalTest --schedule-random -j%NUMBER_OF_PROCESSORS% --output-on-failure --timeout 100 -E "CBLAS\-.*cblat1" - - ctest -D ExperimentalSubmit - -after_test: - - mingw32-make install -j%NUMBER_OF_PROCESSORS% diff --git a/lapack-netlib/lapack_build.cmake b/lapack-netlib/lapack_build.cmake index 68744cc4c..39878cb24 100644 --- a/lapack-netlib/lapack_build.cmake +++ b/lapack-netlib/lapack_build.cmake @@ -69,7 +69,8 @@ find_program(HOSTNAME NAMES hostname) find_program(UNAME NAMES uname) # Get the build name and hostname -exec_program(${HOSTNAME} ARGS OUTPUT_VARIABLE hostname) +execute_process(${HOSTNAME} + OUTPUT_VARIABLE hostname) string(REGEX REPLACE "[/\\\\+<> #]" "-" hostname "${hostname}") message("HOSTNAME: ${hostname}") @@ -83,7 +84,8 @@ find_package(Git REQUIRED) set(CTEST_GIT_COMMAND ${GIT_EXECUTABLE}) set(CTEST_UPDATE_COMMAND ${GIT_EXECUTABLE}) macro(getuname name flag) - exec_program("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}") + execute_process(COMMAND "${UNAME}" "${flag}" + OUTPUT_VARIABLE "${name}") string(REGEX REPLACE "[/\\\\+<> #]" "-" "${name}" "${${name}}") string(REGEX REPLACE "^(......|.....|....|...|..|.).*" "\\1" "${name}" "${${name}}") endmacro() @@ -167,7 +169,7 @@ endif() # dashboard then set this variable to the directory # the dashboard should be in make_directory("${CTEST_DASHBOARD_ROOT}") 
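The lapack_build.cmake hunks above replace exec_program, deprecated since CMake 3.0, with execute_process. One caveat: execute_process takes its program through an explicit COMMAND keyword, as the getuname macro does, so the hostname query is more robustly written as in this hedged sketch (OUTPUT_STRIP_TRAILING_WHITESPACE drops the trailing newline that hostname prints):

execute_process(COMMAND ${HOSTNAME}
                OUTPUT_VARIABLE hostname
                OUTPUT_STRIP_TRAILING_WHITESPACE)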
-# these are the the name of the source and binary directory on disk. +# these are the names of the source and binary directory on disk. # They will be appended to DASHBOARD_ROOT set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_DIR_NAME}") set(CTEST_BINARY_DIRECTORY "${CTEST_SOURCE_DIRECTORY}-${CTEST_BUILD_NAME}") diff --git a/lapack-netlib/lapack_testing.py b/lapack-netlib/lapack_testing.py index 5d07e1e87..5582744a0 100755 --- a/lapack-netlib/lapack_testing.py +++ b/lapack-netlib/lapack_testing.py @@ -12,8 +12,8 @@ import os, sys, math import getopt # Arguments try: - opts, args = getopt.getopt(sys.argv[1:], "hd:srep:t:n", - ["help", "dir", "short", "run", "error","prec=","test=","number"]) + opts, args = getopt.getopt(sys.argv[1:], "hd:b:srep:t:n", + ["help", "dir", "bin", "short", "run", "error","prec=","test=","number"]) except getopt.error as msg: print(msg) @@ -29,14 +29,13 @@ only_numbers=0 test_dir='TESTING' bin_dir='bin/Release' -abs_bin_dir=os.path.normpath(os.path.join(os.getcwd(),bin_dir)) - for o, a in opts: if o in ("-h", "--help"): print(sys.argv[0]+" [-h|--help] [-d dir |--dir dir] [-s |--short] [-r |--run] [-e |--error] [-p p |--prec p] [-t test |--test test] [-n | --number]") print(" - h is to print this message") print(" - r is to use to run the LAPACK tests then analyse the output (.out files). By default, the script will not run all the LAPACK tests") print(" - d [dir] is to indicate where is the LAPACK testing directory (.out files). By default, the script will use .") + print(" - b [bin] is to indicate where the LAPACK binary files are located. By default, the script will use .") print(" LEVEL OF OUTPUT") print(" - x is to print a detailed summary") print(" - e is to print only the error summary") @@ -75,6 +74,8 @@ for o, a in opts: just_errors = 1 if o in ( '-p', '--prec' ): prec = a + if o in ( '-b', '--bin' ): + bin_dir = a if o in ( '-d', '--dir' ): test_dir = a if o in ( '-t', '--test' ): @@ -85,6 +86,8 @@ for o, a in opts: # process options +abs_bin_dir=os.path.normpath(os.path.join(os.getcwd(),bin_dir)) + os.chdir(test_dir) execution=1 @@ -114,10 +117,7 @@ def run_summary_test( f, cmdline, short_summary): pipe = open(cmdline,'r') r=0 else: - if os.name != 'nt': - cmdline='./' + cmdline - else : - cmdline=abs_bin_dir+os.path.sep+cmdline + cmdline = os.path.join(abs_bin_dir, cmdline) outfile=cmdline.split()[4] #pipe = open(outfile,'w') diff --git a/lapack-netlib/make.inc.example b/lapack-netlib/make.inc.example index d780c3a23..57fd51ebe 100644 --- a/lapack-netlib/make.inc.example +++ b/lapack-netlib/make.inc.example @@ -8,10 +8,10 @@ SHELL = /bin/sh # CC is the C compiler, normally invoked with options CFLAGS. # -CC = gcc +CC = gcc CFLAGS = -O3 -# Modify the FORTRAN and OPTS definitions to refer to the compiler +# Modify the FC and FFLAGS definitions to refer to the desired compiler # and desired compiler options for your machine. NOOPT refers to # the compiler options desired when NO OPTIMIZATION is selected. # @@ -19,23 +19,21 @@ CFLAGS = -O3 # and handle these quantities appropriately. As a consequence, one # should not compile LAPACK with flags such as -ffpe-trap=overflow. # -FORTRAN = gfortran -OPTS = -O2 -frecursive -DRVOPTS = $(OPTS) -NOOPT = -O0 -frecursive +FC = gfortran +FFLAGS = -O2 -frecursive +FFLAGS_DRV = $(FFLAGS) +FFLAGS_NOOPT = -O0 -frecursive -# Define LOADER and LOADOPTS to refer to the loader and desired -# load options for your machine.
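The make.inc.example renames in this hunk align the reference-LAPACK build variables with the conventional GNU spellings, so standard overrides work from the command line: FORTRAN becomes FC, OPTS becomes FFLAGS, DRVOPTS becomes FFLAGS_DRV, NOOPT becomes FFLAGS_NOOPT, LOADER/LOADOPTS give way to linking with the compiler driver plus LDFLAGS, and ARCH/ARCHFLAGS become AR/ARFLAGS. An illustrative invocation (the compiler choice here is hypothetical):

make FC=flang FFLAGS='-O2 -frecursive'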
# -LOADER = gfortran -LOADOPTS = +LDFLAGS = # The archiver and the flag(s) to use when building an archive # (library). If your system has no ranlib, set RANLIB = echo. # -ARCH = ar -ARCHFLAGS = cr -RANLIB = ranlib +AR = ar +ARFLAGS = cr +RANLIB = ranlib # Timer for the SECOND and DSECND routines # @@ -78,8 +76,8 @@ TIMER = INT_ETIME # machine-specific, optimized BLAS library should be used whenever # possible.) # -BLASLIB = ../../librefblas.a -CBLASLIB = ../../libcblas.a -LAPACKLIB = liblapack.a -TMGLIB = libtmglib.a -LAPACKELIB = liblapacke.a +BLASLIB = $(TOPSRCDIR)/librefblas.a +CBLASLIB = $(TOPSRCDIR)/libcblas.a +LAPACKLIB = $(TOPSRCDIR)/liblapack.a +TMGLIB = $(TOPSRCDIR)/libtmglib.a +LAPACKELIB = $(TOPSRCDIR)/liblapacke.a diff --git a/lapack-netlib/meson.build b/lapack-netlib/meson.build new file mode 100644 index 000000000..b1e9c6bc1 --- /dev/null +++ b/lapack-netlib/meson.build @@ -0,0 +1,28 @@ +# cd build +# meson --buildtype release --prefix=$HOME/.local/lapack .. +# ninja +# ninja install + +project('LAPACK', 'fortran', + default_options : ['default_library=static', 'libdir=lib/'], + version : '3.8.0') + +subdir('BLAS/SRC') +subdir('SRC') + +prec = get_option('realkind') + + +if prec == 'd' + bsrc = DBLAS1 + DBLAS2 + DBLAS3 + lsrc = DZLAUX + DSLASRC +elif prec == 's' + bsrc = SBLAS1 + SBLAS2 + SBLAS3 + lsrc = SCLAUX + SLASRC +endif + +blas = library('blas', bsrc, + install : true) + +lapack = library('lapack', lsrc, ALLAUX, + install : true) diff --git a/lapack-netlib/meson_options.txt b/lapack-netlib/meson_options.txt new file mode 100644 index 000000000..b378e3329 --- /dev/null +++ b/lapack-netlib/meson_options.txt @@ -0,0 +1,3 @@ +option('realkind', type : 'string', value : 'd', + description : 's: real32 d: real64 c: complex32 z: complex64') + diff --git a/lapack/CMakeLists.txt b/lapack/CMakeLists.txt index d48a270ab..fd4e57048 100644 --- a/lapack/CMakeLists.txt +++ b/lapack/CMakeLists.txt @@ -2,9 +2,9 @@ include_directories(${PROJECT_SOURCE_DIR}) include_directories(${PROJECT_BINARY_DIR}) +list (REMOVE_ITEM FLOAT_TYPES "BFLOAT16") set(LAPACK_SOURCES - getrf/getrf_single.c potrf/potrf_U_single.c potrf/potrf_L_single.c lauum/lauum_U_single.c @@ -45,6 +45,13 @@ GenerateNamedObjects("${LAPACK_MANGLED_SOURCES}" "" "" false "" "" false 3) GenerateNamedObjects("laswp/generic/laswp_k_4.c" "" "laswp_plus" false "" "" false 3) GenerateNamedObjects("laswp/generic/laswp_k_4.c" "MINUS" "laswp_minus" false "" "" false 3) +foreach (float_type ${FLOAT_TYPES}) +if (${float_type} STREQUAL "HALF") + continue() +endif() +GenerateNamedObjects("getrf/getrf_single.c" "UNIT" "getrf_single" false "" "" false ${float_type}) +endforeach () + # dynamic_arch laswp needs arch specific code ? #foreach(TARGET_CORE ${DYNAMIC_CORE}) # set(TSUFFIX "_${TARGET_CORE}") @@ -81,7 +88,7 @@ if (USE_THREAD) ) foreach (float_type ${FLOAT_TYPES}) - GenerateNamedObjects("${GETRF_SRC}" "" "getrf_parallel" false "" "" false ${float_type}) + GenerateNamedObjects("${GETRF_SRC}" "UNIT" "getrf_parallel" false "" "" false ${float_type}) endforeach() GenerateNamedObjects("${PARALLEL_SOURCES}") diff --git a/lapack/Makefile b/lapack/Makefile index aff5209d5..2bbb4603f 100644 --- a/lapack/Makefile +++ b/lapack/Makefile @@ -2,7 +2,7 @@ TOPDIR = .. 
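The per-precision gating added in the lapack/*/Makefile hunks below leans on GNU make's $(or ...) function (available since make 3.81), which expands to its first non-empty argument; the guard therefore reads "build these objects if either flag is set". A hedged sketch of the idiom:

# Single-precision objects are kept when either BUILD_SINGLE or
# BUILD_DOUBLE is requested; double-only objects need BUILD_DOUBLE=1.
ifneq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" ""
SBLASOBJS = sgetf2_k.$(SUFFIX)
endif
ifeq ($(BUILD_DOUBLE),1)
DBLASOBJS = dgetf2_k.$(SUFFIX)
endif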
include ../Makefile.system #SUBDIRS = laswp getf2 getrf potf2 potrf lauu2 lauum trti2 trtri getrs -SUBDIRS = getrf getf2 laswp getrs potrf potf2 lauu2 lauum trti2 trtri +SUBDIRS = getrf getf2 laswp getrs potrf potf2 lauu2 lauum trti2 trtri trtrs FLAMEDIRS = laswp getf2 potf2 lauu2 trti2 diff --git a/lapack/getf2/Makefile b/lapack/getf2/Makefile index 612c6f9cc..a524a3235 100644 --- a/lapack/getf2/Makefile +++ b/lapack/getf2/Makefile @@ -1,11 +1,19 @@ TOPDIR = ../.. include ../../Makefile.system +ifneq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" SBLASOBJS = sgetf2_k.$(SUFFIX) +endif +ifeq ($(BUILD_DOUBLE),1) DBLASOBJS = dgetf2_k.$(SUFFIX) +endif QBLASOBJS = qgetf2_k.$(SUFFIX) +ifneq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" CBLASOBJS = cgetf2_k.$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX16),1) ZBLASOBJS = zgetf2_k.$(SUFFIX) +endif XBLASOBJS = xgetf2_k.$(SUFFIX) sgetf2_k.$(SUFFIX) : getf2_k.c diff --git a/lapack/getrf/Makefile b/lapack/getrf/Makefile index a559dfb0d..976ca3c0b 100644 --- a/lapack/getrf/Makefile +++ b/lapack/getrf/Makefile @@ -17,6 +17,19 @@ ZBLASOBJS += zgetrf_parallel.$(SUFFIX) XBLASOBJS += xgetrf_parallel.$(SUFFIX) endif +ifeq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" +SBLASOBJS= +endif +ifneq ($(BUILD_DOUBLE),1) +DBLASOBJS= +endif +ifeq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" +CBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX16),1) +ZBLASOBJS= +endif + ifeq ($(USE_OPENMP), 1) GETRF_SRC = getrf_parallel_omp.c else diff --git a/lapack/getrf/getrf_parallel.c b/lapack/getrf/getrf_parallel.c index 591ce4a99..fc410b0e7 100644 --- a/lapack/getrf/getrf_parallel.c +++ b/lapack/getrf/getrf_parallel.c @@ -68,25 +68,16 @@ double sqrt(double); #define GETRF_FACTOR 1.00 -#if defined(USE_PTHREAD_LOCK) -static pthread_mutex_t getrf_lock = PTHREAD_MUTEX_INITIALIZER; -#elif defined(USE_PTHREAD_SPINLOCK) -static pthread_spinlock_t getrf_lock = 0; +#ifdef HAVE_C11 +#define atomic_load_long(p) __atomic_load_n(p, __ATOMIC_RELAXED) +#define atomic_store_long(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED) #else -static BLASULONG getrf_lock = 0UL; -#endif - -#if defined(USE_PTHREAD_LOCK) -static pthread_mutex_t getrf_flag_lock = PTHREAD_MUTEX_INITIALIZER; -#elif defined(USE_PTHREAD_SPINLOCK) -static pthread_spinlock_t getrf_flag_lock = 0; -#else -static BLASULONG getrf_flag_lock = 0UL; +#define atomic_load_long(p) (BLASLONG)(*(volatile BLASLONG*)(p)) +#define atomic_store_long(p, v) (*(volatile BLASLONG *)(p)) = (v) #endif - static __inline BLASLONG FORMULA1(BLASLONG M, BLASLONG N, BLASLONG IS, BLASLONG BK, BLASLONG T) { double m = (double)(M - IS - BK); @@ -119,11 +110,7 @@ static void inner_basic_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *ra FLOAT *d = (FLOAT *)args -> b + (k + k * lda) * COMPSIZE; FLOAT *sbb = sb; -#if __STDC_VERSION__ >= 201112L - _Atomic BLASLONG *flag = (_Atomic BLASLONG *)args -> d; -#else volatile BLASLONG *flag = (volatile BLASLONG *)args -> d; -#endif blasint *ipiv = (blasint *)args -> c; @@ -180,7 +167,10 @@ static void inner_basic_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *ra } } - if ((js + REAL_GEMM_R >= n) && (mypos >= 0)) flag[mypos * CACHE_LINE_SIZE] = 0; + if ((js + REAL_GEMM_R >= n) && (mypos >= 0)) { + MB; + atomic_store_long(&flag[mypos * CACHE_LINE_SIZE], 0); + } for (is = 0; is < m; is += GEMM_P){ min_i = m - is; @@ -201,14 +191,10 @@ static void inner_basic_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *ra /* Non blocking implementation */ typedef struct { -#if __STDC_VERSION__ >= 201112L - _Atomic -#else - volatile -#endif - 
BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; + volatile BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; } job_t; + #define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); #define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); @@ -246,11 +232,8 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * blasint *ipiv = (blasint *)args -> c; BLASLONG jw; -#if __STDC_VERSION__ >= 201112L - _Atomic BLASLONG *flag = (_Atomic BLASLONG *)args -> d; -#else volatile BLASLONG *flag = (volatile BLASLONG *)args -> d; -#endif + if (args -> a == NULL) { TRSM_ILTCOPY(k, k, (FLOAT *)args -> b, lda, 0, sb); sbb = (FLOAT *)((((BLASULONG)(sb + k * k * COMPSIZE) + GEMM_ALIGN) & ~GEMM_ALIGN) + GEMM_OFFSET_B); @@ -279,14 +262,10 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * for (i = 0; i < args -> nthreads; i++) #if 1 { - LOCK_COMMAND(&getrf_lock); - jw = job[mypos].working[i][CACHE_LINE_SIZE * bufferside]; - UNLOCK_COMMAND(&getrf_lock); do { - LOCK_COMMAND(&getrf_lock); - jw = job[mypos].working[i][CACHE_LINE_SIZE * bufferside]; - UNLOCK_COMMAND(&getrf_lock); + jw = atomic_load_long(&job[mypos].working[i][CACHE_LINE_SIZE * bufferside]); } while (jw); + MB; } #else while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {}; @@ -329,21 +308,17 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * } MB; for (i = 0; i < args -> nthreads; i++) { - LOCK_COMMAND(&getrf_lock); - job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside]; - UNLOCK_COMMAND(&getrf_lock); + atomic_store_long(&job[mypos].working[i][CACHE_LINE_SIZE * bufferside], (BLASLONG)buffer[bufferside]); } } - LOCK_COMMAND(&getrf_flag_lock); - flag[mypos * CACHE_LINE_SIZE] = 0; - UNLOCK_COMMAND(&getrf_flag_lock); + MB; + atomic_store_long(&flag[mypos * CACHE_LINE_SIZE], 0); if (m == 0) { + MB; for (xxx = 0; xxx < DIVIDE_RATE; xxx++) { - LOCK_COMMAND(&getrf_lock); - job[mypos].working[mypos][CACHE_LINE_SIZE * xxx] = 0; - UNLOCK_COMMAND(&getrf_lock); + atomic_store_long(&job[mypos].working[mypos][CACHE_LINE_SIZE * xxx], 0); } } @@ -368,14 +343,10 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * if ((current != mypos) && (!is)) { #if 1 - LOCK_COMMAND(&getrf_lock); - jw = job[current].working[mypos][CACHE_LINE_SIZE * bufferside]; - UNLOCK_COMMAND(&getrf_lock); do { - LOCK_COMMAND(&getrf_lock); - jw = job[current].working[mypos][CACHE_LINE_SIZE * bufferside]; - UNLOCK_COMMAND(&getrf_lock); - } while (jw == 0); + jw = atomic_load_long(&job[current].working[mypos][CACHE_LINE_SIZE * bufferside]); + } while (jw == 0); + MB; #else while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {}; #endif @@ -387,9 +358,7 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * MB; if (is + min_i >= m) { - LOCK_COMMAND(&getrf_lock); - job[current].working[mypos][CACHE_LINE_SIZE * bufferside] = 0; - UNLOCK_COMMAND(&getrf_lock); + atomic_store_long(&job[current].working[mypos][CACHE_LINE_SIZE * bufferside], 0); } } @@ -402,14 +371,10 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * for (i = 0; i < args -> nthreads; i++) { for (xxx = 0; xxx < DIVIDE_RATE; xxx++) { #if 1 - LOCK_COMMAND(&getrf_lock); - jw = job[mypos].working[i][CACHE_LINE_SIZE *xxx]; - 
UNLOCK_COMMAND(&getrf_lock); do { - LOCK_COMMAND(&getrf_lock); - jw = job[mypos].working[i][CACHE_LINE_SIZE *xxx]; - UNLOCK_COMMAND(&getrf_lock); + jw = atomic_load_long(&job[mypos].working[i][CACHE_LINE_SIZE *xxx]); } while(jw != 0); + MB; #else while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {}; #endif @@ -452,12 +417,7 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, #ifdef _MSC_VER BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE]; #else -#if __STDC_VERSION__ >= 201112L - _Atomic -#else - volatile -#endif - BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE] __attribute__((aligned(128))); + volatile BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE] __attribute__((aligned(128))); #endif #ifndef COMPLEX @@ -552,7 +512,11 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, if (width > mn - is - bk) width = mn - is - bk; } - if (num_cpu > 0) exec_blas_async_wait(num_cpu, &queue[0]); + + if (num_cpu > 0) { + WMB; + exec_blas_async_wait(num_cpu, &queue[0]); + } mm = m - bk - is; nn = n - bk - is; @@ -617,7 +581,7 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, queue[num_cpu].sa = NULL; queue[num_cpu].sb = NULL; queue[num_cpu].next = &queue[num_cpu + 1]; - flag[num_cpu * CACHE_LINE_SIZE] = 1; + atomic_store_long(&flag[num_cpu * CACHE_LINE_SIZE], 1); num_cpu ++; @@ -646,6 +610,8 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, if (num_cpu > 0) { queue[num_cpu - 1].next = NULL; + WMB; + exec_blas_async(0, &queue[0]); inner_basic_thread(&newarg, NULL, range_n_mine, sa, sbb, -1); @@ -656,14 +622,10 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, for (i = 0; i < num_cpu; i ++) { #if 1 - LOCK_COMMAND(&getrf_flag_lock); - f=flag[i*CACHE_LINE_SIZE]; - UNLOCK_COMMAND(&getrf_flag_lock); - while (f!=0) { - LOCK_COMMAND(&getrf_flag_lock); - f=flag[i*CACHE_LINE_SIZE]; - UNLOCK_COMMAND(&getrf_flag_lock); - }; + do { + f = atomic_load_long(&flag[i*CACHE_LINE_SIZE]); + } while (f != 0); + MB; #else while (flag[i*CACHE_LINE_SIZE]) {}; #endif @@ -728,12 +690,7 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, BLASLONG range[MAX_CPU_NUMBER + 1]; BLASLONG width, nn, num_cpu; -#if __STDC_VERSION__ >= 201112L - _Atomic -#else - volatile -#endif - BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE] __attribute__((aligned(128))); + volatile BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE] __attribute__((aligned(128))); #ifndef COMPLEX #ifdef XDOUBLE @@ -842,6 +799,8 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, nn = n - bk - is; if (width > nn) width = nn; + WMB; + if (num_cpu > 1) exec_blas_async_wait(num_cpu - 1, &queue[1]); range[0] = 0; @@ -876,7 +835,7 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, queue[num_cpu].sa = NULL; queue[num_cpu].sb = NULL; queue[num_cpu].next = &queue[num_cpu + 1]; - flag[num_cpu * CACHE_LINE_SIZE] = 1; + atomic_store_long(&flag[num_cpu * CACHE_LINE_SIZE], 1); num_cpu ++; } @@ -891,6 +850,7 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, range_n_new[0] = offset + is; range_n_new[1] = offset + is + bk; + WMB; if (num_cpu > 1) { exec_blas_async(1, &queue[1]); @@ -926,7 +886,7 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, #endif - for (i = 1; i < num_cpu; i ++) while (flag[i * CACHE_LINE_SIZE]) {}; + for (i = 1; i < num_cpu; i ++) while 
(atomic_load_long(&flag[i * CACHE_LINE_SIZE])) {}; TRSM_ILTCOPY(bk, bk, a + (is + is * lda) * COMPSIZE, lda, 0, sb); diff --git a/lapack/getrf/potrf_parallel.c b/lapack/getrf/potrf_parallel.c deleted file mode 100644 index c2fee6bd1..000000000 --- a/lapack/getrf/potrf_parallel.c +++ /dev/null @@ -1,664 +0,0 @@ -/*********************************************************************/ -/* Copyright 2009, 2010 The University of Texas at Austin. */ -/* All rights reserved. */ -/* */ -/* Redistribution and use in source and binary forms, with or */ -/* without modification, are permitted provided that the following */ -/* conditions are met: */ -/* */ -/* 1. Redistributions of source code must retain the above */ -/* copyright notice, this list of conditions and the following */ -/* disclaimer. */ -/* */ -/* 2. Redistributions in binary form must reproduce the above */ -/* copyright notice, this list of conditions and the following */ -/* disclaimer in the documentation and/or other materials */ -/* provided with the distribution. */ -/* */ -/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ -/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ -/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ -/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ -/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ -/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ -/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ -/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ -/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ -/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ -/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ -/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ -/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ -/* POSSIBILITY OF SUCH DAMAGE. */ -/* */ -/* The views and conclusions contained in the software and */ -/* documentation are those of the authors and should not be */ -/* interpreted as representing official policies, either expressed */ -/* or implied, of The University of Texas at Austin. */ -/*********************************************************************/ - -#include -#include "common.h" - -#ifndef USE_SIMPLE_THREADED_LEVEL3 - -//The array of job_t may overflow the stack. -//Instead, use malloc to alloc job_t. 
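The stack worry in the comment above is easy to quantify: each job_t carries MAX_CPU_NUMBER * CACHE_LINE_SIZE * DIVIDE_RATE BLASLONG slots (the CACHE_LINE_SIZE stride keeps each thread's slot on its own cache line to avoid false sharing), and the driver keeps MAX_CPU_NUMBER of them. With illustrative values MAX_CPU_NUMBER = 256, CACHE_LINE_SIZE = 8, DIVIDE_RATE = 2 and an 8-byte BLASLONG, the array alone is 256 * 256 * 16 * 8 bytes = 8 MiB, the entire default thread stack on many Linux systems, hence the USE_ALLOC_HEAP path that malloc()s it instead once MAX_CPU_NUMBER exceeds BLAS3_MEM_ALLOC_THRESHOLD.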
-#if MAX_CPU_NUMBER > BLAS3_MEM_ALLOC_THRESHOLD -#define USE_ALLOC_HEAP -#endif - - -static FLOAT dm1 = -1.; - -#ifndef KERNEL_FUNC -#ifndef LOWER -#define KERNEL_FUNC SYRK_KERNEL_U -#else -#define KERNEL_FUNC SYRK_KERNEL_L -#endif -#endif - -#ifndef LOWER -#ifndef COMPLEX -#define TRSM_KERNEL TRSM_KERNEL_LT -#else -#define TRSM_KERNEL TRSM_KERNEL_LC -#endif -#else -#ifndef COMPLEX -#define TRSM_KERNEL TRSM_KERNEL_RN -#else -#define TRSM_KERNEL TRSM_KERNEL_RR -#endif -#endif - -#ifndef CACHE_LINE_SIZE -#define CACHE_LINE_SIZE 8 -#endif - -#ifndef DIVIDE_RATE -#define DIVIDE_RATE 2 -#endif - -#ifndef SWITCH_RATIO -#define SWITCH_RATIO 2 -#endif - -#ifndef LOWER -#define TRANS -#endif - -#ifndef SYRK_LOCAL -#if !defined(LOWER) && !defined(TRANS) -#define SYRK_LOCAL SYRK_UN -#elif !defined(LOWER) && defined(TRANS) -#define SYRK_LOCAL SYRK_UT -#elif defined(LOWER) && !defined(TRANS) -#define SYRK_LOCAL SYRK_LN -#else -#define SYRK_LOCAL SYRK_LT -#endif -#endif - -typedef struct { -#if __STDC_VERSION__ >= 201112L - _Atomic -#else - volatile -#endif - BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; -} job_t; - - -#ifndef KERNEL_OPERATION -#ifndef COMPLEX -#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \ - KERNEL_FUNC(M, N, K, ALPHA[0], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC, (X) - (Y)) -#else -#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \ - KERNEL_FUNC(M, N, K, ALPHA[0], ALPHA[1], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC, (X) - (Y)) -#endif -#endif - -#ifndef ICOPY_OPERATION -#ifndef TRANS -#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); -#else -#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); -#endif -#endif - -#ifndef OCOPY_OPERATION -#ifdef TRANS -#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); -#else -#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); -#endif -#endif - -#ifndef S -#define S args -> a -#endif -#ifndef A -#define A args -> b -#endif -#ifndef C -#define C args -> c -#endif -#ifndef LDA -#define LDA args -> lda -#endif -#ifndef N -#define N args -> m -#endif -#ifndef K -#define K args -> k -#endif - -static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){ - - FLOAT *buffer[DIVIDE_RATE]; - - BLASLONG k, lda; - BLASLONG m_from, m_to; - - FLOAT *alpha; - FLOAT *a, *c; - job_t *job = (job_t *)args -> common; - BLASLONG xxx, bufferside; - - BLASLONG jjs, min_jj; - BLASLONG is, min_i, div_n; - - BLASLONG i, current; - - k = K; - - a = (FLOAT *)A; - c = (FLOAT *)C; - - lda = LDA; - - alpha = (FLOAT *)args -> alpha; - - m_from = range_n[mypos + 0]; - m_to = range_n[mypos + 1]; - -#if 0 - fprintf(stderr, "Thread[%ld] m_from : %ld m_to : %ld\n", mypos, m_from, m_to); -#endif - - div_n = (((m_to - m_from + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; - - buffer[0] = (FLOAT *)((((BLASULONG)(sb + k * k * COMPSIZE) + GEMM_ALIGN) & ~GEMM_ALIGN) + GEMM_OFFSET_B); - for (i = 1; i < DIVIDE_RATE; i++) { - buffer[i] = buffer[i - 1] + GEMM_Q * div_n * COMPSIZE; - } - -#ifndef LOWER - TRSM_IUNCOPY(k, k, (FLOAT *)S, lda, 0, sb); -#else - TRSM_OLTCOPY(k, k, (FLOAT *)S, lda, 0, sb); -#endif - - 
for (xxx = m_from, bufferside = 0; xxx < m_to; xxx += div_n, bufferside ++) { - - for(jjs = xxx; jjs < MIN(m_to, xxx + div_n); jjs += min_jj){ - - min_jj = MIN(m_to, xxx + div_n) - jjs; - -#ifndef LOWER - if (min_jj > GEMM_UNROLL_MN) min_jj = GEMM_UNROLL_MN; -#else - if (min_jj > GEMM_P) min_jj = GEMM_P; -#endif - -#ifndef LOWER - OCOPY_OPERATION (k, min_jj, a, lda, 0, jjs, buffer[bufferside] + k * (jjs - xxx) * COMPSIZE); - - TRSM_KERNEL (k, min_jj, k, dm1, -#ifdef COMPLEX - ZERO, -#endif - sb, - buffer[bufferside] + k * (jjs - xxx) * COMPSIZE, - a + jjs * lda * COMPSIZE, lda, 0); -#else - ICOPY_OPERATION (k, min_jj, a, lda, 0, jjs, buffer[bufferside] + k * (jjs - xxx) * COMPSIZE); - - TRSM_KERNEL (min_jj, k, k, dm1, -#ifdef COMPLEX - ZERO, -#endif - buffer[bufferside] + k * (jjs - xxx) * COMPSIZE, - sb, - a + jjs * COMPSIZE, lda, 0); -#endif - } - -#ifndef LOWER - for (i = 0; i <= mypos; i++) - job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside]; -#else - for (i = mypos; i < args -> nthreads; i++) - job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside]; -#endif - - WMB; - } - - min_i = m_to - m_from; - - if (min_i >= GEMM_P * 2) { - min_i = GEMM_P; - } else - if (min_i > GEMM_P) { - min_i = (((min_i + 1) / 2 + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; - } - -#ifndef LOWER - ICOPY_OPERATION(k, min_i, a, lda, 0, m_from, sa); -#else - OCOPY_OPERATION(k, min_i, a, lda, 0, m_from, sa); -#endif - - current = mypos; - -#ifndef LOWER - while (current < args -> nthreads) -#else - while (current >= 0) -#endif - { - div_n = (((range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; - - for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) { - - /* thread has to wait */ - if (current != mypos) while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;}; - - KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), k, alpha, - sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], - c, lda, m_from, xxx); - - if (m_from + min_i >= m_to) { - job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; - WMB; - } - } - -#ifndef LOWER - current ++; -#else - current --; -#endif - } - - for(is = m_from + min_i; is < m_to; is += min_i){ - min_i = m_to - is; - - if (min_i >= GEMM_P * 2) { - min_i = GEMM_P; - } else - if (min_i > GEMM_P) { - min_i = (((min_i + 1) / 2 + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; - } - -#ifndef LOWER - ICOPY_OPERATION(k, min_i, a, lda, 0, is, sa); -#else - OCOPY_OPERATION(k, min_i, a, lda, 0, is, sa); -#endif - - current = mypos; - -#ifndef LOWER - while (current < args -> nthreads) -#else - while (current >= 0) -#endif - { - div_n = (((range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; - - for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) { - - KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), k, alpha, - sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], - c, lda, is, xxx); - - if (is + min_i >= m_to) { - job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; - WMB; - } - } -#ifndef LOWER - current ++; -#else - current --; -#endif - } - } - - for (i = 0; i < args -> nthreads; i++) { - if (i != mypos) { - for (xxx = 0; xxx < DIVIDE_RATE; xxx++) { - while 
(job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {YIELDING;}; - } - } - } - - return 0; - } - -static int thread_driver(blas_arg_t *args, FLOAT *sa, FLOAT *sb){ - - blas_arg_t newarg; - -#ifndef USE_ALLOC_HEAP - job_t job[MAX_CPU_NUMBER]; -#else - job_t * job = NULL; -#endif - - blas_queue_t queue[MAX_CPU_NUMBER]; - - BLASLONG range[MAX_CPU_NUMBER + 100]; - - BLASLONG num_cpu; - - BLASLONG nthreads = args -> nthreads; - - BLASLONG width, i, j, k; - BLASLONG n, n_from, n_to; - int mode, mask; - double dnum; - -#ifndef COMPLEX -#ifdef XDOUBLE - mode = BLAS_XDOUBLE | BLAS_REAL; - mask = MAX(QGEMM_UNROLL_M, QGEMM_UNROLL_N) - 1; -#elif defined(DOUBLE) - mode = BLAS_DOUBLE | BLAS_REAL; - mask = MAX(DGEMM_UNROLL_M, DGEMM_UNROLL_N) - 1; -#else - mode = BLAS_SINGLE | BLAS_REAL; - mask = MAX(SGEMM_UNROLL_M, SGEMM_UNROLL_N) - 1; -#endif -#else -#ifdef XDOUBLE - mode = BLAS_XDOUBLE | BLAS_COMPLEX; - mask = MAX(XGEMM_UNROLL_M, XGEMM_UNROLL_N) - 1; -#elif defined(DOUBLE) - mode = BLAS_DOUBLE | BLAS_COMPLEX; - mask = MAX(ZGEMM_UNROLL_M, ZGEMM_UNROLL_N) - 1; -#else - mode = BLAS_SINGLE | BLAS_COMPLEX; - mask = MAX(CGEMM_UNROLL_M, CGEMM_UNROLL_N) - 1; -#endif -#endif - - newarg.m = args -> m; - newarg.k = args -> k; - newarg.a = args -> a; - newarg.b = args -> b; - newarg.c = args -> c; - newarg.lda = args -> lda; - newarg.alpha = args -> alpha; - -#ifdef USE_ALLOC_HEAP - job = (job_t*)malloc(MAX_CPU_NUMBER * sizeof(job_t)); - if(job==NULL){ - fprintf(stderr, "OpenBLAS: malloc failed in %s\n", __func__); - exit(1); - } -#endif - - newarg.common = (void *)job; - - n_from = 0; - n_to = args -> m; - -#ifndef LOWER - - range[MAX_CPU_NUMBER] = n_to - n_from; - range[0] = 0; - num_cpu = 0; - i = 0; - n = n_to - n_from; - - dnum = (double)n * (double)n /(double)nthreads; - - while (i < n){ - - if (nthreads - num_cpu > 1) { - - double di = (double)i; - - width = ((((BLASLONG)(sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1)); - - if (num_cpu == 0) width = n - (((n - width)/(mask+1)) * (mask+1)); - - if ((width > n - i) || (width < mask)) width = n - i; - - } else { - width = n - i; - } - - range[MAX_CPU_NUMBER - num_cpu - 1] = range[MAX_CPU_NUMBER - num_cpu] - width; - - queue[num_cpu].mode = mode; - queue[num_cpu].routine = inner_thread; - queue[num_cpu].args = &newarg; - queue[num_cpu].range_m = NULL; - - queue[num_cpu].sa = NULL; - queue[num_cpu].sb = NULL; - queue[num_cpu].next = &queue[num_cpu + 1]; - - num_cpu ++; - i += width; - } - - for (i = 0; i < num_cpu; i ++) queue[i].range_n = &range[MAX_CPU_NUMBER - num_cpu]; - -#else - - range[0] = 0; - num_cpu = 0; - i = 0; - n = n_to - n_from; - - dnum = (double)n * (double)n /(double)nthreads; - - while (i < n){ - - if (nthreads - num_cpu > 1) { - - double di = (double)i; - - width = ((((BLASLONG)(sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1)); - - if ((width > n - i) || (width < mask)) width = n - i; - - } else { - width = n - i; - } - - range[num_cpu + 1] = range[num_cpu] + width; - - queue[num_cpu].mode = mode; - queue[num_cpu].routine = inner_thread; - queue[num_cpu].args = &newarg; - queue[num_cpu].range_m = NULL; - queue[num_cpu].range_n = range; - queue[num_cpu].sa = NULL; - queue[num_cpu].sb = NULL; - queue[num_cpu].next = &queue[num_cpu + 1]; - - num_cpu ++; - i += width; - } - -#endif - - newarg.nthreads = num_cpu; - - if (num_cpu) { - - for (j = 0; j < num_cpu; j++) { - for (i = 0; i < num_cpu; i++) { - for (k = 0; k < DIVIDE_RATE; k++) { - job[j].working[i][CACHE_LINE_SIZE * k] = 0; - } - } - } - - queue[0].sa = sa; - queue[0].sb = 
sb; - queue[num_cpu - 1].next = NULL; - - exec_blas(num_cpu, queue); - } - -#ifdef USE_ALLOC_HEAP - free(job); -#endif - - return 0; -} - -#endif - -blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG myid) { - - BLASLONG n, bk, i, blocking, lda; - BLASLONG info; - int mode; - blas_arg_t newarg; - FLOAT *a; - FLOAT alpha[2] = { -ONE, ZERO}; - -#ifndef COMPLEX -#ifdef XDOUBLE - mode = BLAS_XDOUBLE | BLAS_REAL; -#elif defined(DOUBLE) - mode = BLAS_DOUBLE | BLAS_REAL; -#else - mode = BLAS_SINGLE | BLAS_REAL; -#endif -#else -#ifdef XDOUBLE - mode = BLAS_XDOUBLE | BLAS_COMPLEX; -#elif defined(DOUBLE) - mode = BLAS_DOUBLE | BLAS_COMPLEX; -#else - mode = BLAS_SINGLE | BLAS_COMPLEX; -#endif -#endif - - if (args -> nthreads == 1) { -#ifndef LOWER - info = POTRF_U_SINGLE(args, NULL, NULL, sa, sb, 0); -#else - info = POTRF_L_SINGLE(args, NULL, NULL, sa, sb, 0); -#endif - return info; - } - - n = args -> n; - a = (FLOAT *)args -> a; - lda = args -> lda; - - if (range_n) n = range_n[1] - range_n[0]; - - if (n <= GEMM_UNROLL_N * 2) { -#ifndef LOWER - info = POTRF_U_SINGLE(args, NULL, range_n, sa, sb, 0); -#else - info = POTRF_L_SINGLE(args, NULL, range_n, sa, sb, 0); -#endif - return info; - } - - newarg.lda = lda; - newarg.ldb = lda; - newarg.ldc = lda; - newarg.alpha = alpha; - newarg.beta = NULL; - newarg.nthreads = args -> nthreads; - - blocking = ((n / 2 + GEMM_UNROLL_N - 1)/GEMM_UNROLL_N) * GEMM_UNROLL_N; - if (blocking > GEMM_Q) blocking = GEMM_Q; - - for (i = 0; i < n; i += blocking) { - bk = n - i; - if (bk > blocking) bk = blocking; - - newarg.m = bk; - newarg.n = bk; - newarg.a = a + (i + i * lda) * COMPSIZE; - - info = CNAME(&newarg, NULL, NULL, sa, sb, 0); - if (info) return info + i; - - if (n - i - bk > 0) { -#ifndef USE_SIMPLE_THREADED_LEVEL3 - newarg.m = n - i - bk; - newarg.k = bk; -#ifndef LOWER - newarg.b = a + ( i + (i + bk) * lda) * COMPSIZE; -#else - newarg.b = a + ((i + bk) + i * lda) * COMPSIZE; -#endif - newarg.c = a + ((i + bk) + (i + bk) * lda) * COMPSIZE; - - thread_driver(&newarg, sa, sb); -#else - -#ifndef LOWER - newarg.m = bk; - newarg.n = n - i - bk; - newarg.a = a + (i + i * lda) * COMPSIZE; - newarg.b = a + (i + (i + bk) * lda) * COMPSIZE; - - gemm_thread_n(mode | BLAS_TRANSA_T, - &newarg, NULL, NULL, (void *)TRSM_LCUN, sa, sb, args -> nthreads); - - newarg.n = n - i - bk; - newarg.k = bk; - newarg.a = a + ( i + (i + bk) * lda) * COMPSIZE; - newarg.c = a + ((i + bk) + (i + bk) * lda) * COMPSIZE; - -#if 0 - HERK_THREAD_UC(&newarg, NULL, NULL, sa, sb, 0); -#else - syrk_thread(mode | BLAS_TRANSA_N | BLAS_TRANSB_T, - &newarg, NULL, NULL, (void *)HERK_UC, sa, sb, args -> nthreads); -#endif -#else - newarg.m = n - i - bk; - newarg.n = bk; - newarg.a = a + (i + i * lda) * COMPSIZE; - newarg.b = a + (i + bk + i * lda) * COMPSIZE; - - gemm_thread_m(mode | BLAS_RSIDE | BLAS_TRANSA_T | BLAS_UPLO, - &newarg, NULL, NULL, (void *)TRSM_RCLN, sa, sb, args -> nthreads); - - newarg.n = n - i - bk; - newarg.k = bk; - newarg.a = a + (i + bk + i * lda) * COMPSIZE; - newarg.c = a + (i + bk + (i + bk) * lda) * COMPSIZE; - -#if 0 - HERK_THREAD_LN(&newarg, NULL, NULL, sa, sb, 0); -#else - syrk_thread(mode | BLAS_TRANSA_N | BLAS_TRANSB_T | BLAS_UPLO, - &newarg, NULL, NULL, (void *)HERK_LN, sa, sb, args -> nthreads); -#endif -#endif - -#endif - } - } - return 0; -} diff --git a/lapack/getrs/Makefile b/lapack/getrs/Makefile index 2640ef097..f32569367 100644 --- a/lapack/getrs/Makefile +++ b/lapack/getrs/Makefile @@ -17,6 +17,19 @@ ZBLASOBJS += 
zgetrs_N_parallel.$(SUFFIX) zgetrs_T_parallel.$(SUFFIX) zgetrs_R_pa XBLASOBJS += xgetrs_N_parallel.$(SUFFIX) xgetrs_T_parallel.$(SUFFIX) xgetrs_R_parallel.$(SUFFIX) xgetrs_C_parallel.$(SUFFIX) endif +ifeq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" +SBLASOBJS= +endif +ifneq ($(BUILD_DOUBLE),1) +DBLASOBJS= +endif +ifeq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" +CBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX16),1) +ZBLASOBJS= +endif + sgetrs_N_single.$(SUFFIX) : getrs_single.c $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UTRANS $< -o $(@F) diff --git a/lapack/laswp/Makefile b/lapack/laswp/Makefile index 389800692..2028d994e 100644 --- a/lapack/laswp/Makefile +++ b/lapack/laswp/Makefile @@ -1,11 +1,19 @@ TOPDIR = ../.. include ../../Makefile.system +ifneq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" SBLASOBJS = slaswp_plus.$(SUFFIX) slaswp_minus.$(SUFFIX) +endif +ifeq ($(BUILD_DOUBLE),1) DBLASOBJS = dlaswp_plus.$(SUFFIX) dlaswp_minus.$(SUFFIX) +endif QBLASOBJS = qlaswp_plus.$(SUFFIX) qlaswp_minus.$(SUFFIX) +ifneq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" CBLASOBJS = claswp_plus.$(SUFFIX) claswp_minus.$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX16),1) ZBLASOBJS = zlaswp_plus.$(SUFFIX) zlaswp_minus.$(SUFFIX) +endif XBLASOBJS = xlaswp_plus.$(SUFFIX) xlaswp_minus.$(SUFFIX) slaswp_plus.$(SUFFIX) slaswp_minus.$(SUFFIX) dlaswp_plus.$(SUFFIX) dlaswp_minus.$(SUFFIX) \ diff --git a/lapack/laswp/riscv64/Makefile b/lapack/laswp/riscv64/Makefile new file mode 100644 index 000000000..75411deb5 --- /dev/null +++ b/lapack/laswp/riscv64/Makefile @@ -0,0 +1,13 @@ +TOPDIR = ../../.. +include ../../../Makefile.system + +ifndef LASWP +LASWP = ../generic/laswp_k.c +endif + +ifndef ZLASWP +ZLASWP = ../generic/zlaswp_k.c +endif + +include ../generic/Makefile + diff --git a/lapack/lauu2/Makefile b/lapack/lauu2/Makefile index dc6a640b4..60d2db4db 100644 --- a/lapack/lauu2/Makefile +++ b/lapack/lauu2/Makefile @@ -1,11 +1,19 @@ TOPDIR = ../.. 
include ../../Makefile.system +ifeq ($(BUILD_SINGLE),1) SBLASOBJS = slauu2_U.$(SUFFIX) slauu2_L.$(SUFFIX) +endif +ifeq ($(BUILD_DOUBLE),1) DBLASOBJS = dlauu2_U.$(SUFFIX) dlauu2_L.$(SUFFIX) +endif QBLASOBJS = qlauu2_U.$(SUFFIX) qlauu2_L.$(SUFFIX) +ifeq ($(BUILD_COMPLEX),1) CBLASOBJS = clauu2_U.$(SUFFIX) clauu2_L.$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX16),1) ZBLASOBJS = zlauu2_U.$(SUFFIX) zlauu2_L.$(SUFFIX) +endif XBLASOBJS = xlauu2_U.$(SUFFIX) xlauu2_L.$(SUFFIX) slauu2_U.$(SUFFIX) : lauu2_U.c diff --git a/lapack/lauum/Makefile b/lapack/lauum/Makefile index f163479ef..c57f17937 100644 --- a/lapack/lauum/Makefile +++ b/lapack/lauum/Makefile @@ -17,6 +17,19 @@ ZBLASOBJS += zlauum_U_parallel.$(SUFFIX) zlauum_L_parallel.$(SUFFIX) XBLASOBJS += xlauum_U_parallel.$(SUFFIX) xlauum_L_parallel.$(SUFFIX) endif +ifneq ($(BUILD_SINGLE),1) +SBLASOBJS= +endif +ifneq ($(BUILD_DOUBLE),1) +DBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX),1) +CBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX16),1) +ZBLASOBJS= +endif + slauum_U_single.$(SUFFIX) : lauum_U_single.c $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE $< -o $(@F) diff --git a/lapack/potf2/Makefile b/lapack/potf2/Makefile index 5946ad9c8..f48570064 100644 --- a/lapack/potf2/Makefile +++ b/lapack/potf2/Makefile @@ -8,6 +8,19 @@ CBLASOBJS = cpotf2_U.$(SUFFIX) cpotf2_L.$(SUFFIX) ZBLASOBJS = zpotf2_U.$(SUFFIX) zpotf2_L.$(SUFFIX) XBLASOBJS = xpotf2_U.$(SUFFIX) xpotf2_L.$(SUFFIX) +ifeq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" + SBLASOBJS= +endif +ifneq ($(BUILD_DOUBLE),1) + DBLASOBJS= +endif +ifeq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" + CBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX16),1) + ZBLASOBJS= +endif + spotf2_U.$(SUFFIX) : potf2_U.c $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE $< -o $(@F) diff --git a/lapack/potrf/Makefile b/lapack/potrf/Makefile index 21efa5540..feefd0483 100644 --- a/lapack/potrf/Makefile +++ b/lapack/potrf/Makefile @@ -17,6 +17,20 @@ ZBLASOBJS += zpotrf_U_parallel.$(SUFFIX) zpotrf_L_parallel.$(SUFFIX) XBLASOBJS += xpotrf_U_parallel.$(SUFFIX) xpotrf_L_parallel.$(SUFFIX) endif +ifeq "$(or $(BUILD_SINGLE),$(BUILD_DOUBLE))" "" +SBLASOBJS= +endif +ifneq ($(BUILD_DOUBLE),1) +DBLASOBJS= +endif +ifeq "$(or $(BUILD_COMPLEX),$(BUILD_COMPLEX16))" "" +CBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX16),1) +ZBLASOBJS= +endif + + spotrf_U_single.$(SUFFIX) : potrf_U_single.c $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE $< -o $(@F) diff --git a/lapack/potrf/potrf_parallel.c b/lapack/potrf/potrf_parallel.c index e61e8decb..29364cc05 100644 --- a/lapack/potrf/potrf_parallel.c +++ b/lapack/potrf/potrf_parallel.c @@ -101,7 +101,12 @@ static FLOAT dm1 = -1.; #endif typedef struct { - volatile BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; +#ifdef HAVE_C11 + _Atomic +#else + volatile +#endif + BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; } job_t; @@ -375,6 +380,9 @@ static int thread_driver(blas_arg_t *args, FLOAT *sa, FLOAT *sb){ #elif defined(DOUBLE) mode = BLAS_DOUBLE | BLAS_REAL; mask = MAX(DGEMM_UNROLL_M, DGEMM_UNROLL_N) - 1; +#elif defined(HALF) + mode = BLAS_HALF | BLAS_REAL; + mask = MAX(SBGEMM_UNROLL_M, SBGEMM_UNROLL_N) - 1; #else mode = BLAS_SINGLE | BLAS_REAL; mask = MAX(SGEMM_UNROLL_M, SGEMM_UNROLL_N) - 1; diff --git a/lapack/trti2/Makefile b/lapack/trti2/Makefile index 45251fb1e..005e80d73 100644 --- a/lapack/trti2/Makefile +++ b/lapack/trti2/Makefile @@ -1,11 +1,19 @@ TOPDIR = ../.. 
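(Review aside on the potrf_parallel.c hunk above: the job_t progress flags that worker threads spin on are now declared _Atomic when C11 atomics are available, with volatile as the pre-C11 fallback. Below is a minimal, self-contained sketch of that qualifier-selection pattern; the HAVE_C11 probe and all names here are illustrative only, not OpenBLAS's actual build logic, which defines HAVE_C11 elsewhere.

#include <stdio.h>

/* Hypothetical stand-in for OpenBLAS's HAVE_C11 detection. */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
#define HAVE_C11
#endif

typedef struct {
#ifdef HAVE_C11
  _Atomic        /* C11: plain loads/stores on the flags become atomic */
#else
  volatile       /* pre-C11 fallback: compiler must re-read memory each time */
#endif
  long working[4];
} job_t;

static job_t job;  /* zero-initialized progress flags */

int main(void) {
  job.working[0] = 1;                  /* producer publishes a buffer */
  while (job.working[0] == 0) { }      /* consumer spin-wait, as in the driver */
  printf("flag = %ld\n", (long)job.working[0]);
  return 0;
}

volatile only prevents the compiler from caching the flag in a register; hardware ordering still comes from the explicit WMB barriers visible in the driver code, whereas _Atomic provides the same guarantee in portable C11.)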
include ../../Makefile.system +ifeq ($(BUILD_SINGLE),1) SBLASOBJS = strti2_UU.$(SUFFIX) strti2_UN.$(SUFFIX) strti2_LU.$(SUFFIX) strti2_LN.$(SUFFIX) +endif +ifeq ($(BUILD_DOUBLE),1) DBLASOBJS = dtrti2_UU.$(SUFFIX) dtrti2_UN.$(SUFFIX) dtrti2_LU.$(SUFFIX) dtrti2_LN.$(SUFFIX) +endif QBLASOBJS = qtrti2_UU.$(SUFFIX) qtrti2_UN.$(SUFFIX) qtrti2_LU.$(SUFFIX) qtrti2_LN.$(SUFFIX) +ifeq ($(BUILD_COMPLEX),1) CBLASOBJS = ctrti2_UU.$(SUFFIX) ctrti2_UN.$(SUFFIX) ctrti2_LU.$(SUFFIX) ctrti2_LN.$(SUFFIX) +endif +ifeq ($(BUILD_COMPLEX16),1) ZBLASOBJS = ztrti2_UU.$(SUFFIX) ztrti2_UN.$(SUFFIX) ztrti2_LU.$(SUFFIX) ztrti2_LN.$(SUFFIX) +endif XBLASOBJS = xtrti2_UU.$(SUFFIX) xtrti2_UN.$(SUFFIX) xtrti2_LU.$(SUFFIX) xtrti2_LN.$(SUFFIX) strti2_UU.$(SUFFIX) : trti2_U.c diff --git a/lapack/trtri/Makefile b/lapack/trtri/Makefile index 626c47bbf..72167ff56 100644 --- a/lapack/trtri/Makefile +++ b/lapack/trtri/Makefile @@ -23,6 +23,19 @@ ZBLASOBJS += ztrtri_UU_parallel.$(SUFFIX) ztrtri_UN_parallel.$(SUFFIX) ztrtri_LU XBLASOBJS += xtrtri_UU_parallel.$(SUFFIX) xtrtri_UN_parallel.$(SUFFIX) xtrtri_LU_parallel.$(SUFFIX) xtrtri_LN_parallel.$(SUFFIX) endif +ifneq ($(BUILD_SINGLE),1) +SBLASOBJS= +endif +ifneq ($(BUILD_DOUBLE),1) +DBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX),1) +CBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX16),1) +ZBLASOBJS= +endif + strtri_UU_single.$(SUFFIX) : trtri_U_single.c $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUNIT $< -o $(@F) diff --git a/lapack/trtri/trtri_L_parallel.c b/lapack/trtri/trtri_L_parallel.c index 5dc60b862..fb8c8fc77 100644 --- a/lapack/trtri/trtri_L_parallel.c +++ b/lapack/trtri/trtri_L_parallel.c @@ -54,7 +54,7 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, BLASLONG n, info; BLASLONG bk, i, blocking, start_i; int mode; - BLASLONG lda, range_N[2]; + BLASLONG lda; // , range_N[2]; blas_arg_t newarg; FLOAT *a; FLOAT alpha[2] = { ONE, ZERO}; @@ -100,8 +100,8 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, bk = n - i; if (bk > blocking) bk = blocking; - range_N[0] = i; - range_N[1] = i + bk; + /* range_N[0] = i; + range_N[1] = i + bk; */ newarg.lda = lda; newarg.ldb = lda; diff --git a/lapack/trtri/trtri_U_parallel.c b/lapack/trtri/trtri_U_parallel.c index fc48a33f1..5287421d6 100644 --- a/lapack/trtri/trtri_U_parallel.c +++ b/lapack/trtri/trtri_U_parallel.c @@ -54,7 +54,7 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, BLASLONG n, info; BLASLONG bk, i, blocking; int mode; - BLASLONG lda, range_N[2]; + BLASLONG lda; //, range_N[2]; blas_arg_t newarg; FLOAT *a; FLOAT alpha[2] = { ONE, ZERO}; @@ -96,8 +96,8 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, bk = n - i; if (bk > blocking) bk = blocking; - range_N[0] = i; - range_N[1] = i + bk; + /* range_N[0] = i; + range_N[1] = i + bk; */ newarg.lda = lda; newarg.ldb = lda; diff --git a/lapack/trtrs/Makefile b/lapack/trtrs/Makefile new file mode 100644 index 000000000..8ba63c21a --- /dev/null +++ b/lapack/trtrs/Makefile @@ -0,0 +1,465 @@ +TOPDIR = ../.. 
+include ../../Makefile.system + +SBLASOBJS = strtrs_UNU_single.$(SUFFIX) strtrs_UNN_single.$(SUFFIX) strtrs_UTU_single.$(SUFFIX) strtrs_UTN_single.$(SUFFIX) strtrs_LNU_single.$(SUFFIX) strtrs_LNN_single.$(SUFFIX) strtrs_LTU_single.$(SUFFIX) strtrs_LTN_single.$(SUFFIX) +DBLASOBJS = dtrtrs_UNU_single.$(SUFFIX) dtrtrs_UNN_single.$(SUFFIX) dtrtrs_UTU_single.$(SUFFIX) dtrtrs_UTN_single.$(SUFFIX) dtrtrs_LNU_single.$(SUFFIX) dtrtrs_LNN_single.$(SUFFIX) dtrtrs_LTU_single.$(SUFFIX) dtrtrs_LTN_single.$(SUFFIX) +QBLASOBJS = qtrtrs_UNU_single.$(SUFFIX) qtrtrs_UNN_single.$(SUFFIX) qtrtrs_UTU_single.$(SUFFIX) qtrtrs_UTN_single.$(SUFFIX) qtrtrs_LNU_single.$(SUFFIX) qtrtrs_LNN_single.$(SUFFIX) qtrtrs_LTU_single.$(SUFFIX) qtrtrs_LTN_single.$(SUFFIX) +CBLASOBJS = ctrtrs_UNU_single.$(SUFFIX) ctrtrs_UNN_single.$(SUFFIX) ctrtrs_UTU_single.$(SUFFIX) ctrtrs_UTN_single.$(SUFFIX) ctrtrs_URU_single.$(SUFFIX) ctrtrs_URN_single.$(SUFFIX) ctrtrs_UCU_single.$(SUFFIX) ctrtrs_UCN_single.$(SUFFIX) ctrtrs_LNU_single.$(SUFFIX) ctrtrs_LNN_single.$(SUFFIX) ctrtrs_LTU_single.$(SUFFIX) ctrtrs_LTN_single.$(SUFFIX) ctrtrs_LRU_single.$(SUFFIX) ctrtrs_LRN_single.$(SUFFIX) ctrtrs_LCU_single.$(SUFFIX) ctrtrs_LCN_single.$(SUFFIX) +ZBLASOBJS = ztrtrs_UNU_single.$(SUFFIX) ztrtrs_UNN_single.$(SUFFIX) ztrtrs_UTU_single.$(SUFFIX) ztrtrs_UTN_single.$(SUFFIX) ztrtrs_URU_single.$(SUFFIX) ztrtrs_URN_single.$(SUFFIX) ztrtrs_UCU_single.$(SUFFIX) ztrtrs_UCN_single.$(SUFFIX) ztrtrs_LNU_single.$(SUFFIX) ztrtrs_LNN_single.$(SUFFIX) ztrtrs_LTU_single.$(SUFFIX) ztrtrs_LTN_single.$(SUFFIX) ztrtrs_LRU_single.$(SUFFIX) ztrtrs_LRN_single.$(SUFFIX) ztrtrs_LCU_single.$(SUFFIX) ztrtrs_LCN_single.$(SUFFIX) +XBLASOBJS = xtrtrs_UNU_single.$(SUFFIX) xtrtrs_UNN_single.$(SUFFIX) xtrtrs_UTU_single.$(SUFFIX) xtrtrs_UTN_single.$(SUFFIX) xtrtrs_URU_single.$(SUFFIX) xtrtrs_URN_single.$(SUFFIX) xtrtrs_UCU_single.$(SUFFIX) xtrtrs_UCN_single.$(SUFFIX) xtrtrs_LNU_single.$(SUFFIX) xtrtrs_LNN_single.$(SUFFIX) xtrtrs_LTU_single.$(SUFFIX) xtrtrs_LTN_single.$(SUFFIX) xtrtrs_LRU_single.$(SUFFIX) xtrtrs_LRN_single.$(SUFFIX) xtrtrs_LCU_single.$(SUFFIX) xtrtrs_LCN_single.$(SUFFIX) + +ifdef SMP +SBLASOBJS += strtrs_UNU_parallel.$(SUFFIX) strtrs_UNN_parallel.$(SUFFIX) strtrs_UTU_parallel.$(SUFFIX) strtrs_UTN_parallel.$(SUFFIX) strtrs_LNU_parallel.$(SUFFIX) strtrs_LNN_parallel.$(SUFFIX) strtrs_LTU_parallel.$(SUFFIX) strtrs_LTN_parallel.$(SUFFIX) +DBLASOBJS += dtrtrs_UNU_parallel.$(SUFFIX) dtrtrs_UNN_parallel.$(SUFFIX) dtrtrs_UTU_parallel.$(SUFFIX) dtrtrs_UTN_parallel.$(SUFFIX) dtrtrs_LNU_parallel.$(SUFFIX) dtrtrs_LNN_parallel.$(SUFFIX) dtrtrs_LTU_parallel.$(SUFFIX) dtrtrs_LTN_parallel.$(SUFFIX) +QBLASOBJS += qtrtrs_UNU_parallel.$(SUFFIX) qtrtrs_UNN_parallel.$(SUFFIX) qtrtrs_UTU_parallel.$(SUFFIX) qtrtrs_UTN_parallel.$(SUFFIX) qtrtrs_LNU_parallel.$(SUFFIX) qtrtrs_LNN_parallel.$(SUFFIX) qtrtrs_LTU_parallel.$(SUFFIX) qtrtrs_LTN_parallel.$(SUFFIX) +CBLASOBJS += ctrtrs_UNU_parallel.$(SUFFIX) ctrtrs_UNN_parallel.$(SUFFIX) ctrtrs_UTU_parallel.$(SUFFIX) ctrtrs_UTN_parallel.$(SUFFIX) ctrtrs_URU_parallel.$(SUFFIX) ctrtrs_URN_parallel.$(SUFFIX) ctrtrs_UCU_parallel.$(SUFFIX) ctrtrs_UCN_parallel.$(SUFFIX) ctrtrs_LNU_parallel.$(SUFFIX) ctrtrs_LNN_parallel.$(SUFFIX) ctrtrs_LTU_parallel.$(SUFFIX) ctrtrs_LTN_parallel.$(SUFFIX) ctrtrs_LRU_parallel.$(SUFFIX) ctrtrs_LRN_parallel.$(SUFFIX) ctrtrs_LCU_parallel.$(SUFFIX) ctrtrs_LCN_parallel.$(SUFFIX) +ZBLASOBJS += ztrtrs_UNU_parallel.$(SUFFIX) ztrtrs_UNN_parallel.$(SUFFIX) ztrtrs_UTU_parallel.$(SUFFIX) ztrtrs_UTN_parallel.$(SUFFIX) 
ztrtrs_URU_parallel.$(SUFFIX) ztrtrs_URN_parallel.$(SUFFIX) ztrtrs_UCU_parallel.$(SUFFIX) ztrtrs_UCN_parallel.$(SUFFIX) ztrtrs_LNU_parallel.$(SUFFIX) ztrtrs_LNN_parallel.$(SUFFIX) ztrtrs_LTU_parallel.$(SUFFIX) ztrtrs_LTN_parallel.$(SUFFIX) ztrtrs_LRU_parallel.$(SUFFIX) ztrtrs_LRN_parallel.$(SUFFIX) ztrtrs_LCU_parallel.$(SUFFIX) ztrtrs_LCN_parallel.$(SUFFIX) +XBLASOBJS += xtrtrs_UNU_parallel.$(SUFFIX) xtrtrs_UNN_parallel.$(SUFFIX) xtrtrs_UTU_parallel.$(SUFFIX) xtrtrs_UTN_parallel.$(SUFFIX) xtrtrs_URU_parallel.$(SUFFIX) xtrtrs_URN_parallel.$(SUFFIX) xtrtrs_UCU_parallel.$(SUFFIX) xtrtrs_UCN_parallel.$(SUFFIX) xtrtrs_LNU_parallel.$(SUFFIX) xtrtrs_LNN_parallel.$(SUFFIX) xtrtrs_LTU_parallel.$(SUFFIX) xtrtrs_LTN_parallel.$(SUFFIX) xtrtrs_LRU_parallel.$(SUFFIX) xtrtrs_LRN_parallel.$(SUFFIX) xtrtrs_LCU_parallel.$(SUFFIX) xtrtrs_LCN_parallel.$(SUFFIX) +endif + +ifneq ($(BUILD_SINGLE),1) +SBLASOBJS= +endif +ifneq ($(BUILD_DOUBLE),1) +DBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX),1) +CBLASOBJS= +endif +ifneq ($(BUILD_COMPLEX16),1) +ZBLASOBJS= +endif + +strtrs_UNU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UUPLO -UTRANS -UDIAG $< -o $(@F) + +strtrs_UNN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UUPLO -UTRANS -DDIAG $< -o $(@F) + +strtrs_UTU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UUPLO -DTRANS -UDIAG $< -o $(@F) + +strtrs_UTN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UUPLO -DTRANS -DDIAG $< -o $(@F) + +strtrs_LNU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUPLO -UTRANS -UDIAG $< -o $(@F) + +strtrs_LNN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUPLO -UTRANS -DDIAG $< -o $(@F) + +strtrs_LTU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUPLO -DTRANS -UDIAG $< -o $(@F) + +strtrs_LTN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUPLO -DTRANS -DDIAG $< -o $(@F) + +strtrs_UNU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UUPLO -UTRANS -UDIAG $< -o $(@F) + +strtrs_UNN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UUPLO -UTRANS -DDIAG $< -o $(@F) + +strtrs_UTU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UUPLO -DTRANS -UDIAG $< -o $(@F) + +strtrs_UTN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -UUPLO -DTRANS -DDIAG $< -o $(@F) + +strtrs_LNU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUPLO -UTRANS -UDIAG $< -o $(@F) + +strtrs_LNN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUPLO -UTRANS -DDIAG $< -o $(@F) + +strtrs_LTU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUPLO -DTRANS -UDIAG $< -o $(@F) + +strtrs_LTN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -UDOUBLE -DUPLO -DTRANS -DDIAG $< -o $(@F) + +dtrtrs_UNU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UUPLO -UTRANS -UDIAG $< -o $(@F) + +dtrtrs_UNN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UUPLO -UTRANS -DDIAG $< -o $(@F) + +dtrtrs_UTU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UUPLO -DTRANS -UDIAG $< -o $(@F) + +dtrtrs_UTN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UUPLO -DTRANS -DDIAG $< -o $(@F) + +dtrtrs_LNU_single.$(SUFFIX) 
: trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DUPLO -UTRANS -UDIAG $< -o $(@F) + +dtrtrs_LNN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DUPLO -UTRANS -DDIAG $< -o $(@F) + +dtrtrs_LTU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DUPLO -DTRANS -UDIAG $< -o $(@F) + +dtrtrs_LTN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DUPLO -DTRANS -DDIAG $< -o $(@F) + +dtrtrs_UNU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UUPLO -UTRANS -UDIAG $< -o $(@F) + +dtrtrs_UNN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UUPLO -UTRANS -DDIAG $< -o $(@F) + +dtrtrs_UTU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UUPLO -DTRANS -UDIAG $< -o $(@F) + +dtrtrs_UTN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -UUPLO -DTRANS -DDIAG $< -o $(@F) + +dtrtrs_LNU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DUPLO -UTRANS -UDIAG $< -o $(@F) + +dtrtrs_LNN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DUPLO -UTRANS -DDIAG $< -o $(@F) + +dtrtrs_LTU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DUPLO -DTRANS -UDIAG $< -o $(@F) + +dtrtrs_LTN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DDOUBLE -DUPLO -DTRANS -DDIAG $< -o $(@F) + +qtrtrs_UNU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UUPLO -UTRANS -UDIAG $< -o $(@F) + +qtrtrs_UNN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UUPLO -UTRANS -DDIAG $< -o $(@F) + +qtrtrs_UTU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UUPLO -DTRANS -UDIAG $< -o $(@F) + +qtrtrs_UTN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UUPLO -DTRANS -DDIAG $< -o $(@F) + +qtrtrs_LNU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DUPLO -UTRANS -UDIAG $< -o $(@F) + +qtrtrs_LNN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DUPLO -UTRANS -DDIAG $< -o $(@F) + +qtrtrs_LTU_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DUPLO -DTRANS -UDIAG $< -o $(@F) + +qtrtrs_LTN_single.$(SUFFIX) : trtrs_single.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DUPLO -DTRANS -DDIAG $< -o $(@F) + +qtrtrs_UNU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UUPLO -UTRANS -UDIAG $< -o $(@F) + +qtrtrs_UNN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UUPLO -UTRANS -DDIAG $< -o $(@F) + +qtrtrs_UTU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UUPLO -DTRANS -UDIAG $< -o $(@F) + +qtrtrs_UTN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -UUPLO -DTRANS -DDIAG $< -o $(@F) + +qtrtrs_LNU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DUPLO -UTRANS -UDIAG $< -o $(@F) + +qtrtrs_LNN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DUPLO -UTRANS -DDIAG $< -o $(@F) + +qtrtrs_LTU_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DUPLO -DTRANS -UDIAG $< -o $(@F) + +qtrtrs_LTN_parallel.$(SUFFIX) : trtrs_parallel.c + $(CC) -c $(CFLAGS) -UCOMPLEX -DXDOUBLE -DUPLO -DTRANS -DDIAG $< -o $(@F) + +ctrtrs_UNU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE 
-UUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +ctrtrs_UNN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +ctrtrs_UTU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +ctrtrs_UTN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +ctrtrs_URU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +ctrtrs_URN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +ctrtrs_UCU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +ctrtrs_UCN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +ctrtrs_LNU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +ctrtrs_LNN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +ctrtrs_LTU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +ctrtrs_LTN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +ctrtrs_LRU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +ctrtrs_LRN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +ctrtrs_LCU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +ctrtrs_LCN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +ztrtrs_UNU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +ztrtrs_UNN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +ztrtrs_UTU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +ztrtrs_UTN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +ztrtrs_URU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +ztrtrs_URN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +ztrtrs_UCU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +ztrtrs_UCN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +ztrtrs_LNU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +ztrtrs_LNN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +ztrtrs_LTU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +ztrtrs_LTN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +ztrtrs_LRU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=3 -UDIAG $< -o $(@F) + 
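(Review aside on the complex trtrs rules: TRANS is an integer switch here rather than the defined/undefined toggle used for the real-precision objects, because complex triangular solves have four variants: TRANS=1 is no transpose (N), 2 is transpose (T), 3 is conjugate (R), and 4 is conjugate transpose (C). These values feed the TRSM_L{N,T,R,C}../ZTRSV_{N,T,R,C}.. dispatch in ztrtrs_single.c and ztrtrs_parallel.c further down. A minimal sketch of the same compile-time dispatch, with illustrative names only:

#include <stdio.h>

#ifndef TRANS
#define TRANS 2   /* would come from -DTRANS=n on the compiler command line */
#endif

#if   TRANS == 1
#define KERNEL_NAME "N: solve A   x = b"
#elif TRANS == 2
#define KERNEL_NAME "T: solve A^T x = b"
#elif TRANS == 3
#define KERNEL_NAME "R: solve conj(A) x = b"
#elif TRANS == 4
#define KERNEL_NAME "C: solve A^H x = b"
#else
#error "TRANS must be 1..4"
#endif

int main(void) { puts(KERNEL_NAME); return 0; }

Each object file listed above is this one translation unit rebuilt with a different -DTRANS/-DUPLO/-DDIAG combination.)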
+ztrtrs_LRN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +ztrtrs_LCU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +ztrtrs_LCN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +xtrtrs_UNU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +xtrtrs_UNN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +xtrtrs_UTU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +xtrtrs_UTN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +xtrtrs_URU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +xtrtrs_URN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +xtrtrs_UCU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +xtrtrs_UCN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +xtrtrs_LNU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +xtrtrs_LNN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +xtrtrs_LTU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +xtrtrs_LTN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +xtrtrs_LRU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +xtrtrs_LRN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +xtrtrs_LCU_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +xtrtrs_LCN_single.$(SUFFIX) : ztrtrs_single.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +ctrtrs_UNU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +ctrtrs_UNN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +ctrtrs_UTU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +ctrtrs_UTN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +ctrtrs_URU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +ctrtrs_URN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +ctrtrs_UCU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +ctrtrs_UCN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -UUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +ctrtrs_LNU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=1 -UDIAG $< -o $(@F) + 
+ctrtrs_LNN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +ctrtrs_LTU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +ctrtrs_LTN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +ctrtrs_LRU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +ctrtrs_LRN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +ctrtrs_LCU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +ctrtrs_LCN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -UDOUBLE -DUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +ztrtrs_UNU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +ztrtrs_UNN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +ztrtrs_UTU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +ztrtrs_UTN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +ztrtrs_URU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +ztrtrs_URN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +ztrtrs_UCU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +ztrtrs_UCN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -UUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +ztrtrs_LNU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +ztrtrs_LNN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +ztrtrs_LTU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +ztrtrs_LTN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +ztrtrs_LRU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +ztrtrs_LRN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +ztrtrs_LCU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +ztrtrs_LCN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DDOUBLE -DUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +xtrtrs_UNU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +xtrtrs_UNN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +xtrtrs_UTU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +xtrtrs_UTN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +xtrtrs_URU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c 
$(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +xtrtrs_URN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +xtrtrs_UCU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +xtrtrs_UCN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -UUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +xtrtrs_LNU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=1 -UDIAG $< -o $(@F) + +xtrtrs_LNN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=1 -DDIAG $< -o $(@F) + +xtrtrs_LTU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=2 -UDIAG $< -o $(@F) + +xtrtrs_LTN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=2 -DDIAG $< -o $(@F) + +xtrtrs_LRU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=3 -UDIAG $< -o $(@F) + +xtrtrs_LRN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=3 -DDIAG $< -o $(@F) + +xtrtrs_LCU_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=4 -UDIAG $< -o $(@F) + +xtrtrs_LCN_parallel.$(SUFFIX) : ztrtrs_parallel.c + $(CC) -c $(CFLAGS) -DCOMPLEX -DXDOUBLE -DUPLO -DTRANS=4 -DDIAG $< -o $(@F) + +include ../../Makefile.tail diff --git a/lapack/trtrs/trtrs_parallel.c b/lapack/trtrs/trtrs_parallel.c new file mode 100644 index 000000000..52f42f693 --- /dev/null +++ b/lapack/trtrs/trtrs_parallel.c @@ -0,0 +1,111 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. 
*/ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +#if !defined(TRANS) && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LNUU +#define TRSV TRSV_NUU +#elif !defined(TRANS) && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LNUN +#define TRSV TRSV_NUN +#elif !defined(TRANS) && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LNLU +#define TRSV TRSV_NLU +#elif !defined(TRANS) && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LNLN +#define TRSV TRSV_NLN +#elif defined(TRANS) && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LTUU +#define TRSV TRSV_TUU +#elif defined(TRANS) && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LTUN +#define TRSV TRSV_TUN +#elif defined(TRANS) && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LTLU +#define TRSV TRSV_TLU +#elif defined(TRANS) && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LTLN +#define TRSV TRSV_TLN +#endif + +static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, + FLOAT *sa, FLOAT *sb, BLASLONG mypos) { + + TRSM (args, range_m, range_n, sa, sb, 0); + + return 0; +} + +blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos) { + + int mode; + +#ifndef TRANS + if (args -> n == 1){ + TRSV (args -> m, args -> a, args -> lda, args -> b, 1, sb); + } else { +#ifdef XDOUBLE + mode = BLAS_XDOUBLE | BLAS_REAL; +#elif defined(DOUBLE) + mode = BLAS_DOUBLE | BLAS_REAL; +#else + mode = BLAS_SINGLE | BLAS_REAL; +#endif + + gemm_thread_n(mode, args, NULL, NULL, inner_thread, sa, sb, args -> nthreads); + } +#else + if (args -> n == 1){ + TRSV (args -> m, args -> a, args -> lda, args -> b, 1, sb); + } else { +#ifdef XDOUBLE + mode = BLAS_XDOUBLE | BLAS_REAL | (1 << BLAS_TRANSA_SHIFT); +#elif defined(DOUBLE) + mode = BLAS_DOUBLE | BLAS_REAL | (1 << BLAS_TRANSA_SHIFT); +#else + mode = BLAS_SINGLE | BLAS_REAL | (1 << BLAS_TRANSA_SHIFT); +#endif + + gemm_thread_n(mode, args, NULL, NULL, inner_thread, sa, sb, args -> nthreads); + } +#endif + + return 0; + } diff --git a/lapack/trtrs/trtrs_single.c b/lapack/trtrs/trtrs_single.c new file mode 100644 index 000000000..c82b81303 --- /dev/null +++ b/lapack/trtrs/trtrs_single.c @@ -0,0 +1,75 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED.
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +#if !defined(TRANS) && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LNUU +#define TRSV TRSV_NUU +#elif !defined(TRANS) && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LNUN +#define TRSV TRSV_NUN +#elif !defined(TRANS) && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LNLU +#define TRSV TRSV_NLU +#elif !defined(TRANS) && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LNLN +#define TRSV TRSV_NLN +#elif defined(TRANS) && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LTUU +#define TRSV TRSV_TUU +#elif defined(TRANS) && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LTUN +#define TRSV TRSV_TUN +#elif defined(TRANS) && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LTLU +#define TRSV TRSV_TLU +#elif defined(TRANS) && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LTLN +#define TRSV TRSV_TLN +#endif + +blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos) { + + if (args -> n == 1){ + TRSV (args -> m, args -> a, args -> lda, args -> b, 1, sb); + } else { + TRSM (args, range_m, range_n, sa, sb, 0); + } + return 0; } diff --git a/lapack/trtrs/ztrtrs_parallel.c b/lapack/trtrs/ztrtrs_parallel.c new file mode 100644 index 000000000..d5248f21b --- /dev/null +++ b/lapack/trtrs/ztrtrs_parallel.c @@ -0,0 +1,118 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED.
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +#if TRANS == 1 && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LNUU +#define ZTRSV ZTRSV_NUU +#elif TRANS == 1 && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LNUN +#define ZTRSV ZTRSV_NUN +#elif TRANS == 1 && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LNLU +#define ZTRSV ZTRSV_NLU +#elif TRANS == 1 && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LNLN +#define ZTRSV ZTRSV_NLN +#elif TRANS == 2 && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LTUU +#define ZTRSV ZTRSV_TUU +#elif TRANS == 2 && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LTUN +#define ZTRSV ZTRSV_TUN +#elif TRANS == 2 && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LTLU +#define ZTRSV ZTRSV_TLU +#elif TRANS == 2 && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LTLN +#define ZTRSV ZTRSV_TLN +#elif TRANS == 3 && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LRUU +#define ZTRSV ZTRSV_RUU +#elif TRANS == 3 && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LRUN +#define ZTRSV ZTRSV_RUN +#elif TRANS == 3 && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LRLU +#define ZTRSV ZTRSV_RLU +#elif TRANS == 3 && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LRLN +#define ZTRSV ZTRSV_RLN +#elif TRANS == 4 && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LCUU +#define ZTRSV ZTRSV_CUU +#elif TRANS == 4 && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LCUN +#define ZTRSV ZTRSV_CUN +#elif TRANS == 4 && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LCLU +#define ZTRSV ZTRSV_CLU +#elif TRANS == 4 && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LCLN +#define ZTRSV ZTRSV_CLN +#endif + +static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, + FLOAT *sa, FLOAT *sb, BLASLONG mypos) { + + TRSM (args, range_m, range_n, sa, sb, 0); + return 0; +} + +blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos) { + + int mode; + + if (args -> n == 1){ + ZTRSV (args -> m, args -> a, args -> lda, args -> b, 1, sb); + } else { +#ifdef XDOUBLE + mode = BLAS_XDOUBLE | BLAS_COMPLEX; +#elif defined(DOUBLE) + mode = BLAS_DOUBLE | BLAS_COMPLEX; +#else + mode = BLAS_SINGLE | BLAS_COMPLEX; +#endif + + gemm_thread_n(mode, args, NULL, NULL, inner_thread, sa, sb, args -> nthreads); + } + + return 0; + } diff --git a/lapack/trtrs/ztrtrs_single.c b/lapack/trtrs/ztrtrs_single.c new file mode 100644 index 000000000..f39d72900 --- /dev/null +++ b/lapack/trtrs/ztrtrs_single.c @@ -0,0 +1,98 @@ +/*********************************************************************/ +/*
Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +#if TRANS == 1 && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LNUU +#define ZTRSV ZTRSV_NUU +#elif TRANS == 1 && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LNUN +#define ZTRSV ZTRSV_NUN +#elif TRANS == 1 && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LNLU +#define ZTRSV ZTRSV_NLU +#elif TRANS == 1 && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LNLN +#define ZTRSV ZTRSV_NLN +#elif TRANS == 2 && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LTUU +#define ZTRSV ZTRSV_TUU +#elif TRANS == 2 && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LTUN +#define ZTRSV ZTRSV_TUN +#elif TRANS == 2 && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LTLU +#define ZTRSV ZTRSV_TLU +#elif TRANS == 2 && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LTLN +#define ZTRSV ZTRSV_TLN +#elif TRANS == 3 && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LRUU +#define ZTRSV ZTRSV_RUU +#elif TRANS == 3 && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LRUN +#define ZTRSV ZTRSV_RUN +#elif TRANS == 3 && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LRLU +#define ZTRSV ZTRSV_RLU +#elif TRANS == 3 && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LRLN +#define ZTRSV ZTRSV_RLN +#elif TRANS == 4 && !defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LCUU +#define ZTRSV ZTRSV_CUU +#elif TRANS == 4 && !defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LCUN +#define ZTRSV ZTRSV_CUN +#elif TRANS == 4 && defined(UPLO) && !defined(DIAG) +#define TRSM TRSM_LCLU +#define ZTRSV ZTRSV_CLU +#elif TRANS == 4 && defined(UPLO) && defined(DIAG) +#define TRSM TRSM_LCLN +#define ZTRSV ZTRSV_CLN +#endif + +blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos) { + if (args -> n == 1){ + ZTRSV (args -> m, args -> a, args -> lda, args -> b, 1, sb); + } else { + TRSM (args, range_m, range_n, sa, sb, 0); + } + return 0; } diff --git a/openblas_config_template.h b/openblas_config_template.h index 52dd49da2..858b8c5cb 100644 --- a/openblas_config_template.h +++ b/openblas_config_template.h @@ -34,6 +34,11 @@ typedef long BLASLONG; typedef unsigned long BLASULONG; #endif +#ifndef BFLOAT16 +#include <stdint.h> +typedef uint16_t bfloat16; +#endif + #ifdef OPENBLAS_USE64BITINT typedef BLASLONG blasint; #else @@ -91,3 +96,8 @@ typedef int blasint; #define openblas_complex_xdouble_real(z) ((z).real) #define openblas_complex_xdouble_imag(z) ((z).imag) #endif + +/* Inclusion of Linux-specific header is needed for definition of cpu_set_t. */ +#ifdef OPENBLAS_OS_LINUX +#include <sched.h> +#endif diff --git a/param.h b/param.h index 71d423831..a9a8f2688 100644 --- a/param.h +++ b/param.h @@ -74,6 +74,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" +#define SBGEMM_DEFAULT_UNROLL_N 4 +#define SBGEMM_DEFAULT_UNROLL_M 8 +#define SBGEMM_DEFAULT_UNROLL_MN 32 +#define SBGEMM_DEFAULT_P 256 +#define SBGEMM_DEFAULT_R 256 +#define SBGEMM_DEFAULT_Q 256 #ifdef OPTERON #define SNUMOPT 4 @@ -627,7 +633,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else -#define SGEMM_DEFAULT_UNROLL_M 16 +#define SGEMM_DEFAULT_UNROLL_M 8 #define DGEMM_DEFAULT_UNROLL_M 4 #define QGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_M 8 @@ -640,9 +646,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
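(Review aside on the openblas_config_template.h hunk above: bfloat16 is exposed as plain uint16_t storage, i.e. the upper half of an IEEE-754 single-precision value, and the new SBGEMM_DEFAULT_* constants in param.h size the blocking for the bfloat16 GEMM path. A self-contained sketch of the storage format, assuming simple truncation; OpenBLAS's actual conversion kernels may round rather than truncate:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t bfloat16;  /* same storage declaration as in the header above */

static bfloat16 f32_to_bf16(float f) {
  uint32_t u;
  memcpy(&u, &f, sizeof u);        /* bit-copy avoids aliasing UB */
  return (bfloat16)(u >> 16);      /* keep sign, 8-bit exponent, top 7 mantissa bits */
}

static float bf16_to_f32(bfloat16 b) {
  uint32_t u = (uint32_t)b << 16;  /* widen by zero-filling the low mantissa bits */
  float f;
  memcpy(&f, &u, sizeof f);
  return f;
}

int main(void) {
  float x = 3.14159f;
  printf("%f -> %f\n", x, bf16_to_f32(f32_to_bf16(x)));  /* prints ~3.140625 */
  return 0;
}

Because bfloat16 keeps float32's exponent range, the round trip loses only mantissa precision, which is why it works as a drop-in input type for the SBGEMM kernels.)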
#define CGEMM_DEFAULT_UNROLL_N 2 #define ZGEMM_DEFAULT_UNROLL_N 2 #define XGEMM_DEFAULT_UNROLL_N 1 - +/* #define SGEMM_DEFAULT_UNROLL_MN 32 #define DGEMM_DEFAULT_UNROLL_MN 32 +*/ #endif #ifdef ARCH_X86 @@ -668,20 +675,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else -#define SGEMM_DEFAULT_P 768 +#define SGEMM_DEFAULT_P 320 #define DGEMM_DEFAULT_P 512 -#define CGEMM_DEFAULT_P 384 -#define ZGEMM_DEFAULT_P 256 +#define CGEMM_DEFAULT_P 256 +#define ZGEMM_DEFAULT_P 192 #ifdef WINDOWS_ABI #define SGEMM_DEFAULT_Q 320 #define DGEMM_DEFAULT_Q 128 #else -#define SGEMM_DEFAULT_Q 384 +#define SGEMM_DEFAULT_Q 320 #define DGEMM_DEFAULT_Q 256 #endif -#define CGEMM_DEFAULT_Q 192 -#define ZGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 256 +#define ZGEMM_DEFAULT_Q 192 #define SGEMM_DEFAULT_R sgemm_r #define DGEMM_DEFAULT_R 13824 @@ -695,16 +702,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define XGEMM_DEFAULT_R xgemm_r #define XGEMM_DEFAULT_Q 128 -#define CGEMM3M_DEFAULT_UNROLL_N 8 -#define CGEMM3M_DEFAULT_UNROLL_M 4 -#define ZGEMM3M_DEFAULT_UNROLL_N 8 -#define ZGEMM3M_DEFAULT_UNROLL_M 2 +#define CGEMM3M_DEFAULT_UNROLL_N 4 +#define CGEMM3M_DEFAULT_UNROLL_M 8 +#define ZGEMM3M_DEFAULT_UNROLL_N 4 +#define ZGEMM3M_DEFAULT_UNROLL_M 4 -#define CGEMM3M_DEFAULT_P 448 -#define ZGEMM3M_DEFAULT_P 224 +#define CGEMM3M_DEFAULT_P 320 +#define ZGEMM3M_DEFAULT_P 256 #define XGEMM3M_DEFAULT_P 112 -#define CGEMM3M_DEFAULT_Q 224 -#define ZGEMM3M_DEFAULT_Q 224 +#define CGEMM3M_DEFAULT_Q 320 +#define ZGEMM3M_DEFAULT_Q 256 #define XGEMM3M_DEFAULT_Q 224 #define CGEMM3M_DEFAULT_R 12288 #define ZGEMM3M_DEFAULT_R 12288 @@ -1450,22 +1457,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SGEMM_DEFAULT_P 768 #define SGEMM_DEFAULT_R sgemm_r -//#define SGEMM_DEFAULT_R 1024 +/*#define SGEMM_DEFAULT_R 1024*/ #define DGEMM_DEFAULT_P 512 #define DGEMM_DEFAULT_R dgemm_r -//#define DGEMM_DEFAULT_R 1024 +/*#define DGEMM_DEFAULT_R 1024*/ #define QGEMM_DEFAULT_P 504 #define QGEMM_DEFAULT_R qgemm_r #define CGEMM_DEFAULT_P 768 #define CGEMM_DEFAULT_R cgemm_r -//#define CGEMM_DEFAULT_R 1024 +/*#define CGEMM_DEFAULT_R 1024*/ #define ZGEMM_DEFAULT_P 512 #define ZGEMM_DEFAULT_R zgemm_r -//#define ZGEMM_DEFAULT_R 1024 +/*#define ZGEMM_DEFAULT_R 1024*/ #define XGEMM_DEFAULT_P 252 #define XGEMM_DEFAULT_R xgemm_r @@ -1509,8 +1516,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 8 -#define SWITCH_RATIO 32 -#define GEMM_PREFERED_SIZE 16 +#if defined(XDOUBLE) || defined(DOUBLE) +#define SWITCH_RATIO 4 +#define GEMM_PREFERED_SIZE 4 +#else +#define SWITCH_RATIO 8 +#define GEMM_PREFERED_SIZE 8 +#endif #ifdef ARCH_X86 @@ -1530,7 +1542,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else -#define SGEMM_DEFAULT_UNROLL_M 16 +#define SGEMM_DEFAULT_UNROLL_M 8 #define DGEMM_DEFAULT_UNROLL_M 4 #define QGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_M 8 @@ -1543,9 +1555,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CGEMM_DEFAULT_UNROLL_N 2 #define ZGEMM_DEFAULT_UNROLL_N 2 #define XGEMM_DEFAULT_UNROLL_N 1 - +/* #define SGEMM_DEFAULT_UNROLL_MN 32 #define DGEMM_DEFAULT_UNROLL_MN 32 +*/ #endif #ifdef ARCH_X86 @@ -1571,20 +1584,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#else -#define SGEMM_DEFAULT_P 768 +#define SGEMM_DEFAULT_P 320 #define DGEMM_DEFAULT_P 512 -#define CGEMM_DEFAULT_P 384 -#define ZGEMM_DEFAULT_P 256 +#define CGEMM_DEFAULT_P 256 +#define ZGEMM_DEFAULT_P 192 #ifdef WINDOWS_ABI #define SGEMM_DEFAULT_Q 320 #define DGEMM_DEFAULT_Q 128 #else -#define SGEMM_DEFAULT_Q 384 +#define SGEMM_DEFAULT_Q 320 #define DGEMM_DEFAULT_Q 256 #endif -#define CGEMM_DEFAULT_Q 192 -#define ZGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 256 +#define ZGEMM_DEFAULT_Q 192 #define SGEMM_DEFAULT_R sgemm_r #define DGEMM_DEFAULT_R 13824 @@ -1598,16 +1611,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define XGEMM_DEFAULT_R xgemm_r #define XGEMM_DEFAULT_Q 128 -#define CGEMM3M_DEFAULT_UNROLL_N 8 -#define CGEMM3M_DEFAULT_UNROLL_M 4 -#define ZGEMM3M_DEFAULT_UNROLL_N 8 -#define ZGEMM3M_DEFAULT_UNROLL_M 2 +#define CGEMM3M_DEFAULT_UNROLL_N 4 +#define CGEMM3M_DEFAULT_UNROLL_M 8 +#define ZGEMM3M_DEFAULT_UNROLL_N 4 +#define ZGEMM3M_DEFAULT_UNROLL_M 4 -#define CGEMM3M_DEFAULT_P 448 -#define ZGEMM3M_DEFAULT_P 224 +#define CGEMM3M_DEFAULT_P 320 +#define ZGEMM3M_DEFAULT_P 256 #define XGEMM3M_DEFAULT_P 112 -#define CGEMM3M_DEFAULT_Q 224 -#define ZGEMM3M_DEFAULT_Q 224 +#define CGEMM3M_DEFAULT_Q 320 +#define ZGEMM3M_DEFAULT_Q 256 #define XGEMM3M_DEFAULT_Q 224 #define CGEMM3M_DEFAULT_R 12288 #define ZGEMM3M_DEFAULT_R 12288 @@ -1629,8 +1642,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 8 -#define SWITCH_RATIO 32 -#define GEMM_PREFERED_SIZE 32 +#if defined(XDOUBLE) || defined(DOUBLE) +#define SWITCH_RATIO 8 +#define GEMM_PREFERED_SIZE 8 +#else +#define SWITCH_RATIO 16 +#define GEMM_PREFERED_SIZE 16 +#endif #define USE_SGEMM_KERNEL_DIRECT 1 #ifdef ARCH_X86 @@ -1652,14 +1670,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else #define SGEMM_DEFAULT_UNROLL_M 16 -#define DGEMM_DEFAULT_UNROLL_M 4 +#define DGEMM_DEFAULT_UNROLL_M 16 #define QGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_M 8 #define ZGEMM_DEFAULT_UNROLL_M 4 #define XGEMM_DEFAULT_UNROLL_M 1 #define SGEMM_DEFAULT_UNROLL_N 4 -#define DGEMM_DEFAULT_UNROLL_N 8 +#define DGEMM_DEFAULT_UNROLL_N 2 #define QGEMM_DEFAULT_UNROLL_N 2 #define CGEMM_DEFAULT_UNROLL_N 2 #define ZGEMM_DEFAULT_UNROLL_N 2 @@ -1692,23 +1710,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else -#define SGEMM_DEFAULT_P 768 -#define DGEMM_DEFAULT_P 512 +#define SGEMM_DEFAULT_P 448 +#define DGEMM_DEFAULT_P 192 #define CGEMM_DEFAULT_P 384 #define ZGEMM_DEFAULT_P 256 -#ifdef WINDOWS_ABI -#define SGEMM_DEFAULT_Q 320 -#define DGEMM_DEFAULT_Q 128 -#else -#define SGEMM_DEFAULT_Q 384 -#define DGEMM_DEFAULT_Q 256 -#endif +#define SGEMM_DEFAULT_Q 448 +#define DGEMM_DEFAULT_Q 384 #define CGEMM_DEFAULT_Q 192 #define ZGEMM_DEFAULT_Q 128 #define SGEMM_DEFAULT_R sgemm_r -#define DGEMM_DEFAULT_R 13824 +#define DGEMM_DEFAULT_R 8640 #define CGEMM_DEFAULT_R cgemm_r #define ZGEMM_DEFAULT_R zgemm_r @@ -1719,16 +1732,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define XGEMM_DEFAULT_R xgemm_r #define XGEMM_DEFAULT_Q 128 -#define CGEMM3M_DEFAULT_UNROLL_N 8 -#define CGEMM3M_DEFAULT_UNROLL_M 4 -#define ZGEMM3M_DEFAULT_UNROLL_N 8 -#define ZGEMM3M_DEFAULT_UNROLL_M 2 +#define CGEMM3M_DEFAULT_UNROLL_N 4 +#define CGEMM3M_DEFAULT_UNROLL_M 8 +#define ZGEMM3M_DEFAULT_UNROLL_N 4 +#define ZGEMM3M_DEFAULT_UNROLL_M 4 -#define CGEMM3M_DEFAULT_P 448 -#define ZGEMM3M_DEFAULT_P 224 +#define CGEMM3M_DEFAULT_P 320 +#define ZGEMM3M_DEFAULT_P 256 #define XGEMM3M_DEFAULT_P 112 -#define CGEMM3M_DEFAULT_Q 224 -#define ZGEMM3M_DEFAULT_Q 224 +#define CGEMM3M_DEFAULT_Q 320 +#define ZGEMM3M_DEFAULT_Q 256 #define XGEMM3M_DEFAULT_Q 224 #define CGEMM3M_DEFAULT_R 12288 #define ZGEMM3M_DEFAULT_R 12288 @@ -1739,6 +1752,124 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif +#ifdef COOPERLAKE + +#define SNUMOPT 16 +#define DNUMOPT 8 + +#define GEMM_DEFAULT_OFFSET_A 0 +#define GEMM_DEFAULT_OFFSET_B 0 +#define GEMM_DEFAULT_ALIGN 0x03fffUL + +#define SYMV_P 8 + +#if defined(XDOUBLE) || defined(DOUBLE) +#define SWITCH_RATIO 8 +#define GEMM_PREFERED_SIZE 8 +#else +#define SWITCH_RATIO 16 +#define GEMM_PREFERED_SIZE 16 +#endif +#define USE_SGEMM_KERNEL_DIRECT 1 + +#ifdef ARCH_X86 + +#define SGEMM_DEFAULT_UNROLL_M 4 +#define DGEMM_DEFAULT_UNROLL_M 2 +#define QGEMM_DEFAULT_UNROLL_M 2 +#define CGEMM_DEFAULT_UNROLL_M 2 +#define ZGEMM_DEFAULT_UNROLL_M 1 +#define XGEMM_DEFAULT_UNROLL_M 1 + +#define SGEMM_DEFAULT_UNROLL_N 4 +#define DGEMM_DEFAULT_UNROLL_N 4 +#define QGEMM_DEFAULT_UNROLL_N 2 +#define CGEMM_DEFAULT_UNROLL_N 2 +#define ZGEMM_DEFAULT_UNROLL_N 2 +#define XGEMM_DEFAULT_UNROLL_N 1 + +#else + +#define SGEMM_DEFAULT_UNROLL_M 16 +#define DGEMM_DEFAULT_UNROLL_M 16 +#define QGEMM_DEFAULT_UNROLL_M 2 +#define CGEMM_DEFAULT_UNROLL_M 8 +#define ZGEMM_DEFAULT_UNROLL_M 4 +#define XGEMM_DEFAULT_UNROLL_M 1 + +#define SGEMM_DEFAULT_UNROLL_N 4 +#define DGEMM_DEFAULT_UNROLL_N 2 +#define QGEMM_DEFAULT_UNROLL_N 2 +#define CGEMM_DEFAULT_UNROLL_N 2 +#define ZGEMM_DEFAULT_UNROLL_N 2 +#define XGEMM_DEFAULT_UNROLL_N 1 + +#define SGEMM_DEFAULT_UNROLL_MN 32 +#define DGEMM_DEFAULT_UNROLL_MN 32 +#endif + +#ifdef ARCH_X86 + +#define SGEMM_DEFAULT_P 512 +#define SGEMM_DEFAULT_R sgemm_r +#define DGEMM_DEFAULT_P 512 +#define DGEMM_DEFAULT_R dgemm_r +#define QGEMM_DEFAULT_P 504 +#define QGEMM_DEFAULT_R qgemm_r +#define CGEMM_DEFAULT_P 128 +#define CGEMM_DEFAULT_R 1024 +#define ZGEMM_DEFAULT_P 512 +#define ZGEMM_DEFAULT_R zgemm_r +#define XGEMM_DEFAULT_P 252 +#define XGEMM_DEFAULT_R xgemm_r +#define SGEMM_DEFAULT_Q 256 +#define DGEMM_DEFAULT_Q 256 +#define QGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 256 +#define ZGEMM_DEFAULT_Q 192 +#define XGEMM_DEFAULT_Q 128 + +#else + +#define SGEMM_DEFAULT_P 640 +#define DGEMM_DEFAULT_P 192 +#define CGEMM_DEFAULT_P 384 +#define ZGEMM_DEFAULT_P 256 + +#define SGEMM_DEFAULT_Q 320 +#define DGEMM_DEFAULT_Q 384 +#define CGEMM_DEFAULT_Q 192 +#define ZGEMM_DEFAULT_Q 128 + +#define SGEMM_DEFAULT_R sgemm_r +#define DGEMM_DEFAULT_R 8640 +#define CGEMM_DEFAULT_R cgemm_r +#define ZGEMM_DEFAULT_R zgemm_r + +#define QGEMM_DEFAULT_Q 128 +#define QGEMM_DEFAULT_P 504 +#define QGEMM_DEFAULT_R qgemm_r +#define XGEMM_DEFAULT_P 252 +#define XGEMM_DEFAULT_R xgemm_r +#define XGEMM_DEFAULT_Q 128 + +#define CGEMM3M_DEFAULT_UNROLL_N 4 +#define CGEMM3M_DEFAULT_UNROLL_M 8 +#define ZGEMM3M_DEFAULT_UNROLL_N 4 +#define ZGEMM3M_DEFAULT_UNROLL_M 4 + +#define CGEMM3M_DEFAULT_P 320 +#define ZGEMM3M_DEFAULT_P 256 +#define XGEMM3M_DEFAULT_P 112 +#define CGEMM3M_DEFAULT_Q 320 +#define 
ZGEMM3M_DEFAULT_Q 256 +#define XGEMM3M_DEFAULT_Q 224 +#define CGEMM3M_DEFAULT_R 12288 +#define ZGEMM3M_DEFAULT_R 12288 +#define XGEMM3M_DEFAULT_R 12288 + +#endif +#endif #ifdef ATOM @@ -1965,7 +2096,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SGEMM_DEFAULT_UNROLL_N 4 #define DGEMM_DEFAULT_UNROLL_M 4 #define DGEMM_DEFAULT_UNROLL_N 4 -#define CGEMM_DEFAULT_UNROLL_M 8 +#define CGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_N 2 #define ZGEMM_DEFAULT_UNROLL_M 2 #define ZGEMM_DEFAULT_UNROLL_N 2 @@ -1992,16 +2123,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_OFFSET_B 3072 #define GEMM_DEFAULT_ALIGN (BLASLONG)0x03fffUL +#if defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#define SGEMM_DEFAULT_UNROLL_M 4 +#else #define SGEMM_DEFAULT_UNROLL_M 16 +#endif #define SGEMM_DEFAULT_UNROLL_N 4 #define DGEMM_DEFAULT_UNROLL_M 4 #define DGEMM_DEFAULT_UNROLL_N 4 +#if defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#define CGEMM_DEFAULT_UNROLL_M 2 +#else #define CGEMM_DEFAULT_UNROLL_M 8 +#endif #define CGEMM_DEFAULT_UNROLL_N 2 #define ZGEMM_DEFAULT_UNROLL_M 2 #define ZGEMM_DEFAULT_UNROLL_N 2 -#if defined(OS_LINUX) || defined(OS_DARWIN) +#if defined(OS_LINUX) || defined(OS_DARWIN) || defined(OS_FREEBSD) #if L2_SIZE == 1024976 #define SGEMM_DEFAULT_P 320 #define DGEMM_DEFAULT_P 256 @@ -2207,8 +2346,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_OFFSET_A 0 #define GEMM_DEFAULT_OFFSET_B 65536 -#define GEMM_DEFAULT_ALIGN (BLASLONG)0x0ffffUL +#define GEMM_DEFAULT_ALIGN (BLASLONG)0x0ffffUL +#if defined(__32BIT__) +#warning using BINARY32==POWER6 +#define SGEMM_DEFAULT_UNROLL_M 4 +#define SGEMM_DEFAULT_UNROLL_N 4 +#define DGEMM_DEFAULT_UNROLL_M 4 +#define DGEMM_DEFAULT_UNROLL_N 4 +#define CGEMM_DEFAULT_UNROLL_M 2 +#define CGEMM_DEFAULT_UNROLL_N 4 +#define ZGEMM_DEFAULT_UNROLL_M 2 +#define ZGEMM_DEFAULT_UNROLL_N 4 +#else #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 8 #define DGEMM_DEFAULT_UNROLL_M 16 @@ -2217,16 +2367,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CGEMM_DEFAULT_UNROLL_N 4 #define ZGEMM_DEFAULT_UNROLL_M 8 #define ZGEMM_DEFAULT_UNROLL_N 2 +#endif +#define SGEMM_DEFAULT_P 1280UL +#define DGEMM_DEFAULT_P 640UL +#define CGEMM_DEFAULT_P 640UL +#define ZGEMM_DEFAULT_P 320UL -#define SGEMM_DEFAULT_P 1280 -#define DGEMM_DEFAULT_P 640 -#define CGEMM_DEFAULT_P 640 -#define ZGEMM_DEFAULT_P 320 +#define SGEMM_DEFAULT_Q 640UL +#define DGEMM_DEFAULT_Q 720UL +#define CGEMM_DEFAULT_Q 640UL +#define ZGEMM_DEFAULT_Q 640UL -#define SGEMM_DEFAULT_Q 640 -#define DGEMM_DEFAULT_Q 720 -#define CGEMM_DEFAULT_Q 640 -#define ZGEMM_DEFAULT_Q 640 +#if 0 +#define SGEMM_DEFAULT_R SGEMM_DEFAULT_P +#define DGEMM_DEFAULT_R DGEMM_DEFAULT_P +#define CGEMM_DEFAULT_R CGEMM_DEFAULT_P +#define ZGEMM_DEFAULT_R ZGEMM_DEFAULT_P +#endif +#define SGEMM_DEFAULT_R 4096 +#define DGEMM_DEFAULT_R 4096 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 #define SYMV_P 8 @@ -2241,6 +2402,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_OFFSET_B 65536 #define GEMM_DEFAULT_ALIGN 0x0ffffUL +#define SWITCH_RATIO 16 +#define GEMM_PREFERED_SIZE 16 + #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 8 #define DGEMM_DEFAULT_UNROLL_M 16 @@ -2250,18 +2414,72 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
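[Editor's note: one detail worth flagging in the POWER hunks above is that the P/Q constants change from plain literals (1280) to unsigned long (1280UL). A plausible motivation, offered as an assumption rather than established fact, is that buffer sizing such as P * Q * sizeof(double) then happens in wide unsigned arithmetic from the first operand instead of risking 32-bit signed overflow:

    #include <stdio.h>

    #define P_UL 1280UL   /* the UL suffix promotes the whole product */

    int main(void)
    {
        /* 1280 * 720 * 4096 = 3,774,873,600 does not fit in a 32-bit
           signed int; with the UL operand the product is computed in
           unsigned long and prints as intended. */
        printf("%lu\n", P_UL * 720 * 4096);
        return 0;
    }
]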
#define ZGEMM_DEFAULT_UNROLL_M 8 #define ZGEMM_DEFAULT_UNROLL_N 2 -#define SGEMM_DEFAULT_P 640 +#define SGEMM_DEFAULT_P 832 #define DGEMM_DEFAULT_P 128 -#define CGEMM_DEFAULT_P 640 -#define ZGEMM_DEFAULT_P 320 +#define CGEMM_DEFAULT_P 512 +#define ZGEMM_DEFAULT_P 256 -#define SGEMM_DEFAULT_Q 1408 +#define SGEMM_DEFAULT_Q 1026 #define DGEMM_DEFAULT_Q 384 -#define CGEMM_DEFAULT_Q 640 -#define ZGEMM_DEFAULT_Q 640 +#define CGEMM_DEFAULT_Q 1026 +#define ZGEMM_DEFAULT_Q 1026 + +#define SGEMM_DEFAULT_R 4096 +#define DGEMM_DEFAULT_R 4096 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 + +#define SYMV_P 8 + +#endif + +#if defined(POWER10) +#define SNUMOPT 16 +#define DNUMOPT 8 + +#define GEMM_DEFAULT_OFFSET_A 0 +#define GEMM_DEFAULT_OFFSET_B 65536 +#define GEMM_DEFAULT_ALIGN 0x0ffffUL + +#define SWITCH_RATIO 16 +#define GEMM_PREFERED_SIZE 16 + +#define SGEMM_DEFAULT_UNROLL_M 16 +#define SGEMM_DEFAULT_UNROLL_N 8 +#define DGEMM_DEFAULT_UNROLL_M 8 +#define DGEMM_DEFAULT_UNROLL_N 8 +#define CGEMM_DEFAULT_UNROLL_M 8 +#define CGEMM_DEFAULT_UNROLL_N 4 +#define ZGEMM_DEFAULT_UNROLL_M 8 +#define ZGEMM_DEFAULT_UNROLL_N 2 + +#define SGEMM_DEFAULT_P 832 +#define DGEMM_DEFAULT_P 320 +#define CGEMM_DEFAULT_P 512 +#define ZGEMM_DEFAULT_P 256 + +#define SGEMM_DEFAULT_Q 1026 +#define DGEMM_DEFAULT_Q 960 +#define CGEMM_DEFAULT_Q 1026 +#define ZGEMM_DEFAULT_Q 1026 + +#define SGEMM_DEFAULT_R 4096 +#define DGEMM_DEFAULT_R 4096 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 #define SYMV_P 8 +#undef SBGEMM_DEFAULT_UNROLL_N +#undef SBGEMM_DEFAULT_UNROLL_M +#undef SBGEMM_DEFAULT_P +#undef SBGEMM_DEFAULT_R +#undef SBGEMM_DEFAULT_Q +#define SBGEMM_DEFAULT_UNROLL_M 16 +#define SBGEMM_DEFAULT_UNROLL_N 8 +#define SBGEMM_DEFAULT_P 832 +#define SBGEMM_DEFAULT_Q 1026 +#define SBGEMM_DEFAULT_R 4096 #endif #if defined(SPARC) && defined(V7) @@ -2363,8 +2581,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 16 #endif -#ifdef LOONGSON3A -////Copy from SICORTEX +#if defined(LOONGSON3R4) #define SNUMOPT 2 #define DNUMOPT 2 @@ -2372,6 +2589,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_OFFSET_B 0 #define GEMM_DEFAULT_ALIGN (BLASLONG)0x03fffUL +#ifdef HAVE_MSA +#define SGEMM_DEFAULT_UNROLL_M 8 +#define SGEMM_DEFAULT_UNROLL_N 8 + +#define DGEMM_DEFAULT_UNROLL_M 8 +#define DGEMM_DEFAULT_UNROLL_N 4 + +#define CGEMM_DEFAULT_UNROLL_M 8 +#define CGEMM_DEFAULT_UNROLL_N 4 + +#define ZGEMM_DEFAULT_UNROLL_M 4 +#define ZGEMM_DEFAULT_UNROLL_N 4 +#else #define SGEMM_DEFAULT_UNROLL_M 8 #define SGEMM_DEFAULT_UNROLL_N 4 @@ -2383,6 +2613,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ZGEMM_DEFAULT_UNROLL_M 2 #define ZGEMM_DEFAULT_UNROLL_N 2 +#endif #define SGEMM_DEFAULT_P 64 #define DGEMM_DEFAULT_P 44 @@ -2405,7 +2636,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 16 #endif -#ifdef LOONGSON3B +#if defined(LOONGSON3R3) +////Copy from SICORTEX #define SNUMOPT 2 #define DNUMOPT 2 @@ -2413,32 +2645,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
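[Editor's note: the POWER10 block above ends by #undef-ing the SBGEMM (bfloat16) defaults before redefining them. That is the usual way to let a later, more specific target override values a generic section set earlier without triggering macro-redefinition warnings; a minimal sketch with the POWER10 value from the hunk:

    /* generic default, as an earlier section might set it */
    #define SBGEMM_DEFAULT_P 256

    /* target-specific override: drop the old value, pin the tuned one */
    #undef  SBGEMM_DEFAULT_P
    #define SBGEMM_DEFAULT_P 832
]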
#define GEMM_DEFAULT_OFFSET_B 0 #define GEMM_DEFAULT_ALIGN (BLASLONG)0x03fffUL -#define SGEMM_DEFAULT_UNROLL_M 2 -#define SGEMM_DEFAULT_UNROLL_N 2 +#define SGEMM_DEFAULT_UNROLL_M 8 +#define SGEMM_DEFAULT_UNROLL_N 4 -#define DGEMM_DEFAULT_UNROLL_M 2 -#define DGEMM_DEFAULT_UNROLL_N 2 +#define DGEMM_DEFAULT_UNROLL_M 4 +#define DGEMM_DEFAULT_UNROLL_N 4 -#define CGEMM_DEFAULT_UNROLL_M 2 +#define CGEMM_DEFAULT_UNROLL_M 4 #define CGEMM_DEFAULT_UNROLL_N 2 #define ZGEMM_DEFAULT_UNROLL_M 2 #define ZGEMM_DEFAULT_UNROLL_N 2 #define SGEMM_DEFAULT_P 64 -#define DGEMM_DEFAULT_P 24 -#define CGEMM_DEFAULT_P 24 -#define ZGEMM_DEFAULT_P 20 +#define DGEMM_DEFAULT_P 44 +#define CGEMM_DEFAULT_P 64 +#define ZGEMM_DEFAULT_P 32 #define SGEMM_DEFAULT_Q 192 -#define DGEMM_DEFAULT_Q 128 +#define DGEMM_DEFAULT_Q 92 #define CGEMM_DEFAULT_Q 128 -#define ZGEMM_DEFAULT_Q 64 +#define ZGEMM_DEFAULT_Q 80 -#define SGEMM_DEFAULT_R 512 -#define DGEMM_DEFAULT_R 512 -#define CGEMM_DEFAULT_R 512 -#define ZGEMM_DEFAULT_R 512 +#define SGEMM_DEFAULT_R 640 +#define DGEMM_DEFAULT_R dgemm_r +#define CGEMM_DEFAULT_R 640 +#define ZGEMM_DEFAULT_R 640 #define GEMM_OFFSET_A1 0x10000 #define GEMM_OFFSET_B1 0x100000 @@ -2446,7 +2678,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 16 #endif -#if defined(P5600) || defined(MIPS1004K) || defined(I6400) || defined(P6600) || defined(I6500) +#if defined(P5600) || defined(MIPS1004K) || defined(MIPS24K) || defined(I6400) || defined(P6600) || defined(I6500) #define SNUMOPT 2 #define DNUMOPT 2 @@ -2498,19 +2730,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 16 #endif -#ifdef ARMV7 -#define SNUMOPT 2 -#define DNUMOPT 2 - +#ifdef RISCV64_GENERIC #define GEMM_DEFAULT_OFFSET_A 0 #define GEMM_DEFAULT_OFFSET_B 0 #define GEMM_DEFAULT_ALIGN (BLASLONG)0x03fffUL -#define SGEMM_DEFAULT_UNROLL_M 4 -#define SGEMM_DEFAULT_UNROLL_N 4 +#define SGEMM_DEFAULT_UNROLL_M 2 +#define SGEMM_DEFAULT_UNROLL_N 2 -#define DGEMM_DEFAULT_UNROLL_M 4 -#define DGEMM_DEFAULT_UNROLL_N 4 +#define DGEMM_DEFAULT_UNROLL_M 2 +#define DGEMM_DEFAULT_UNROLL_N 2 #define CGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_N 2 @@ -2533,13 +2762,53 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CGEMM_DEFAULT_R 4096 #define ZGEMM_DEFAULT_R 4096 +#define SYMV_P 16 +#define GEMM_DEFAULT_OFFSET_A 0 +#define GEMM_DEFAULT_OFFSET_B 0 -#define SYMV_P 16 #endif +#ifdef C910V +#define GEMM_DEFAULT_OFFSET_A 0 +#define GEMM_DEFAULT_OFFSET_B 0 +#define GEMM_DEFAULT_ALIGN 0x03fffUL -#if defined(ARMV6) +#define SGEMM_DEFAULT_UNROLL_M 16 +#define SGEMM_DEFAULT_UNROLL_N 4 + +#define DGEMM_DEFAULT_UNROLL_M 8 +#define DGEMM_DEFAULT_UNROLL_N 4 + +#define CGEMM_DEFAULT_UNROLL_M 2 +#define CGEMM_DEFAULT_UNROLL_N 2 + +#define ZGEMM_DEFAULT_UNROLL_M 2 +#define ZGEMM_DEFAULT_UNROLL_N 2 + +#define SGEMM_DEFAULT_P 160 +#define DGEMM_DEFAULT_P 160 +#define CGEMM_DEFAULT_P 96 +#define ZGEMM_DEFAULT_P 64 + +#define SGEMM_DEFAULT_Q 240 +#define DGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 120 +#define ZGEMM_DEFAULT_Q 120 + +#define SGEMM_DEFAULT_R 12288 +#define DGEMM_DEFAULT_R 8192 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 + +#define SYMV_P 16 + +#define GEMM_DEFAULT_OFFSET_A 0 +#define GEMM_DEFAULT_OFFSET_B 0 + +#endif + +#ifdef ARMV7 #define SNUMOPT 2 #define DNUMOPT 2 @@ -2548,10 +2817,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
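[Editor's note: in the LOONGSON3R3 hunk above, DGEMM_DEFAULT_R switches from the constant 512 to dgemm_r, which in this header family expands to a value computed from the buffer geometry rather than a literal, so consumers must treat *_DEFAULT_R as an expression. A hedged sketch of the idea; the sizing formula below is invented for illustration and is not the real definition:

    #include <stddef.h>

    #define DGEMM_P     44
    #define DGEMM_Q     92
    #define BUFFER_SIZE (32UL << 20)   /* hypothetical packing-buffer size */

    /* hypothetical: how many columns of B still fit in the buffer
       once a DGEMM_P x DGEMM_Q panel of A has been packed */
    #define dgemm_r (BUFFER_SIZE / (sizeof(double) * DGEMM_Q) - DGEMM_P)
    #define DGEMM_DEFAULT_R dgemm_r

    size_t dgemm_r_value(void) { return DGEMM_DEFAULT_R; }
]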
#define GEMM_DEFAULT_ALIGN (BLASLONG)0x03fffUL #define SGEMM_DEFAULT_UNROLL_M 4 -#define SGEMM_DEFAULT_UNROLL_N 2 +#define SGEMM_DEFAULT_UNROLL_N 4 #define DGEMM_DEFAULT_UNROLL_M 4 -#define DGEMM_DEFAULT_UNROLL_N 2 +#define DGEMM_DEFAULT_UNROLL_N 4 #define CGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_N 2 @@ -2575,12 +2844,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ZGEMM_DEFAULT_R 4096 + #define SYMV_P 16 #endif -// Common ARMv8 parameters -#if defined(ARMV8) +#if defined(ARMV6) #define SNUMOPT 2 #define DNUMOPT 2 @@ -2588,15 +2857,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_OFFSET_B 0 #define GEMM_DEFAULT_ALIGN (BLASLONG)0x03fffUL -#define SYMV_P 16 - -// Darwin / Cross -#if defined(OS_DARWIN) && defined(CROSS) - -#define SGEMM_DEFAULT_UNROLL_M 2 +#define SGEMM_DEFAULT_UNROLL_M 4 #define SGEMM_DEFAULT_UNROLL_N 2 -#define DGEMM_DEFAULT_UNROLL_M 2 +#define DGEMM_DEFAULT_UNROLL_M 4 #define DGEMM_DEFAULT_UNROLL_N 2 #define CGEMM_DEFAULT_UNROLL_M 2 @@ -2620,11 +2884,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CGEMM_DEFAULT_R 4096 #define ZGEMM_DEFAULT_R 4096 -#else // Linux / Native -#if defined(CORTEXA53) || defined(CORTEXA57) || \ +#define SYMV_P 16 +#endif + +/* Common ARMv8 parameters */ +#if defined(ARMV8) + +#define SNUMOPT 2 +#define DNUMOPT 2 + +#define GEMM_DEFAULT_OFFSET_A 0 +#define GEMM_DEFAULT_OFFSET_B 0 +#define GEMM_DEFAULT_ALIGN 0x03fffUL + +#define SYMV_P 16 + +#if defined(CORTEXA57) || \ defined(CORTEXA72) || defined(CORTEXA73) || \ - defined(FALKOR) || defined(TSV110) + defined(FALKOR) || defined(TSV110) || defined(EMAG8180) #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 4 @@ -2638,15 +2916,59 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ZGEMM_DEFAULT_UNROLL_M 4 #define ZGEMM_DEFAULT_UNROLL_N 4 -#define SGEMM_DEFAULT_P 512 -#define DGEMM_DEFAULT_P 256 -#define CGEMM_DEFAULT_P 256 +/*FIXME: this should be using the cache size, but there is currently no easy way to +query that on ARM. 
So if getarch counted more than 8 cores we simply assume the host +is a big desktop or server with abundant cache rather than a phone or embedded device */ +#if NUM_CORES > 8 || defined(TSV110) || defined(EMAG8180) + #define SGEMM_DEFAULT_P 512 + #define DGEMM_DEFAULT_P 256 + #define CGEMM_DEFAULT_P 256 + #define ZGEMM_DEFAULT_P 128 + + #define SGEMM_DEFAULT_Q 1024 + #define DGEMM_DEFAULT_Q 512 + #define CGEMM_DEFAULT_Q 512 + #define ZGEMM_DEFAULT_Q 512 +#else + #define SGEMM_DEFAULT_P 128 + #define DGEMM_DEFAULT_P 160 + #define CGEMM_DEFAULT_P 128 + #define ZGEMM_DEFAULT_P 128 + + #define SGEMM_DEFAULT_Q 352 + #define DGEMM_DEFAULT_Q 128 + #define CGEMM_DEFAULT_Q 224 + #define ZGEMM_DEFAULT_Q 112 +#endif + +#define SGEMM_DEFAULT_R 4096 +#define DGEMM_DEFAULT_R 4096 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 2048 + +#elif defined(CORTEXA53) + +#define SGEMM_DEFAULT_UNROLL_M 8 +#define SGEMM_DEFAULT_UNROLL_N 8 + +#define DGEMM_DEFAULT_UNROLL_M 8 +#define DGEMM_DEFAULT_UNROLL_N 4 + +#define CGEMM_DEFAULT_UNROLL_M 8 +#define CGEMM_DEFAULT_UNROLL_N 4 + +#define ZGEMM_DEFAULT_UNROLL_M 4 +#define ZGEMM_DEFAULT_UNROLL_N 4 + +#define SGEMM_DEFAULT_P 256 +#define DGEMM_DEFAULT_P 160 +#define CGEMM_DEFAULT_P 128 #define ZGEMM_DEFAULT_P 128 -#define SGEMM_DEFAULT_Q 1024 -#define DGEMM_DEFAULT_Q 512 -#define CGEMM_DEFAULT_Q 512 -#define ZGEMM_DEFAULT_Q 512 +#define SGEMM_DEFAULT_Q 256 +#define DGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 224 +#define ZGEMM_DEFAULT_Q 112 #define SGEMM_DEFAULT_R 4096 #define DGEMM_DEFAULT_R 4096 @@ -2711,7 +3033,36 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CGEMM_DEFAULT_R 4096 #define ZGEMM_DEFAULT_R 4096 -#else // Other/undetected ARMv8 cores +#elif defined(THUNDERX3T110) + +#define SGEMM_DEFAULT_UNROLL_M 16 +#define SGEMM_DEFAULT_UNROLL_N 4 + +#define DGEMM_DEFAULT_UNROLL_M 8 +#define DGEMM_DEFAULT_UNROLL_N 4 + +#define CGEMM_DEFAULT_UNROLL_M 8 +#define CGEMM_DEFAULT_UNROLL_N 4 + +#define ZGEMM_DEFAULT_UNROLL_M 4 +#define ZGEMM_DEFAULT_UNROLL_N 4 + +#define SGEMM_DEFAULT_P 128 +#define DGEMM_DEFAULT_P 320 +#define CGEMM_DEFAULT_P 128 +#define ZGEMM_DEFAULT_P 128 + +#define SGEMM_DEFAULT_Q 352 +#define DGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 224 +#define ZGEMM_DEFAULT_Q 112 + +#define SGEMM_DEFAULT_R 4096 +#define DGEMM_DEFAULT_R 4096 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 + +#elif defined(NEOVERSEN1) #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 4 @@ -2740,11 +3091,38 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
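[Editor's note: the FIXME above is candid about the heuristic in play: getarch bakes NUM_CORES into the build, and the core count stands in for the cache size that cannot easily be queried on ARM. The result is a compile-time fork over block sizes, roughly:

    /* condensed restatement of the fork above; the condition is decided
       when the library is built, not at run time */
    #if NUM_CORES > 8 || defined(TSV110) || defined(EMAG8180)
    #define SGEMM_DEFAULT_P 512   /* assume server-class caches */
    #else
    #define SGEMM_DEFAULT_P 128   /* assume phone/embedded caches */
    #endif
]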
#define CGEMM_DEFAULT_R 4096 #define ZGEMM_DEFAULT_R 4096 -#endif // Cores +#else /* Other/undetected ARMv8 cores */ + +#define SGEMM_DEFAULT_UNROLL_M 16 +#define SGEMM_DEFAULT_UNROLL_N 4 + +#define DGEMM_DEFAULT_UNROLL_M 8 +#define DGEMM_DEFAULT_UNROLL_N 4 -#endif // Linux / Darwin +#define CGEMM_DEFAULT_UNROLL_M 8 +#define CGEMM_DEFAULT_UNROLL_N 4 -#endif // ARMv8 +#define ZGEMM_DEFAULT_UNROLL_M 4 +#define ZGEMM_DEFAULT_UNROLL_N 4 + +#define SGEMM_DEFAULT_P 128 +#define DGEMM_DEFAULT_P 160 +#define CGEMM_DEFAULT_P 128 +#define ZGEMM_DEFAULT_P 128 + +#define SGEMM_DEFAULT_Q 352 +#define DGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 224 +#define ZGEMM_DEFAULT_Q 112 + +#define SGEMM_DEFAULT_R 4096 +#define DGEMM_DEFAULT_R 4096 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 + +#endif /* Cores */ + +#endif /* ARMv8 */ #if defined(ARMV5) #define SNUMOPT 2 @@ -2956,7 +3334,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_OFFSET_B 0 #define GEMM_DEFAULT_ALIGN 0x03fffUL -#define SGEMM_DEFAULT_UNROLL_M 8 +#define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 4 #define DGEMM_DEFAULT_UNROLL_M 8 @@ -2968,12 +3346,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ZGEMM_DEFAULT_UNROLL_M 4 #define ZGEMM_DEFAULT_UNROLL_N 4 -#define SGEMM_DEFAULT_P 456 +#define SGEMM_DEFAULT_P 480 #define DGEMM_DEFAULT_P 320 #define CGEMM_DEFAULT_P 480 #define ZGEMM_DEFAULT_P 224 -#define SGEMM_DEFAULT_Q 488 +#define SGEMM_DEFAULT_Q 512 #define DGEMM_DEFAULT_Q 384 #define CGEMM_DEFAULT_Q 128 #define ZGEMM_DEFAULT_Q 352 diff --git a/relapack/src/cgbtrf.c b/relapack/src/cgbtrf.c index 61332c6a6..e52f2e6c1 100644 --- a/relapack/src/cgbtrf.c +++ b/relapack/src/cgbtrf.c @@ -36,6 +36,7 @@ void RELAPACK_cgbtrf( return; } + if (*m == 0 || *n == 0) return; // Constant const float ZERO[] = { 0., 0. }; @@ -56,10 +57,10 @@ void RELAPACK_cgbtrf( // Allocate work space const blasint n1 = CREC_SPLIT(*n); - const blasint mWorkl = (kv > n1) ? MAX(1, *m - *kl) : kv; - const blasint nWorkl = (kv > n1) ? n1 : kv; - const blasint mWorku = (*kl > n1) ? n1 : *kl; - const blasint nWorku = (*kl > n1) ? MAX(0, *n - *kl) : *kl; + const blasint mWorkl = abs ( (kv > n1) ? MAX(1, *m - *kl) : kv); + const blasint nWorkl = abs ( (kv > n1) ? n1 : kv); + const blasint mWorku = abs ((*kl > n1) ? n1 : *kl); + const blasint nWorku = abs ((*kl > n1) ? 
MAX(0, *n - *kl) : *kl); float *Workl = malloc(mWorkl * nWorkl * 2 * sizeof(float)); float *Worku = malloc(mWorku * nWorku * 2 * sizeof(float)); LAPACK(claset)("L", &mWorkl, &nWorkl, ZERO, ZERO, Workl, &mWorkl); @@ -82,7 +83,7 @@ static void RELAPACK_cgbtrf_rec( blasint *info ) { - if (*n <= MAX(CROSSOVER_CGBTRF, 1)) { + if (*n <= MAX(CROSSOVER_CGBTRF, 1)|| *n > *kl || *ldAb == 1) { // Unblocked LAPACK(cgbtf2)(m, n, kl, ku, Ab, ldAb, ipiv, info); return; diff --git a/relapack/src/cgetrf.c b/relapack/src/cgetrf.c index 878c9ec15..bf9ca53f4 100644 --- a/relapack/src/cgetrf.c +++ b/relapack/src/cgetrf.c @@ -30,6 +30,8 @@ void RELAPACK_cgetrf( return; } + if (*m == 0 || *n == 0) return; + const blasint sn = MIN(*m, *n); RELAPACK_cgetrf_rec(m, &sn, A, ldA, ipiv, info); @@ -62,9 +64,11 @@ static void RELAPACK_cgetrf_rec( blasint *info ) { - if (*n <= MAX(CROSSOVER_CGETRF, 1)) { + if (*m == 0 || *n == 0) return; + + if ( *n <= MAX(CROSSOVER_CGETRF, 1)) { // Unblocked - LAPACK(cgetf2)(m, n, A, ldA, ipiv, info); + LAPACK(cgetrf2)(m, n, A, ldA, ipiv, info); return; } @@ -96,6 +100,7 @@ static void RELAPACK_cgetrf_rec( // recursion(A_L, ipiv_T) RELAPACK_cgetrf_rec(m, &n1, A_L, ldA, ipiv_T, info); + if (*info) return; // apply pivots to A_R LAPACK(claswp)(&n2, A_R, ldA, iONE, &n1, ipiv_T, iONE); diff --git a/relapack/src/chegst.c b/relapack/src/chegst.c index fe77b03ea..8557c2952 100644 --- a/relapack/src/chegst.c +++ b/relapack/src/chegst.c @@ -40,6 +40,8 @@ void RELAPACK_chegst( return; } + if (*n == 0) return; + // Clean char * arguments const char cleanuplo = lower ? 'L' : 'U'; diff --git a/relapack/src/chetrf_rook.c b/relapack/src/chetrf_rook.c index 3d2fa3216..9ed1261cf 100644 --- a/relapack/src/chetrf_rook.c +++ b/relapack/src/chetrf_rook.c @@ -36,7 +36,7 @@ void RELAPACK_chetrf_rook( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork < 1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -56,7 +56,7 @@ void RELAPACK_chetrf_rook( if (*info) { const blasint minfo = -*info; - LAPACK(xerbla)("CHETRF", &minfo, strlen("CHETRF")); + LAPACK(xerbla)("CHETRF_ROOK", &minfo, strlen("CHETRF_ROOK")); return; } diff --git a/relapack/src/clauum.c b/relapack/src/clauum.c index 2bc93f182..58a14e7da 100644 --- a/relapack/src/clauum.c +++ b/relapack/src/clauum.c @@ -32,6 +32,8 @@ void RELAPACK_clauum( return; } + if (*n == 0) return; + // Clean char * arguments const char cleanuplo = lower ? 'L' : 'U'; diff --git a/relapack/src/cpbtrf.c b/relapack/src/cpbtrf.c index 971e547c6..a0fa13850 100644 --- a/relapack/src/cpbtrf.c +++ b/relapack/src/cpbtrf.c @@ -35,6 +35,8 @@ void RELAPACK_cpbtrf( return; } + if (*n == 0) return; + // Clean char * arguments const char cleanuplo = lower ? 'L' : 'U'; @@ -43,8 +45,8 @@ void RELAPACK_cpbtrf( // Allocate work space const blasint n1 = CREC_SPLIT(*n); - const blasint mWork = (*kd > n1) ? (lower ? *n - *kd : n1) : *kd; - const blasint nWork = (*kd > n1) ? (lower ? n1 : *n - *kd) : *kd; + const blasint mWork = abs((*kd > n1) ? (lower ? *n - *kd : n1) : *kd); + const blasint nWork = abs((*kd > n1) ? (lower ? 
n1 : *n - *kd) : *kd); float *Work = malloc(mWork * nWork * 2 * sizeof(float)); LAPACK(claset)(uplo, &mWork, &nWork, ZERO, ZERO, Work, &mWork); @@ -64,7 +66,7 @@ static void RELAPACK_cpbtrf_rec( blasint *info ){ - if (*n <= MAX(CROSSOVER_CPBTRF, 1)) { + if (*n <= MAX(CROSSOVER_CPBTRF, 1) || *ldAb==1) { // Unblocked LAPACK(cpbtf2)(uplo, n, kd, Ab, ldAb, info); return; @@ -148,7 +150,7 @@ static void RELAPACK_cpbtrf_rec( } // recursion(A_BR) - if (*kd > n1) + if (*kd > n1 && ldA != 0) RELAPACK_cpotrf(uplo, &n2, A_BR, ldA, info); else RELAPACK_cpbtrf_rec(uplo, &n2, kd, Ab_BR, ldAb, Work, ldWork, info); diff --git a/relapack/src/cpotrf.c b/relapack/src/cpotrf.c index 0f8e7ebb0..db06c6fef 100644 --- a/relapack/src/cpotrf.c +++ b/relapack/src/cpotrf.c @@ -32,6 +32,8 @@ void RELAPACK_cpotrf( return; } + if (*n == 0) return; + // Clean char * arguments const char cleanuplo = lower ? 'L' : 'U'; @@ -46,6 +48,7 @@ static void RELAPACK_cpotrf_rec( float *A, const blasint *ldA, blasint *info ){ + if (*n == 0) return; if (*n <= MAX(CROSSOVER_CPOTRF, 1)) { // Unblocked diff --git a/relapack/src/csytrf.c b/relapack/src/csytrf.c index 2ebc31001..807c91ece 100644 --- a/relapack/src/csytrf.c +++ b/relapack/src/csytrf.c @@ -36,7 +36,7 @@ void RELAPACK_csytrf( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork < 1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -67,6 +67,7 @@ void RELAPACK_csytrf( blasint nout; // Recursive kernel +if (*n != 0) RELAPACK_csytrf_rec(&cleanuplo, n, n, &nout, A, ldA, ipiv, cleanWork, n, info); #if XSYTRF_ALLOW_MALLOC diff --git a/relapack/src/csytrf_rook.c b/relapack/src/csytrf_rook.c index e8a9865cc..105c6b8b6 100644 --- a/relapack/src/csytrf_rook.c +++ b/relapack/src/csytrf_rook.c @@ -36,7 +36,7 @@ void RELAPACK_csytrf_rook( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork < 1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -56,7 +56,7 @@ void RELAPACK_csytrf_rook( if (*info) { const blasint minfo = -*info; - LAPACK(xerbla)("CSYTRF", &minfo, strlen("CSYTRF")); + LAPACK(xerbla)("CSYTRF_ROOK", &minfo, strlen("CSYTRF_ROOK")); return; } diff --git a/relapack/src/ctgsyl.c b/relapack/src/ctgsyl.c index 704f3ef23..632bbc14e 100644 --- a/relapack/src/ctgsyl.c +++ b/relapack/src/ctgsyl.c @@ -68,6 +68,13 @@ void RELAPACK_ctgsyl( return; } + if ( *m == 0 || *n == 0) { + *scale = 1.; + if (notran && (*ijob != 0)) + *dif = 0.; + return; + } + // Clean char * arguments const char cleantrans = notran ? 'N' : 'C'; diff --git a/relapack/src/ctrsyl.c b/relapack/src/ctrsyl.c index fed6e847e..f7b841cb0 100644 --- a/relapack/src/ctrsyl.c +++ b/relapack/src/ctrsyl.c @@ -47,6 +47,11 @@ void RELAPACK_ctrsyl( return; } + if (*m == 0 || *n == 0) { + *scale = 1.; + return; + } + // Clean char * arguments const char cleantranA = notransA ? 'N' : 'C'; const char cleantranB = notransB ? 'N' : 'C'; diff --git a/relapack/src/ctrtri.c b/relapack/src/ctrtri.c index 5201a24c7..8d736007b 100644 --- a/relapack/src/ctrtri.c +++ b/relapack/src/ctrtri.c @@ -36,6 +36,8 @@ void RELAPACK_ctrtri( return; } + if (*n == 0) return; + // Clean char * arguments const char cleanuplo = lower ? 'L' : 'U'; const char cleandiag = nounit ? 
'N' : 'U'; diff --git a/relapack/src/dgbtrf.c b/relapack/src/dgbtrf.c index cdf06ad5b..aac10f251 100644 --- a/relapack/src/dgbtrf.c +++ b/relapack/src/dgbtrf.c @@ -36,6 +36,8 @@ void RELAPACK_dgbtrf( return; } + if (*m == 0 || *n == 0) return; + // Constant const double ZERO[] = { 0. }; @@ -83,7 +85,7 @@ static void RELAPACK_dgbtrf_rec( blasint *info ) { - if (*n <= MAX(CROSSOVER_DGBTRF, 1)) { + if (*n <= MAX(CROSSOVER_DGBTRF, 1) || *n > *kl || *ldAb == 1) { // Unblocked LAPACK(dgbtf2)(m, n, kl, ku, Ab, ldAb, ipiv, info); return; @@ -195,6 +197,7 @@ static void RELAPACK_dgbtrf_rec( // Worku = A_TRr LAPACK(dlacpy)("L", &m1, &n22, A_TRr, ldA, Worku, ldWorku); // Worku = A_TL \ Worku + if (ldWorku <= 0) return; BLAS(dtrsm)("L", "L", "N", "U", &m1, &n22, ONE, A_TL, ldA, Worku, ldWorku); // A_TRr = Worku LAPACK(dlacpy)("L", &m1, &n22, Worku, ldWorku, A_TRr, ldA); diff --git a/relapack/src/dgetrf.c b/relapack/src/dgetrf.c index be960fde9..3ebfb18d2 100644 --- a/relapack/src/dgetrf.c +++ b/relapack/src/dgetrf.c @@ -29,15 +29,16 @@ void RELAPACK_dgetrf( return; } - const blasint sn = MIN(*m, *n); + if (*m == 0 || *n == 0) return; + const blasint sn = MIN(*m, *n); RELAPACK_dgetrf_rec(m, &sn, A, ldA, ipiv, info); // Right remainder if (*m < *n) { // Constants const double ONE[] = { 1. }; - const blasint iONE[] = { 1. }; + const blasint iONE[] = { 1 }; // Splitting const blasint rn = *n - *m; @@ -60,13 +61,11 @@ static void RELAPACK_dgetrf_rec( double *A, const blasint *ldA, blasint *ipiv, blasint *info ) { - - if (*n <= MAX(CROSSOVER_DGETRF, 1)) { + if ( *n <= MAX(CROSSOVER_DGETRF, 1)) { // Unblocked - LAPACK(dgetf2)(m, n, A, ldA, ipiv, info); + LAPACK(dgetrf2)(m, n, A, ldA, ipiv, info); return; } - // Constants const double ONE[] = { 1. }; const double MONE[] = { -1. }; @@ -95,6 +94,7 @@ static void RELAPACK_dgetrf_rec( // recursion(A_L, ipiv_T) RELAPACK_dgetrf_rec(m, &n1, A_L, ldA, ipiv_T, info); + if (*info) return; // apply pivots to A_R LAPACK(dlaswp)(&n2, A_R, ldA, iONE, &n1, ipiv_T, iONE); diff --git a/relapack/src/dpbtrf.c b/relapack/src/dpbtrf.c index 9380b28ad..94e9b80e2 100644 --- a/relapack/src/dpbtrf.c +++ b/relapack/src/dpbtrf.c @@ -35,6 +35,8 @@ void RELAPACK_dpbtrf( return; } + if (*n == 0) return; + // Clean char * arguments const char cleanuplo = lower ? 'L' : 'U'; @@ -43,8 +45,8 @@ void RELAPACK_dpbtrf( // Allocate work space const blasint n1 = DREC_SPLIT(*n); - const blasint mWork = (*kd > n1) ? (lower ? *n - *kd : n1) : *kd; - const blasint nWork = (*kd > n1) ? (lower ? n1 : *n - *kd) : *kd; + const blasint mWork = abs((*kd > n1) ? (lower ? *n - *kd : n1) : *kd); + const blasint nWork = abs((*kd > n1) ? (lower ? 
n1 : *n - *kd) : *kd); double *Work = malloc(mWork * nWork * sizeof(double)); LAPACK(dlaset)(uplo, &mWork, &nWork, ZERO, ZERO, Work, &mWork); @@ -64,7 +66,7 @@ static void RELAPACK_dpbtrf_rec( blasint *info ){ - if (*n <= MAX(CROSSOVER_DPBTRF, 1)) { + if (*n <= MAX(CROSSOVER_DPBTRF, 1) || *ldAb == 1) { // Unblocked LAPACK(dpbtf2)(uplo, n, kd, Ab, ldAb, info); return; @@ -148,7 +150,7 @@ static void RELAPACK_dpbtrf_rec( } // recursion(A_BR) - if (*kd > n1) + if (*kd > n1 && ldA != 0) RELAPACK_dpotrf(uplo, &n2, A_BR, ldA, info); else RELAPACK_dpbtrf_rec(uplo, &n2, kd, Ab_BR, ldAb, Work, ldWork, info); diff --git a/relapack/src/dsytrf.c b/relapack/src/dsytrf.c index 43d28f94e..ba869ad11 100644 --- a/relapack/src/dsytrf.c +++ b/relapack/src/dsytrf.c @@ -36,7 +36,7 @@ void RELAPACK_dsytrf( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork < 1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -67,6 +67,7 @@ void RELAPACK_dsytrf( blasint nout; // Recursive kernel +if (*n != 0) RELAPACK_dsytrf_rec(&cleanuplo, n, n, &nout, A, ldA, ipiv, cleanWork, n, info); #if XSYTRF_ALLOW_MALLOC diff --git a/relapack/src/dsytrf_rook.c b/relapack/src/dsytrf_rook.c index 78fa652ab..fcdc2809f 100644 --- a/relapack/src/dsytrf_rook.c +++ b/relapack/src/dsytrf_rook.c @@ -36,7 +36,7 @@ void RELAPACK_dsytrf_rook( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork <1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -56,7 +56,7 @@ void RELAPACK_dsytrf_rook( if (*info) { const blasint minfo = -*info; - LAPACK(xerbla)("DSYTRF", &minfo, strlen("DSYTRF")); + LAPACK(xerbla)("DSYTRF_ROOK", &minfo, strlen("DSYTRF_ROOK")); return; } diff --git a/relapack/src/dtrsyl.c b/relapack/src/dtrsyl.c index 766377300..4948c4977 100644 --- a/relapack/src/dtrsyl.c +++ b/relapack/src/dtrsyl.c @@ -49,6 +49,11 @@ void RELAPACK_dtrsyl( return; } + if (*m == 0 || *n == 0) { + *scale = 1.; + return; + } + // Clean char * arguments const char cleantranA = notransA ? 'N' : (transA ? 'T' : 'C'); const char cleantranB = notransB ? 'N' : (transB ? 
'T' : 'C'); diff --git a/relapack/src/lapack.h b/relapack/src/lapack.h index 776b0589f..9e9cdff7e 100644 --- a/relapack/src/lapack.h +++ b/relapack/src/lapack.h @@ -4,6 +4,13 @@ extern blasint LAPACK(lsame)(const char *, const char *); extern blasint LAPACK(xerbla)(const char *, const blasint *, int); +extern const blasint LAPACK(ilaenv)(const blasint *, const char*, const char*, const blasint* , int , int, int ); + +extern void LAPACK(sgetrf2)(const blasint *, const blasint *, float *, const blasint *, blasint *, blasint *); +extern void LAPACK(dgetrf2)(const blasint *, const blasint *, double *, const blasint *, blasint *, blasint *); +extern void LAPACK(cgetrf2)(const blasint *, const blasint *, float *, const blasint *, blasint *, blasint *); +extern void LAPACK(zgetrf2)(const blasint *, const blasint *, double *, const blasint *, blasint *, blasint *); + extern void LAPACK(slaswp)(const blasint *, float *, const blasint *, const blasint *, const blasint *, const blasint *, const blasint *); extern void LAPACK(dlaswp)(const blasint *, double *, const blasint *, const blasint *, const blasint *, const blasint *, const blasint *); extern void LAPACK(claswp)(const blasint *, float *, const blasint *, const blasint *, const blasint *, const blasint *, const blasint *); diff --git a/relapack/src/sgbtrf.c b/relapack/src/sgbtrf.c index 3e3fdf455..76e84e671 100644 --- a/relapack/src/sgbtrf.c +++ b/relapack/src/sgbtrf.c @@ -35,6 +35,13 @@ void RELAPACK_sgbtrf( return; } + if (*m == 0 || *n == 0) return; + + if (*ldAb == 1) { + LAPACK(sgbtf2)(m, n, kl, ku, Ab, ldAb, ipiv, info); + return; + } + // Constant const float ZERO[] = { 0. }; @@ -82,8 +89,9 @@ static void RELAPACK_sgbtrf_rec( blasint *info ) { + if (*m == 0 || *n == 0) return; - if (*n <= MAX(CROSSOVER_SGBTRF, 1)) { + if ( *n <= MAX(CROSSOVER_SGBTRF, 1) || *n > *kl || *ldAb == 1) { // Unblocked LAPACK(sgbtf2)(m, n, kl, ku, Ab, ldAb, ipiv, info); return; @@ -160,7 +168,7 @@ static void RELAPACK_sgbtrf_rec( // recursion(Ab_L, ipiv_T) RELAPACK_sgbtrf_rec(m, &n1, kl, ku, Ab_L, ldAb, ipiv_T, Workl, ldWorkl, Worku, ldWorku, info); - + if (*info) return; // Workl = A_BLb LAPACK(slacpy)("U", &m22, &n1, A_BLb, ldA, Workl, ldWorkl); @@ -222,8 +230,8 @@ static void RELAPACK_sgbtrf_rec( // recursion(Ab_BR, ipiv_B) //cause of infinite recursion here ? -// RELAPACK_sgbtrf_rec(&m2, &n2, kl, ku, Ab_BR, ldAb, ipiv_B, Workl, ldWorkl, Worku, ldWorku, info); - LAPACK(sgbtf2)(&m2, &n2, kl, ku, Ab_BR, ldAb, ipiv_B, info); + RELAPACK_sgbtrf_rec(&m2, &n2, kl, ku, Ab_BR, ldAb, ipiv_B, Workl, ldWorkl, Worku, ldWorku, info); +// LAPACK(sgbtf2)(&m2, &n2, kl, ku, Ab_BR, ldAb, ipiv_B, info); if (*info) *info += n1; // shift pivots diff --git a/relapack/src/sgetrf.c b/relapack/src/sgetrf.c index 0231cc166..a0c7015fd 100644 --- a/relapack/src/sgetrf.c +++ b/relapack/src/sgetrf.c @@ -14,7 +14,6 @@ void RELAPACK_sgetrf( float *A, const blasint *ldA, blasint *ipiv, blasint *info ) { - // Check arguments *info = 0; if (*m < 0) @@ -28,6 +27,9 @@ void RELAPACK_sgetrf( LAPACK(xerbla)("SGETRF", &minfo, strlen("SGETRF")); return; } + + if (*m == 0 || *n == 0) return; + const blasint sn = MIN(*m, *n); RELAPACK_sgetrf_rec(m, &sn, A, ldA, ipiv, info); @@ -35,7 +37,7 @@ void RELAPACK_sgetrf( if (*m < *n) { // Constants const float ONE[] = { 1. }; - const blasint iONE[] = { 1. 
}; + const blasint iONE[] = { 1 }; // Splitting const blasint rn = *n - *m; @@ -58,9 +60,12 @@ static void RELAPACK_sgetrf_rec( float *A, const blasint *ldA, blasint *ipiv, blasint *info ) { - if (*n <= MAX(CROSSOVER_SGETRF, 1)) { + + if (*m == 0 || *n == 0) return; + + if ( *n <= MAX(CROSSOVER_SGETRF, 1)) { // Unblocked - LAPACK(sgetf2)(m, n, A, ldA, ipiv, info); + LAPACK(sgetrf2)(m, n, A, ldA, ipiv, info); return; } @@ -91,6 +96,8 @@ static void RELAPACK_sgetrf_rec( // recursion(A_L, ipiv_T) RELAPACK_sgetrf_rec(m, &n1, A_L, ldA, ipiv_T, info); + if (*info) + return; // apply pivots to A_R LAPACK(slaswp)(&n2, A_R, ldA, iONE, &n1, ipiv_T, iONE); diff --git a/relapack/src/spbtrf.c b/relapack/src/spbtrf.c index 26804dcc2..330276312 100644 --- a/relapack/src/spbtrf.c +++ b/relapack/src/spbtrf.c @@ -35,6 +35,9 @@ void RELAPACK_spbtrf( return; } + + if (*n == 0) return; + // Clean char * arguments const char cleanuplo = lower ? 'L' : 'U'; @@ -43,8 +46,8 @@ void RELAPACK_spbtrf( // Allocate work space const blasint n1 = SREC_SPLIT(*n); - const blasint mWork = (*kd > n1) ? (lower ? *n - *kd : n1) : *kd; - const blasint nWork = (*kd > n1) ? (lower ? n1 : *n - *kd) : *kd; + const blasint mWork = abs( (*kd > n1) ? (lower ? *n - *kd : n1) : *kd); + const blasint nWork = abs((*kd > n1) ? (lower ? n1 : *n - *kd) : *kd); float *Work = malloc(mWork * nWork * sizeof(float)); LAPACK(slaset)(uplo, &mWork, &nWork, ZERO, ZERO, Work, &mWork); @@ -64,7 +67,9 @@ static void RELAPACK_spbtrf_rec( blasint *info ){ - if (*n <= MAX(CROSSOVER_SPBTRF, 1)) { + if (*n == 0 ) return; + + if ( *n <= MAX(CROSSOVER_SPBTRF, 1) || *ldAb == 1) { // Unblocked LAPACK(spbtf2)(uplo, n, kd, Ab, ldAb, info); return; @@ -148,7 +153,7 @@ static void RELAPACK_spbtrf_rec( } // recursion(A_BR) - if (*kd > n1) + if (*kd > n1 && ldA != 0) RELAPACK_spotrf(uplo, &n2, A_BR, ldA, info); else RELAPACK_spbtrf_rec(uplo, &n2, kd, Ab_BR, ldAb, Work, ldWork, info); diff --git a/relapack/src/ssytrf.c b/relapack/src/ssytrf.c index 9fe7ce4a6..5f8e03391 100644 --- a/relapack/src/ssytrf.c +++ b/relapack/src/ssytrf.c @@ -35,7 +35,7 @@ void RELAPACK_ssytrf( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork <1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -66,6 +66,7 @@ void RELAPACK_ssytrf( blasint nout; // Recursive kernel +if (*n != 0) RELAPACK_ssytrf_rec(&cleanuplo, n, n, &nout, A, ldA, ipiv, cleanWork, n, info); #if XSYTRF_ALLOW_MALLOC diff --git a/relapack/src/ssytrf_rook.c b/relapack/src/ssytrf_rook.c index abcf29d1c..b40f12271 100644 --- a/relapack/src/ssytrf_rook.c +++ b/relapack/src/ssytrf_rook.c @@ -36,7 +36,7 @@ void RELAPACK_ssytrf_rook( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork < 1 ||*lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -56,7 +56,7 @@ void RELAPACK_ssytrf_rook( if (*info) { const blasint minfo = -*info; - LAPACK(xerbla)("SSYTRF", &minfo, strlen("SSYTRF")); + LAPACK(xerbla)("SSYTRF_ROOK", &minfo, strlen("SSYTRF_ROOK")); return; } @@ -67,6 +67,7 @@ void RELAPACK_ssytrf_rook( blasint nout; // Recursive kernel +if (*n != 0) RELAPACK_ssytrf_rook_rec(&cleanuplo, n, n, &nout, A, ldA, ipiv, cleanWork, n, info); #if XSYTRF_ALLOW_MALLOC diff --git a/relapack/src/strsyl.c b/relapack/src/strsyl.c index 012fb3548..d85963fcc 100644 --- a/relapack/src/strsyl.c +++ b/relapack/src/strsyl.c @@ -49,6 +49,11 @@ void 
RELAPACK_strsyl( return; } + if (*m == 0 || *n == 0) { + *scale = 1.; + return; + } + // Clean char * arguments const char cleantranA = notransA ? 'N' : (transA ? 'T' : 'C'); const char cleantranB = notransB ? 'N' : (transB ? 'T' : 'C'); diff --git a/relapack/src/zgbtrf.c b/relapack/src/zgbtrf.c index d4ba41753..5d7dfd3c7 100644 --- a/relapack/src/zgbtrf.c +++ b/relapack/src/zgbtrf.c @@ -36,6 +36,8 @@ void RELAPACK_zgbtrf( return; } + if (*m == 0 || *n == 0) return; + // Constant const double ZERO[] = { 0., 0. }; @@ -82,7 +84,7 @@ static void RELAPACK_zgbtrf_rec( blasint *info ) { - if (*n <= MAX(CROSSOVER_ZGBTRF, 1)) { + if (*n <= MAX(CROSSOVER_ZGBTRF, 1) || *n > *kl || *ldAb == 1) { // Unblocked LAPACK(zgbtf2)(m, n, kl, ku, Ab, ldAb, ipiv, info); return; @@ -92,6 +94,7 @@ static void RELAPACK_zgbtrf_rec( const double ONE[] = { 1., 0. }; const double MONE[] = { -1., 0. }; const blasint iONE[] = { 1 }; + const blasint min11 = -11; // Loop iterators blasint i, j; @@ -158,6 +161,7 @@ static void RELAPACK_zgbtrf_rec( // recursion(Ab_L, ipiv_T) RELAPACK_zgbtrf_rec(m, &n1, kl, ku, Ab_L, ldAb, ipiv_T, Workl, ldWorkl, Worku, ldWorku, info); +if (*info) return; // Workl = A_BLb LAPACK(zlacpy)("U", &m22, &n1, A_BLb, ldA, Workl, ldWorkl); @@ -193,11 +197,21 @@ static void RELAPACK_zgbtrf_rec( } // A_TRl = A_TL \ A_TRl + if (*ldA < MAX(1,m1)) { + LAPACK(xerbla)("ZGBTRF", &min11, strlen("ZGBTRF")); + return; + } else { BLAS(ztrsm)("L", "L", "N", "U", &m1, &n21, ONE, A_TL, ldA, A_TRl, ldA); + } // Worku = A_TRr LAPACK(zlacpy)("L", &m1, &n22, A_TRr, ldA, Worku, ldWorku); // Worku = A_TL \ Worku + if (*ldWorku < MAX(1,m1)) { + LAPACK(xerbla)("ZGBTRF", &min11, strlen("ZGBTRF")); + return; + } else { BLAS(ztrsm)("L", "L", "N", "U", &m1, &n22, ONE, A_TL, ldA, Worku, ldWorku); + } // A_TRr = Worku LAPACK(zlacpy)("L", &m1, &n22, Worku, ldWorku, A_TRr, ldA); // A_BRtl = A_BRtl - A_BLt * A_TRl diff --git a/relapack/src/zgetrf.c b/relapack/src/zgetrf.c index b0d14ffb1..8c3e8a8e8 100644 --- a/relapack/src/zgetrf.c +++ b/relapack/src/zgetrf.c @@ -30,6 +30,7 @@ void RELAPACK_zgetrf( return; } + if (*m == 0 || *n == 0) return; const blasint sn = MIN(*m, *n); RELAPACK_zgetrf_rec(m, &sn, A, ldA, ipiv, info); @@ -62,9 +63,11 @@ static void RELAPACK_zgetrf_rec( blasint *info ) { - if (*n <= MAX(CROSSOVER_ZGETRF, 1)) { + if (*m == 0 || *n == 0) return; + + if ( *n <= MAX(CROSSOVER_ZGETRF, 1)) { // Unblocked - LAPACK(zgetf2)(m, n, A, ldA, ipiv, info); + LAPACK(zgetrf2)(m, n, A, ldA, ipiv, info); return; } @@ -96,6 +99,8 @@ static void RELAPACK_zgetrf_rec( // recursion(A_L, ipiv_T) RELAPACK_zgetrf_rec(m, &n1, A_L, ldA, ipiv_T, info); +if (*info) return; + // apply pivots to A_R LAPACK(zlaswp)(&n2, A_R, ldA, iONE, &n1, ipiv_T, iONE); diff --git a/relapack/src/zhetrf_rook.c b/relapack/src/zhetrf_rook.c index 285aea96e..605e3a77f 100644 --- a/relapack/src/zhetrf_rook.c +++ b/relapack/src/zhetrf_rook.c @@ -36,7 +36,7 @@ void RELAPACK_zhetrf_rook( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork < 1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -56,7 +56,7 @@ void RELAPACK_zhetrf_rook( if (*info) { const blasint minfo = -*info; - LAPACK(xerbla)("ZHETRF", &minfo, strlen("ZHETRF")); + LAPACK(xerbla)("ZHETRF_ROOK", &minfo, strlen("ZHETRF_ROOK")); return; } diff --git a/relapack/src/zpbtrf.c b/relapack/src/zpbtrf.c index fb0e1e97b..8b094380c 100644 --- a/relapack/src/zpbtrf.c +++ b/relapack/src/zpbtrf.c @@ -35,6 
+35,8 @@ void RELAPACK_zpbtrf( return; } + if (*n == 0) return; + // Clean char * arguments const char cleanuplo = lower ? 'L' : 'U'; @@ -43,9 +45,10 @@ void RELAPACK_zpbtrf( // Allocate work space const blasint n1 = ZREC_SPLIT(*n); - const blasint mWork = (*kd > n1) ? (lower ? *n - *kd : n1) : *kd; - const blasint nWork = (*kd > n1) ? (lower ? n1 : *n - *kd) : *kd; + const blasint mWork = abs((*kd > n1) ? (lower ? *n - *kd : n1) : *kd); + const blasint nWork = abs((*kd > n1) ? (lower ? n1 : *n - *kd) : *kd); double *Work = malloc(mWork * nWork * 2 * sizeof(double)); + LAPACK(zlaset)(uplo, &mWork, &nWork, ZERO, ZERO, Work, &mWork); // Recursive kernel @@ -64,7 +67,7 @@ static void RELAPACK_zpbtrf_rec( blasint *info ){ - if (*n <= MAX(CROSSOVER_ZPBTRF, 1)) { + if (*n <= MAX(CROSSOVER_ZPBTRF, 1) || *ldAb == 1) { // Unblocked LAPACK(zpbtf2)(uplo, n, kd, Ab, ldAb, info); return; @@ -148,7 +151,7 @@ static void RELAPACK_zpbtrf_rec( } // recursion(A_BR) - if (*kd > n1) + if (*kd > n1 && ldA != 0) RELAPACK_zpotrf(uplo, &n2, A_BR, ldA, info); else RELAPACK_zpbtrf_rec(uplo, &n2, kd, Ab_BR, ldAb, Work, ldWork, info); diff --git a/relapack/src/zsytrf.c b/relapack/src/zsytrf.c index f3412ad8f..59daba02f 100644 --- a/relapack/src/zsytrf.c +++ b/relapack/src/zsytrf.c @@ -36,7 +36,7 @@ void RELAPACK_zsytrf( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork < 1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -67,6 +67,7 @@ void RELAPACK_zsytrf( blasint nout; // Recursive kernel + if (*n != 0) RELAPACK_zsytrf_rec(&cleanuplo, n, n, &nout, A, ldA, ipiv, cleanWork, n, info); #if XSYTRF_ALLOW_MALLOC diff --git a/relapack/src/zsytrf_rook.c b/relapack/src/zsytrf_rook.c index fc6d73645..0fd8e7033 100644 --- a/relapack/src/zsytrf_rook.c +++ b/relapack/src/zsytrf_rook.c @@ -36,7 +36,7 @@ void RELAPACK_zsytrf_rook( *info = -2; else if (*ldA < MAX(1, *n)) *info = -4; - else if (*lWork < minlWork && *lWork != -1) + else if ((*lWork < 1 || *lWork < minlWork) && *lWork != -1) *info = -7; else if (*lWork == -1) { // Work size query @@ -56,7 +56,7 @@ void RELAPACK_zsytrf_rook( if (*info) { const blasint minfo = -*info; - LAPACK(xerbla)("ZSYTRF", &minfo, strlen("ZSYTRF")); + LAPACK(xerbla)("ZSYTRF_ROOK", &minfo, strlen("ZSYTRF_ROOK")); return; } @@ -67,6 +67,7 @@ void RELAPACK_zsytrf_rook( blasint nout; // Recursive kernel + if (*n != 0) RELAPACK_zsytrf_rook_rec(&cleanuplo, n, n, &nout, A, ldA, ipiv, cleanWork, n, info); #if XSYTRF_ALLOW_MALLOC diff --git a/relapack/src/ztrsyl.c b/relapack/src/ztrsyl.c index 567ef115a..9d0107526 100644 --- a/relapack/src/ztrsyl.c +++ b/relapack/src/ztrsyl.c @@ -47,6 +47,11 @@ void RELAPACK_ztrsyl( return; } + if (*m == 0 || *n == 0) { + *scale = 1.; + return; + } + // Clean char * arguments const char cleantranA = notransA ? 'N' : 'C'; const char cleantranB = notransB ? 'N' : 'C'; diff --git a/relapack/src/ztrtri.c b/relapack/src/ztrtri.c index 3f6606d84..54854f525 100644 --- a/relapack/src/ztrtri.c +++ b/relapack/src/ztrtri.c @@ -69,8 +69,8 @@ static void RELAPACK_ztrtri_rec( } // Constants - const double ONE[] = { 1. }; - const double MONE[] = { -1. }; + const double ONE[] = { 1., 0. }; + const double MONE[] = { -1. , 0. 
}; // Splitting const blasint n1 = ZREC_SPLIT(*n); diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 25a29030a..ccd1175a3 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -3,14 +3,21 @@ include_directories(${PROJECT_BINARY_DIR}) enable_language(Fortran) -set(OpenBLAS_Tests - sblat1 sblat2 sblat3 - dblat1 dblat2 dblat3 - cblat1 cblat2 cblat3 - zblat1 zblat2 zblat3) +if (BUILD_SINGLE) + list( APPEND OpenBLAS_Tests sblat1 sblat2 sblat3) +endif() +if (BUILD_DOUBLE) + list (APPEND OpenBLAS_Tests dblat1 dblat2 dblat3) +endif() +if (BUILD_COMPLEX) + list (APPEND OpenBLAS_Tests cblat1 cblat2 cblat3) +endif() +if (BUILD_COMPLEX16) + list (APPEND OpenBLAS_Tests zblat1 zblat2 zblat3) +endif() foreach(test_bin ${OpenBLAS_Tests}) -add_executable(${test_bin} ${test_bin}.f) + add_executable(${test_bin} ${test_bin}.f) target_link_libraries(${test_bin} ${OpenBLAS_LIBNAME}) endforeach() @@ -43,7 +50,19 @@ FILE(WRITE ${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh set(helper_prefix sh "${CMAKE_CURRENT_BINARY_DIR}/test_helper.sh") endif() -set(float_types s d c z) +#set(float_types s d c z) +if (BUILD_SINGLE) + list (APPEND float_types s) +endif() +if (BUILD_DOUBLE) + list (APPEND float_types d) +endif() +if (BUILD_COMPLEX) + list (APPEND float_types c) +endif() +if (BUILD_COMPLEX16) + list (APPEND float_types z) +endif() foreach(float_type ${float_types}) string(TOUPPER ${float_type} float_type_upper) add_test(NAME "${float_type}blas1" diff --git a/test/Makefile b/test/Makefile index 074411b05..5f653414a 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,102 +1,238 @@ TOPDIR = .. include ../Makefile.system + +ifeq ($(NOFORTRAN),1) +all :: +else all :: level1 level2 level3 +endif + +ifeq ($(BUILD_SINGLE),1) +S1=sblat1 +endif +ifeq ($(BUILD_DOUBLE),1) +D1=dblat1 +endif +ifeq ($(BUILD_COMPLEX),1) +C1=cblat1 +endif +ifeq ($(BUILD_COMPLEX16),1) +Z1=zblat1 +endif -level1 : sblat1 dblat1 cblat1 zblat1 -ifndef CROSS +level1: $(S1) $(D1) $(C1) $(Z1) + +ifneq ($(CROSS), 1) +ifeq ($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./sblat1 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./dblat1 +endif +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./cblat1 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./zblat1 +endif ifdef SMP ifeq ($(USE_OPENMP), 1) +ifeq ($(BUILD_SINGLE),1) OMP_NUM_THREADS=2 ./sblat1 +endif +ifeq ($(BUILD_DOUBLE),1) OMP_NUM_THREADS=2 ./dblat1 +endif +ifeq ($(BUILD_COMPLEX),1) OMP_NUM_THREADS=2 ./cblat1 +endif +ifeq ($(BUILD_COMPLEX16),1) OMP_NUM_THREADS=2 ./zblat1 +endif else +ifeq ($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=2 ./sblat1 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=2 ./dblat1 +endif +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=2 ./cblat1 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=2 ./zblat1 endif endif endif +endif + +ifeq ($(BUILD_SINGLE),1) +S2=sblat2 +endif +ifeq ($(BUILD_DOUBLE),1) +D2=dblat2 +endif +ifeq ($(BUILD_COMPLEX),1) +C2=cblat2 +endif +ifeq ($(BUILD_COMPLEX16),1) +Z2=zblat2 +endif -level2 : sblat2 dblat2 cblat2 zblat2 -ifndef CROSS +level2: $(S2) $(D2) $(C2) $(Z2) + + +ifneq ($(CROSS), 1) rm -f ?BLAT2.SUMM +ifeq ($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./sblat2 < ./sblat2.dat @$(GREP) -q FATAL SBLAT2.SUMM && cat SBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./dblat2 < ./dblat2.dat @$(GREP) -q FATAL DBLAT2.SUMM && cat DBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX),1) 
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./cblat2 < ./cblat2.dat @$(GREP) -q FATAL CBLAT2.SUMM && cat CBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./zblat2 < ./zblat2.dat @$(GREP) -q FATAL ZBLAT2.SUMM && cat ZBLAT2.SUMM || exit 0 +endif ifdef SMP rm -f ?BLAT2.SUMM ifeq ($(USE_OPENMP), 1) +ifeq ($(BUILD_SINGLE),1) OMP_NUM_THREADS=2 ./sblat2 < ./sblat2.dat @$(GREP) -q FATAL SBLAT2.SUMM && cat SBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_DOUBLE),1) OMP_NUM_THREADS=2 ./dblat2 < ./dblat2.dat @$(GREP) -q FATAL DBLAT2.SUMM && cat DBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX),1) OMP_NUM_THREADS=2 ./cblat2 < ./cblat2.dat @$(GREP) -q FATAL CBLAT2.SUMM && cat CBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX16),1) OMP_NUM_THREADS=2 ./zblat2 < ./zblat2.dat @$(GREP) -q FATAL ZBLAT2.SUMM && cat ZBLAT2.SUMM || exit 0 +endif else +ifeq ($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=2 ./sblat2 < ./sblat2.dat @$(GREP) -q FATAL SBLAT2.SUMM && cat SBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=2 ./dblat2 < ./dblat2.dat @$(GREP) -q FATAL DBLAT2.SUMM && cat DBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=2 ./cblat2 < ./cblat2.dat @$(GREP) -q FATAL CBLAT2.SUMM && cat CBLAT2.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=2 ./zblat2 < ./zblat2.dat @$(GREP) -q FATAL ZBLAT2.SUMM && cat ZBLAT2.SUMM || exit 0 endif endif endif +endif + +ifeq ($(BUILD_BFLOAT16),1) +B3= test_sbgemm +endif +ifeq ($(BUILD_SINGLE),1) +S3=sblat3 +endif +ifeq ($(BUILD_DOUBLE),1) +D3=dblat3 +endif +ifeq ($(BUILD_COMPLEX),1) +C3=cblat3 +endif +ifeq ($(BUILD_COMPLEX16),1) +Z3=zblat3 +endif + +level3: $(B3) $(S3) $(D3) $(C3) $(Z3) -level3 : sblat3 dblat3 cblat3 zblat3 -ifndef CROSS + +ifneq ($(CROSS), 1) rm -f ?BLAT3.SUMM +ifeq ($(BUILD_BFLOAT16),1) + OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./test_sbgemm > SBBLAT3.SUMM + @$(GREP) -q FATAL SBBLAT3.SUMM && cat SBBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./sblat3 < ./sblat3.dat @$(GREP) -q FATAL SBLAT3.SUMM && cat SBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./dblat3 < ./dblat3.dat @$(GREP) -q FATAL DBLAT3.SUMM && cat DBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./cblat3 < ./cblat3.dat @$(GREP) -q FATAL CBLAT3.SUMM && cat CBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./zblat3 < ./zblat3.dat @$(GREP) -q FATAL ZBLAT3.SUMM && cat ZBLAT3.SUMM || exit 0 +endif ifdef SMP rm -f ?BLAT3.SUMM ifeq ($(USE_OPENMP), 1) +ifeq ($(BUILD_BFLOAT16),1) + OMP_NUM_THREADS=2 ./test_sbgemm > SBBLAT3.SUMM + @$(GREP) -q FATAL SBBLAT3.SUMM && cat SBBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_SINGLE),1) OMP_NUM_THREADS=2 ./sblat3 < ./sblat3.dat @$(GREP) -q FATAL SBLAT3.SUMM && cat SBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_DOUBLE),1) OMP_NUM_THREADS=2 ./dblat3 < ./dblat3.dat @$(GREP) -q FATAL DBLAT3.SUMM && cat DBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX),1) OMP_NUM_THREADS=2 ./cblat3 < ./cblat3.dat @$(GREP) -q FATAL CBLAT3.SUMM && cat CBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX16),1) OMP_NUM_THREADS=2 ./zblat3 < ./zblat3.dat @$(GREP) -q FATAL ZBLAT3.SUMM && cat ZBLAT3.SUMM || exit 0 +endif else +ifeq ($(BUILD_BFLOAT16),1) + OPENBLAS_NUM_THREADS=2 ./test_sbgemm > SBBLAT3.SUMM + @$(GREP) -q FATAL SBBLAT3.SUMM && cat SBBLAT3.SUMM || exit 0 +endif +ifeq 
($(BUILD_SINGLE),1) OPENBLAS_NUM_THREADS=2 ./sblat3 < ./sblat3.dat @$(GREP) -q FATAL SBLAT3.SUMM && cat SBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_DOUBLE),1) OPENBLAS_NUM_THREADS=2 ./dblat3 < ./dblat3.dat @$(GREP) -q FATAL DBLAT3.SUMM && cat DBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX),1) OPENBLAS_NUM_THREADS=2 ./cblat3 < ./cblat3.dat @$(GREP) -q FATAL CBLAT3.SUMM && cat CBLAT3.SUMM || exit 0 +endif +ifeq ($(BUILD_COMPLEX16),1) OPENBLAS_NUM_THREADS=2 ./zblat3 < ./zblat3.dat @$(GREP) -q FATAL ZBLAT3.SUMM && cat ZBLAT3.SUMM || exit 0 endif endif endif +endif level3_3m : zblat3_3m cblat3_3m -ifndef CROSS +ifneq ($(CROSS), 1) rm -f ?BLAT3_3M.SUMM OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 ./cblat3_3m < ./cblat3_3m.dat @$(GREP) -q FATAL CBLAT3_3M.SUMM && cat CBLAT3_3M.SUMM || exit 0 @@ -122,6 +258,12 @@ endif FLDFLAGS = $(FFLAGS:-fPIC=) $(LDFLAGS) + +ifeq ($(CORE), C910V) +EXTRALIB = +CEXTRALIB = +endif + ifeq ($(USE_OPENMP), 1) ifeq ($(F_COMPILER), GFORTRAN) ifeq ($(C_COMPILER), CLANG) @@ -130,51 +272,71 @@ endif endif endif +ifeq ($(BUILD_SINGLE),1) sblat1 : sblat1.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o sblat1 sblat1.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +sblat2 : sblat2.$(SUFFIX) ../$(LIBNAME) + $(FC) $(FLDFLAGS) -o sblat2 sblat2.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) + +sblat3 : sblat3.$(SUFFIX) ../$(LIBNAME) + $(FC) $(FLDFLAGS) -o sblat3 sblat3.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +endif + +ifeq ($(BUILD_DOUBLE),1) dblat1 : dblat1.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o dblat1 dblat1.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +dblat2 : dblat2.$(SUFFIX) ../$(LIBNAME) + $(FC) $(FLDFLAGS) -o dblat2 dblat2.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) + +dblat3 : dblat3.$(SUFFIX) ../$(LIBNAME) + $(FC) $(FLDFLAGS) -o dblat3 dblat3.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +else +dblat2: +dblat3: +endif + + qblat1 : qblat1.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o qblat1 qblat1.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +ifeq ($(BUILD_COMPLEX),1) cblat1 : cblat1.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o cblat1 cblat1.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) -zblat1 : zblat1.$(SUFFIX) ../$(LIBNAME) - $(FC) $(FLDFLAGS) -o zblat1 zblat1.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) - -sblat2 : sblat2.$(SUFFIX) ../$(LIBNAME) - $(FC) $(FLDFLAGS) -o sblat2 sblat2.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) - -dblat2 : dblat2.$(SUFFIX) ../$(LIBNAME) - $(FC) $(FLDFLAGS) -o dblat2 dblat2.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) - cblat2 : cblat2.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o cblat2 cblat2.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) -zblat2 : zblat2.$(SUFFIX) ../$(LIBNAME) - $(FC) $(FLDFLAGS) -o zblat2 zblat2.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) - -sblat3 : sblat3.$(SUFFIX) ../$(LIBNAME) - $(FC) $(FLDFLAGS) -o sblat3 sblat3.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) - -dblat3 : dblat3.$(SUFFIX) ../$(LIBNAME) - $(FC) $(FLDFLAGS) -o dblat3 dblat3.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) - cblat3 : cblat3.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o cblat3 cblat3.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +endif + +ifeq ($(BUILD_COMPLEX16),1) +zblat1 : zblat1.$(SUFFIX) ../$(LIBNAME) + $(FC) $(FLDFLAGS) -o zblat1 zblat1.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) + +zblat2 : zblat2.$(SUFFIX) ../$(LIBNAME) + $(FC) $(FLDFLAGS) -o zblat2 zblat2.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) zblat3 : zblat3.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o 
zblat3 zblat3.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +endif + +ifeq ($(BUILD_BFLOAT16),1) +test_sbgemm : compare_sgemm_sbgemm.c ../$(LIBNAME) + $(FC) $(FLDFLAGS) -o test_sbgemm compare_sgemm_sbgemm.c ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +endif +ifeq ($(BUILD_COMPLEX),1) cblat3_3m : cblat3_3m.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o cblat3_3m cblat3_3m.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) +endif +ifeq ($(BUILD_COMPLEX16),1) zblat3_3m : zblat3_3m.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o zblat3_3m zblat3_3m.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) - +endif @@ -182,7 +344,7 @@ clean: @rm -f *.$(SUFFIX) *.$(PSUFFIX) gmon.$(SUFFIX)ut *.SUMM *.cxml *.exe *.pdb *.dwf \ sblat1 dblat1 cblat1 zblat1 \ sblat2 dblat2 cblat2 zblat2 \ - sblat3 dblat3 cblat3 zblat3 \ + test_sbgemm sblat3 dblat3 cblat3 zblat3 \ sblat1p dblat1p cblat1p zblat1p \ sblat2p dblat2p cblat2p zblat2p \ sblat3p dblat3p cblat3p zblat3p \ diff --git a/test/cblat1.f b/test/cblat1.f index d6b53d105..ecf2a44cb 100644 --- a/test/cblat1.f +++ b/test/cblat1.f @@ -1,7 +1,49 @@ +*> \brief \b CBLAT1 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM CBLAT1 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX Level 1 BLAS. +*> Based upon the original BLAS test routine together with: +*> +*> F06GAF Example Program Text +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex_blas_testing +* +* ===================================================================== PROGRAM CBLAT1 -* Test program for the COMPLEX Level 1 BLAS. -* Based upon the original BLAS test routine together with: -* F06GAF Example Program Text +* +* -- Reference BLAS test routine (version 3.7.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* * .. Parameters .. 
INTEGER NOUT PARAMETER (NOUT=6) @@ -114,8 +156,8 @@ + (5.0E0,6.0E0), (5.0E0,6.0E0), (0.1E0,0.1E0), + (-0.6E0,0.1E0), (0.1E0,-0.3E0), (7.0E0,8.0E0), + (7.0E0,8.0E0), (7.0E0,8.0E0), (7.0E0,8.0E0), - + (7.0E0,8.0E0), (0.3E0,0.1E0), (0.1E0,0.4E0), - + (0.4E0,0.1E0), (0.1E0,0.2E0), (2.0E0,3.0E0), + + (7.0E0,8.0E0), (0.3E0,0.1E0), (0.5E0,0.0E0), + + (0.0E0,0.5E0), (0.0E0,0.2E0), (2.0E0,3.0E0), + (2.0E0,3.0E0), (2.0E0,3.0E0), (2.0E0,3.0E0)/ DATA ((CV(I,J,2),I=1,8),J=1,5)/(0.1E0,0.1E0), + (4.0E0,5.0E0), (4.0E0,5.0E0), (4.0E0,5.0E0), @@ -129,10 +171,10 @@ + (3.0E0,6.0E0), (-0.6E0,0.1E0), (4.0E0,7.0E0), + (0.1E0,-0.3E0), (7.0E0,2.0E0), (7.0E0,2.0E0), + (7.0E0,2.0E0), (0.3E0,0.1E0), (5.0E0,8.0E0), - + (0.1E0,0.4E0), (6.0E0,9.0E0), (0.4E0,0.1E0), - + (8.0E0,3.0E0), (0.1E0,0.2E0), (9.0E0,4.0E0)/ - DATA STRUE2/0.0E0, 0.5E0, 0.6E0, 0.7E0, 0.7E0/ - DATA STRUE4/0.0E0, 0.7E0, 1.0E0, 1.3E0, 1.7E0/ + + (0.5E0,0.0E0), (6.0E0,9.0E0), (0.0E0,0.5E0), + + (8.0E0,3.0E0), (0.0E0,0.2E0), (9.0E0,4.0E0)/ + DATA STRUE2/0.0E0, 0.5E0, 0.6E0, 0.7E0, 0.8E0/ + DATA STRUE4/0.0E0, 0.7E0, 1.0E0, 1.3E0, 1.6E0/ DATA ((CTRUE5(I,J,1),I=1,8),J=1,5)/(0.1E0,0.1E0), + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), @@ -145,8 +187,8 @@ + (0.11E0,-0.03E0), (-0.17E0,0.46E0), + (-0.17E0,-0.19E0), (7.0E0,8.0E0), (7.0E0,8.0E0), + (7.0E0,8.0E0), (7.0E0,8.0E0), (7.0E0,8.0E0), - + (0.19E0,-0.17E0), (0.32E0,0.09E0), - + (0.23E0,-0.24E0), (0.18E0,0.01E0), + + (0.19E0,-0.17E0), (0.20E0,-0.35E0), + + (0.35E0,0.20E0), (0.14E0,0.08E0), + (2.0E0,3.0E0), (2.0E0,3.0E0), (2.0E0,3.0E0), + (2.0E0,3.0E0)/ DATA ((CTRUE5(I,J,2),I=1,8),J=1,5)/(0.1E0,0.1E0), @@ -162,9 +204,9 @@ + (-0.17E0,0.46E0), (4.0E0,7.0E0), + (-0.17E0,-0.19E0), (7.0E0,2.0E0), (7.0E0,2.0E0), + (7.0E0,2.0E0), (0.19E0,-0.17E0), (5.0E0,8.0E0), - + (0.32E0,0.09E0), (6.0E0,9.0E0), - + (0.23E0,-0.24E0), (8.0E0,3.0E0), - + (0.18E0,0.01E0), (9.0E0,4.0E0)/ + + (0.20E0,-0.35E0), (6.0E0,9.0E0), + + (0.35E0,0.20E0), (8.0E0,3.0E0), + + (0.14E0,0.08E0), (9.0E0,4.0E0)/ DATA ((CTRUE6(I,J,1),I=1,8),J=1,5)/(0.1E0,0.1E0), + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), @@ -177,8 +219,8 @@ + (0.03E0,0.03E0), (-0.18E0,0.03E0), + (0.03E0,-0.09E0), (7.0E0,8.0E0), (7.0E0,8.0E0), + (7.0E0,8.0E0), (7.0E0,8.0E0), (7.0E0,8.0E0), - + (0.09E0,0.03E0), (0.03E0,0.12E0), - + (0.12E0,0.03E0), (0.03E0,0.06E0), (2.0E0,3.0E0), + + (0.09E0,0.03E0), (0.15E0,0.00E0), + + (0.00E0,0.15E0), (0.00E0,0.06E0), (2.0E0,3.0E0), + (2.0E0,3.0E0), (2.0E0,3.0E0), (2.0E0,3.0E0)/ DATA ((CTRUE6(I,J,2),I=1,8),J=1,5)/(0.1E0,0.1E0), + (4.0E0,5.0E0), (4.0E0,5.0E0), (4.0E0,5.0E0), @@ -193,8 +235,8 @@ + (-0.18E0,0.03E0), (4.0E0,7.0E0), + (0.03E0,-0.09E0), (7.0E0,2.0E0), (7.0E0,2.0E0), + (7.0E0,2.0E0), (0.09E0,0.03E0), (5.0E0,8.0E0), - + (0.03E0,0.12E0), (6.0E0,9.0E0), (0.12E0,0.03E0), - + (8.0E0,3.0E0), (0.03E0,0.06E0), (9.0E0,4.0E0)/ + + (0.15E0,0.00E0), (6.0E0,9.0E0), (0.00E0,0.15E0), + + (8.0E0,3.0E0), (0.00E0,0.06E0), (9.0E0,4.0E0)/ DATA ITRUE3/0, 1, 2, 2, 2/ * .. Executable Statements .. DO 60 INCX = 1, 2 @@ -529,7 +571,8 @@ * * .. Parameters .. INTEGER NOUT - PARAMETER (NOUT=6) + REAL ZERO + PARAMETER (NOUT=6, ZERO=0.0E0) * .. Scalar Arguments .. REAL SFAC INTEGER LEN @@ -552,7 +595,7 @@ * DO 40 I = 1, LEN SD = SCOMP(I) - STRUE(I) - IF (SDIFF(ABS(SSIZE(I))+ABS(SFAC*SD),ABS(SSIZE(I))).EQ.0.0E0) + IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO)) + GO TO 40 * * HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). 
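
The DATA edits above are self-consistent and easy to verify by hand: under my reading of the CV layout, the N=4 test case now sees the elements (0.3,0.1), (0.5,0.0), (0.0,0.5), (0.0,0.2), whose 2-norm is sqrt(0.64) = 0.8 and whose |Re|+|Im| sum is 1.6, exactly the new last entries of STRUE2 and STRUE4 (the old 0.7 and 1.7 match the old data the same way). A short C cross-check, also exercising the epsilon-based closeness test that replaces the SDIFF comparison in STEST; SFAC = 9.765625E-4 is the scale factor cblat1 passes in:

/* Recompute SCNRM2/SCASUM for the updated N=4 test vector and compare
 * against the new STRUE2(5)=0.8 and STRUE4(5)=1.6, using the rewritten
 * STEST acceptance test: |SFAC*(comp-true)| <= |true| * epsilon.
 * Illustration only; the element-to-case mapping is my reading of CV. */
#include <stdio.h>
#include <math.h>
#include <float.h>

int main (void)
{
  const float re[4] = { 0.3f, 0.5f, 0.0f, 0.0f };
  const float im[4] = { 0.1f, 0.0f, 0.5f, 0.2f };
  const float sfac = 9.765625e-4f;   /* SFAC used by cblat1 */
  float nrm2 = 0.0f, asum = 0.0f;
  for (int i = 0; i < 4; i++)
    {
      nrm2 += re[i] * re[i] + im[i] * im[i];
      asum += fabsf (re[i]) + fabsf (im[i]);   /* SCASUM: |Re| + |Im| */
    }
  nrm2 = sqrtf (nrm2);                         /* sqrt(0.64) = 0.8 */
  printf ("scnrm2 = %g  ok=%d\n", nrm2,
          fabsf (sfac * (nrm2 - 0.8f)) <= 0.8f * FLT_EPSILON);
  printf ("scasum = %g  ok=%d\n", asum,
          fabsf (sfac * (asum - 1.6f)) <= 1.6f * FLT_EPSILON);
  return 0;
}
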
diff --git a/test/cblat2.f b/test/cblat2.f index 20f188100..8c7bac48e 100644 --- a/test/cblat2.f +++ b/test/cblat2.f @@ -1,68 +1,114 @@ +*> \brief \b CBLAT2 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM CBLAT2 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX Level 2 Blas. +*> +*> The program must be driven by a short data file. The first 18 records +*> of the file are read using list-directed input, the last 17 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 35 lines: +*> 'cblat2.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'CBLA2T.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 4 NUMBER OF VALUES OF K +*> 0 1 2 4 VALUES OF K +*> 4 NUMBER OF VALUES OF INCX AND INCY +*> 1 2 -1 -2 VALUES OF INCX AND INCY +*> 3 NUMBER OF VALUES OF ALPHA +*> (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +*> CGEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CGBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CHEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CHBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CHPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTRMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTRSV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTBSV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTPSV T PUT F FOR NO TEST. SAME COLUMNS. +*> CGERC T PUT F FOR NO TEST. SAME COLUMNS. +*> CGERU T PUT F FOR NO TEST. SAME COLUMNS. +*> CHER T PUT F FOR NO TEST. SAME COLUMNS. +*> CHPR T PUT F FOR NO TEST. SAME COLUMNS. +*> CHER2 T PUT F FOR NO TEST. SAME COLUMNS. +*> CHPR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. +*> An extended set of Fortran Basic Linear Algebra Subprograms. +*> +*> Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics +*> and Computer Science Division, Argonne National Laboratory, +*> 9700 South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> Or +*> +*> NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms +*> Group Ltd., NAG Central Office, 256 Banbury Road, Oxford +*> OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st +*> Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +*> +*> +*> -- Written on 10-August-1987. +*> Richard Hanson, Sandia National Labs. +*> Jeremy Du Croz, NAG Central Office. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. 
+* +*> \date April 2012 +* +*> \ingroup complex_blas_testing +* +* ===================================================================== PROGRAM CBLAT2 * -* Test program for the COMPLEX Level 2 Blas. -* -* The program must be driven by a short data file. The first 18 records -* of the file are read using list-directed input, the last 17 records -* are read using the format ( A6, L2 ). An annotated example of a data -* file can be obtained by deleting the first 3 characters from the -* following 35 lines: -* 'CBLAT2.SUMM' NAME OF SUMMARY OUTPUT FILE -* 6 UNIT NUMBER OF SUMMARY FILE -* 'CBLA2T.SNAP' NAME OF SNAPSHOT OUTPUT FILE -* -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) -* F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO STOP ON FAILURES. -* T LOGICAL FLAG, T TO TEST ERROR EXITS. -* 16.0 THRESHOLD VALUE OF TEST RATIO -* 6 NUMBER OF VALUES OF N -* 0 1 2 3 5 9 VALUES OF N -* 4 NUMBER OF VALUES OF K -* 0 1 2 4 VALUES OF K -* 4 NUMBER OF VALUES OF INCX AND INCY -* 1 2 -1 -2 VALUES OF INCX AND INCY -* 3 NUMBER OF VALUES OF ALPHA -* (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA -* 3 NUMBER OF VALUES OF BETA -* (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA -* CGEMV T PUT F FOR NO TEST. SAME COLUMNS. -* CGBMV T PUT F FOR NO TEST. SAME COLUMNS. -* CHEMV T PUT F FOR NO TEST. SAME COLUMNS. -* CHBMV T PUT F FOR NO TEST. SAME COLUMNS. -* CHPMV T PUT F FOR NO TEST. SAME COLUMNS. -* CTRMV T PUT F FOR NO TEST. SAME COLUMNS. -* CTBMV T PUT F FOR NO TEST. SAME COLUMNS. -* CTPMV T PUT F FOR NO TEST. SAME COLUMNS. -* CTRSV T PUT F FOR NO TEST. SAME COLUMNS. -* CTBSV T PUT F FOR NO TEST. SAME COLUMNS. -* CTPSV T PUT F FOR NO TEST. SAME COLUMNS. -* CGERC T PUT F FOR NO TEST. SAME COLUMNS. -* CGERU T PUT F FOR NO TEST. SAME COLUMNS. -* CHER T PUT F FOR NO TEST. SAME COLUMNS. -* CHPR T PUT F FOR NO TEST. SAME COLUMNS. -* CHER2 T PUT F FOR NO TEST. SAME COLUMNS. -* CHPR2 T PUT F FOR NO TEST. SAME COLUMNS. -* -* See: -* -* Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. -* An extended set of Fortran Basic Linear Algebra Subprograms. -* -* Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics -* and Computer Science Division, Argonne National Laboratory, -* 9700 South Cass Avenue, Argonne, Illinois 60439, US. -* -* Or -* -* NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms -* Group Ltd., NAG Central Office, 256 Banbury Road, Oxford -* OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st -* Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +* -- Reference BLAS test routine (version 3.7.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 * -* -* -- Written on 10-August-1987. -* Richard Hanson, Sandia National Labs. -* Jeremy Du Croz, NAG Central Office. +* ===================================================================== * * .. Parameters .. 
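
The header above documents the driver's input convention: the leading records are free-form (list-directed), while the per-routine lines at the end are read with the fixed format ( A6, L2 ), six characters of routine name followed by a two-column logical field. A loose C analogue of that fixed-format read (illustrative only; the helper is mine, not from this commit):

/* Parse "CGEMV  T"-style records: columns 1-6 hold the name, the next two
 * columns the logical flag -- approximating the Fortran ( A6, L2 ) read. */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

int main (void)
{
  char line[128];
  while (fgets (line, sizeof line, stdin) != NULL)
    {
      if (strlen (line) < 7)
        continue;                      /* too short to carry both fields */
      char name[7];
      memcpy (name, line, 6);          /* A6: fixed six-character field */
      name[6] = '\0';
      /* L2: take the first T found in the two-column logical field. */
      int test = toupper ((unsigned char) line[6]) == 'T'
                 || toupper ((unsigned char) line[7]) == 'T';
      printf ("%s -> %s\n", name, test ? "test" : "skip");
    }
  return 0;
}
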
INTEGER NIN @@ -71,8 +117,8 @@ PARAMETER ( NSUBS = 17 ) COMPLEX ZERO, ONE PARAMETER ( ZERO = ( 0.0, 0.0 ), ONE = ( 1.0, 0.0 ) ) - REAL RZERO, RHALF, RONE - PARAMETER ( RZERO = 0.0, RHALF = 0.5, RONE = 1.0 ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) INTEGER NMAX, INCMAX PARAMETER ( NMAX = 65, INCMAX = 2 ) INTEGER NINMAX, NIDMAX, NKBMAX, NALMAX, NBEMAX @@ -126,7 +172,7 @@ * READ( NIN, FMT = * )SUMMRY READ( NIN, FMT = * )NOUT - OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) NOUTC = NOUT * * Read name and unit number for snapshot output file and open file. @@ -135,7 +181,7 @@ READ( NIN, FMT = * )NTRA TRACE = NTRA.GE.0 IF( TRACE )THEN - OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) END IF * Read the flag that directs rewinding of the snapshot file. READ( NIN, FMT = * )REWI @@ -240,14 +286,7 @@ * * Compute EPS (the machine precision). * - EPS = RONE - 90 CONTINUE - IF( SDIFF( RONE + EPS, RONE ).EQ.RZERO ) - $ GO TO 100 - EPS = RHALF*EPS - GO TO 90 - 100 CONTINUE - EPS = EPS + EPS + EPS = EPSILON(RZERO) WRITE( NOUT, FMT = 9998 )EPS * * Check the reliability of CMVCH using exact data. @@ -3079,7 +3118,6 @@ 50 CONTINUE END IF * - 60 CONTINUE LCERES = .TRUE. GO TO 80 70 CONTINUE diff --git a/test/cblat3.f b/test/cblat3.f index 5df1ddd64..a65e1364c 100644 --- a/test/cblat3.f +++ b/test/cblat3.f @@ -1,50 +1,96 @@ +*> \brief \b CBLAT3 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM CBLAT3 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX Level 3 Blas. +*> +*> The program must be driven by a short data file. The first 14 records +*> of the file are read using list-directed input, the last 9 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 23 lines: +*> 'cblat3.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'CBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 3 NUMBER OF VALUES OF ALPHA +*> (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +*> CGEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> CHEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> CSYMM T PUT F FOR NO TEST. SAME COLUMNS. +*> CTRMM T PUT F FOR NO TEST. SAME COLUMNS. +*> CTRSM T PUT F FOR NO TEST. SAME COLUMNS. +*> CHERK T PUT F FOR NO TEST. SAME COLUMNS. +*> CSYRK T PUT F FOR NO TEST. SAME COLUMNS. +*> CHER2K T PUT F FOR NO TEST. SAME COLUMNS. +*> CSYR2K T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. +*> A Set of Level 3 Basic Linear Algebra Subprograms. +*> +*> Technical Memorandum No.88 (Revision 1), Mathematics and +*> Computer Science Division, Argonne National Laboratory, 9700 +*> South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> -- Written on 8-February-1989. +*> Jack Dongarra, Argonne National Laboratory. +*> Iain Duff, AERE Harwell. 
+*> Jeremy Du Croz, Numerical Algorithms Group Ltd. +*> Sven Hammarling, Numerical Algorithms Group Ltd. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex_blas_testing +* +* ===================================================================== PROGRAM CBLAT3 * -* Test program for the COMPLEX Level 3 Blas. -* -* The program must be driven by a short data file. The first 14 records -* of the file are read using list-directed input, the last 9 records -* are read using the format ( A6, L2 ). An annotated example of a data -* file can be obtained by deleting the first 3 characters from the -* following 23 lines: -* 'CBLAT3.SUMM' NAME OF SUMMARY OUTPUT FILE -* 6 UNIT NUMBER OF SUMMARY FILE -* 'CBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE -* -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) -* F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO STOP ON FAILURES. -* T LOGICAL FLAG, T TO TEST ERROR EXITS. -* 16.0 THRESHOLD VALUE OF TEST RATIO -* 6 NUMBER OF VALUES OF N -* 0 1 2 3 5 9 VALUES OF N -* 3 NUMBER OF VALUES OF ALPHA -* (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA -* 3 NUMBER OF VALUES OF BETA -* (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA -* CGEMM T PUT F FOR NO TEST. SAME COLUMNS. -* CHEMM T PUT F FOR NO TEST. SAME COLUMNS. -* CSYMM T PUT F FOR NO TEST. SAME COLUMNS. -* CTRMM T PUT F FOR NO TEST. SAME COLUMNS. -* CTRSM T PUT F FOR NO TEST. SAME COLUMNS. -* CHERK T PUT F FOR NO TEST. SAME COLUMNS. -* CSYRK T PUT F FOR NO TEST. SAME COLUMNS. -* CHER2K T PUT F FOR NO TEST. SAME COLUMNS. -* CSYR2K T PUT F FOR NO TEST. SAME COLUMNS. -* -* See: -* -* Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. -* A Set of Level 3 Basic Linear Algebra Subprograms. -* -* Technical Memorandum No.88 (Revision 1), Mathematics and -* Computer Science Division, Argonne National Laboratory, 9700 -* South Cass Avenue, Argonne, Illinois 60439, US. +* -- Reference BLAS test routine (version 3.7.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 * -* -- Written on 8-February-1989. -* Jack Dongarra, Argonne National Laboratory. -* Iain Duff, AERE Harwell. -* Jeremy Du Croz, Numerical Algorithms Group Ltd. -* Sven Hammarling, Numerical Algorithms Group Ltd. +* ===================================================================== * * .. Parameters .. INTEGER NIN @@ -53,8 +99,8 @@ PARAMETER ( NSUBS = 9 ) COMPLEX ZERO, ONE PARAMETER ( ZERO = ( 0.0, 0.0 ), ONE = ( 1.0, 0.0 ) ) - REAL RZERO, RHALF, RONE - PARAMETER ( RZERO = 0.0, RHALF = 0.5, RONE = 1.0 ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) INTEGER NMAX PARAMETER ( NMAX = 65 ) INTEGER NIDMAX, NALMAX, NBEMAX @@ -103,7 +149,7 @@ * READ( NIN, FMT = * )SUMMRY READ( NIN, FMT = * )NOUT - OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) + OPEN( NOUT, FILE = SUMMRY ) NOUTC = NOUT * * Read name and unit number for snapshot output file and open file. @@ -112,7 +158,7 @@ READ( NIN, FMT = * )NTRA TRACE = NTRA.GE.0 IF( TRACE )THEN - OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) + OPEN( NTRA, FILE = SNAPS ) END IF * Read the flag that directs rewinding of the snapshot file. 
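
A recurring simplification in these files (here and in the dblat/sblat/zblat hunks below): the run-time loop that estimated the machine precision by repeated halving is replaced with the Fortran 90 intrinsic EPSILON. The two agree on IEEE-754 hardware; here is a C transliteration of the retired loop, compared against FLT_EPSILON (C's counterpart of EPSILON for a default REAL):

/* The retired EPS loop in C: halve eps until (1+eps) rounds to 1, then
 * double back.  On IEEE-754 hardware the result equals FLT_EPSILON. */
#include <stdio.h>
#include <float.h>

/* Force the sum through memory so the compiler cannot keep it in an
 * extended-precision register -- the classic pitfall of this loop. */
static float fadd (volatile float a, volatile float b) { return a + b; }

int main (void)
{
  float eps = 1.0f;
  while (fadd (1.0f, eps) != 1.0f)
    eps = 0.5f * eps;
  eps = eps + eps;                    /* loop exits one halving too far */
  printf ("loop eps    = %g\n", eps);             /* expect 1.19209e-07 */
  printf ("FLT_EPSILON = %g\n", (double) FLT_EPSILON);
  return eps != FLT_EPSILON;
}

Besides being shorter, the intrinsic removes any dependence on the SDIFF helper and on how aggressively the compiler optimizes the convergence loop.
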
READ( NIN, FMT = * )REWI @@ -189,14 +235,7 @@ * * Compute EPS (the machine precision). * - EPS = RONE - 70 CONTINUE - IF( SDIFF( RONE + EPS, RONE ).EQ.RZERO ) - $ GO TO 80 - EPS = RHALF*EPS - GO TO 70 - 80 CONTINUE - EPS = EPS + EPS + EPS = EPSILON(RZERO) WRITE( NOUT, FMT = 9998 )EPS * * Check the reliability of CMMCH using exact data. @@ -1301,8 +1340,6 @@ NC = 0 RESET = .TRUE. ERRMAX = RZERO - RALS = RONE - RBETS = RONE * DO 100 IN = 1, NIDIM N = IDIM( IN ) @@ -1948,7 +1985,7 @@ * * Tests the error exits from the Level 3 Blas. * Requires a special version of the error-handling routine XERBLA. -* ALPHA, RALPHA, BETA, RBETA, A, B and C should not need to be defined. +* A, B and C should not need to be defined. * * Auxiliary routine for test program for Level 3 Blas. * @@ -1958,12 +1995,19 @@ * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * +* 3-19-92: Initialize ALPHA, BETA, RALPHA, and RBETA (eca) +* 3-19-92: Fix argument 12 in calls to CSYMM and CHEMM +* with INFOT = 9 (eca) +* * .. Scalar Arguments .. INTEGER ISNUM, NOUT CHARACTER*6 SRNAMT * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK +* .. Parameters .. + REAL ONE, TWO + PARAMETER ( ONE = 1.0E0, TWO = 2.0E0 ) * .. Local Scalars .. COMPLEX ALPHA, BETA REAL RALPHA, RBETA @@ -1981,6 +2025,14 @@ * LERR is set to .TRUE. by the special version of XERBLA each time * it is called, and is then tested and re-set by CHKXER. LERR = .FALSE. +* +* Initialize ALPHA, BETA, RALPHA, and RBETA. +* + ALPHA = CMPLX( ONE, -ONE ) + BETA = CMPLX( TWO, -TWO ) + RALPHA = ONE + RBETA = TWO +* GO TO ( 10, 20, 30, 40, 50, 60, 70, 80, $ 90 )ISNUM 10 INFOT = 1 @@ -2207,16 +2259,16 @@ CALL CHEMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL CHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL CHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL CHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL CHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 12 CALL CHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) @@ -2274,16 +2326,16 @@ CALL CSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL CSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL CSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL CSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL CSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 12 CALL CSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) @@ -3270,7 +3322,6 @@ 50 CONTINUE END 
IF * - 60 CONTINUE LCERES = .TRUE. GO TO 80 70 CONTINUE diff --git a/test/compare_sgemm_sbgemm.c b/test/compare_sgemm_sbgemm.c new file mode 100644 index 000000000..3d4eb2515 --- /dev/null +++ b/test/compare_sgemm_sbgemm.c @@ -0,0 +1,131 @@ +/*************************************************************************** +Copyright (c) 2020, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include "../common.h"
+#define SGEMM BLASFUNC(sgemm)
+#define SBGEMM BLASFUNC(sbgemm)
+/* bfloat16: 1 sign bit, 8 exponent bits, 7 mantissa bits -- the upper
+   half of an IEEE-754 single-precision value. */
+typedef union
+{
+  unsigned short v;
+  struct
+  {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+    unsigned short s:1;
+    unsigned short e:8;
+    unsigned short m:7;
+#else
+    unsigned short m:7;
+    unsigned short e:8;
+    unsigned short s:1;
+#endif
+  } bits;
+} bfloat16_bits;
+
+typedef union
+{
+  float v;
+  struct
+  {
+    uint32_t m:23;
+    uint32_t e:8;
+    uint32_t s:1;
+  } bits;
+} float32_bits;
+
+/* Widen bfloat16 to float: sign and exponent copy across, the 7 mantissa
+   bits land in the top of the 23-bit float mantissa. */
+float
+float16to32 (bfloat16_bits f16)
+{
+  float32_bits f32;
+  f32.bits.s = f16.bits.s;
+  f32.bits.e = f16.bits.e;
+  f32.bits.m = (uint32_t) f16.bits.m << 16;
+  return f32.v;
+}
+
+int
+main (int argc, char *argv[])
+{
+  int m, n, k;
+  int i, j, l;
+  int x;
+  int ret = 0;
+  int loop = 100;
+  char transA = 'N', transB = 'N';
+  float alpha = 1.0, beta = 0.0;
+
+  for (x = 0; x <= loop; x++)
+    {
+      m = k = n = x;                /* square problems of growing size */
+      float A[m * k];
+      float B[k * n];
+      float C[m * n];
+      bfloat16_bits AA[m * k], BB[k * n];
+      float DD[m * n], CC[m * n];
+
+      /* Random inputs in [0.5, 1.5); AA/BB are A/B truncated to bfloat16
+         by keeping the top 16 bits of each float's bit pattern. */
+      for (j = 0; j < m; j++)
+	{
+	  for (i = 0; i < m; i++)
+	    {
+	      A[j * k + i] = ((FLOAT) rand () / (FLOAT) RAND_MAX) + 0.5;
+	      B[j * k + i] = ((FLOAT) rand () / (FLOAT) RAND_MAX) + 0.5;
+	      C[j * k + i] = 0;
+	      AA[j * k + i].v = *(uint32_t *) & A[j * k + i] >> 16;
+	      BB[j * k + i].v = *(uint32_t *) & B[j * k + i] >> 16;
+	      CC[j * k + i] = 0;
+	      DD[j * k + i] = 0;
+	    }
+	}
+      SGEMM (&transA, &transB, &m, &n, &k, &alpha, A,
+	     &m, B, &k, &beta, C, &m);
+      SBGEMM (&transA, &transB, &m, &n, &k, &alpha, AA,
+	      &m, BB, &k, &beta, CC, &m);
+      /* SBGEMM should stay within a loose absolute tolerance of SGEMM. */
+      for (i = 0; i < n; i++)
+	for (j = 0; j < m; j++)
+	  for (l = 0; l < k; l++)
+	    if (fabs (CC[i * m + j] - C[i * m + j]) > 1.0)
+	      ret++;
+      if (transA == 'N' && transB == 'N')
+	{
+	  /* Reference product from the widened bfloat16 inputs; SBGEMM
+	     must match it exactly. */
+	  for (i = 0; i < n; i++)
+	    for (j = 0; j < m; j++)
+	      for (l = 0; l < k; l++)
+		{
+		  DD[i * m + j] +=
+		    float16to32 (AA[l * m + j]) * float16to32 (BB[l + k * i]);
+		}
+	  for (i = 0; i < n; i++)
+	    for (j = 0; j < m; j++)
+	      for (l = 0; l < k; l++)
+		if (CC[i * m + j] != DD[i * m + j])
+		  ret++;
+	}
+    }
+  if (ret != 0)
+    fprintf (stderr, "FATAL ERROR SBGEMM - Return code: %d\n", ret);
+  return ret;
+}
diff --git a/test/dblat2.f b/test/dblat2.f
index 4002d4368..9bbbe9792 100644
--- a/test/dblat2.f
+++ b/test/dblat2.f
@@ -1,75 +1,121 @@
+*> \brief \b DBLAT2
+*
+* =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
+*
+* Definition:
+* ===========
+*
+* PROGRAM DBLAT2
+*
+*
+*> \par Purpose:
+* =============
+*>
+*> \verbatim
+*>
+*> Test program for the DOUBLE PRECISION Level 2 Blas.
+*>
+*> The program must be driven by a short data file. The first 18 records
+*> of the file are read using list-directed input, the last 16 records
+*> are read using the format ( A6, L2 ). An annotated example of a data
+*> file can be obtained by deleting the first 3 characters from the
+*> following 34 lines:
+*> 'dblat2.out' NAME OF SUMMARY OUTPUT FILE
+*> 6 UNIT NUMBER OF SUMMARY FILE
+*> 'DBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE
+*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0)
+*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD.
+*> F LOGICAL FLAG, T TO STOP ON FAILURES.
+*> T LOGICAL FLAG, T TO TEST ERROR EXITS.
+*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 4 NUMBER OF VALUES OF K +*> 0 1 2 4 VALUES OF K +*> 4 NUMBER OF VALUES OF INCX AND INCY +*> 1 2 -1 -2 VALUES OF INCX AND INCY +*> 3 NUMBER OF VALUES OF ALPHA +*> 0.0 1.0 0.7 VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> 0.0 1.0 0.9 VALUES OF BETAC +*> DGEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DGBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DSBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DSPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTRMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTRSV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTBSV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTPSV T PUT F FOR NO TEST. SAME COLUMNS. +*> DGER T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYR T PUT F FOR NO TEST. SAME COLUMNS. +*> DSPR T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> DSPR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. +*> An extended set of Fortran Basic Linear Algebra Subprograms. +*> +*> Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics +*> and Computer Science Division, Argonne National Laboratory, +*> 9700 South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> Or +*> +*> NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms +*> Group Ltd., NAG Central Office, 256 Banbury Road, Oxford +*> OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st +*> Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +*> +*> +*> -- Written on 10-August-1987. +*> Richard Hanson, Sandia National Labs. +*> Jeremy Du Croz, NAG Central Office. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup double_blas_testing +* +* ===================================================================== PROGRAM DBLAT2 * -* Test program for the DOUBLE PRECISION Level 2 Blas. -* -* The program must be driven by a short data file. The first 18 records -* of the file are read using list-directed input, the last 16 records -* are read using the format ( A6, L2 ). An annotated example of a data -* file can be obtained by deleting the first 3 characters from the -* following 34 lines: -* 'DBLAT2.SUMM' NAME OF SUMMARY OUTPUT FILE -* 6 UNIT NUMBER OF SUMMARY FILE -* 'DBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE -* -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) -* F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO STOP ON FAILURES. -* T LOGICAL FLAG, T TO TEST ERROR EXITS. -* 16.0 THRESHOLD VALUE OF TEST RATIO -* 6 NUMBER OF VALUES OF N -* 0 1 2 3 5 9 VALUES OF N -* 4 NUMBER OF VALUES OF K -* 0 1 2 4 VALUES OF K -* 4 NUMBER OF VALUES OF INCX AND INCY -* 1 2 -1 -2 VALUES OF INCX AND INCY -* 3 NUMBER OF VALUES OF ALPHA -* 0.0 1.0 0.7 VALUES OF ALPHA -* 3 NUMBER OF VALUES OF BETA -* 0.0 1.0 0.9 VALUES OF BETA -* DGEMV T PUT F FOR NO TEST. SAME COLUMNS. -* DGBMV T PUT F FOR NO TEST. SAME COLUMNS. -* DSYMV T PUT F FOR NO TEST. SAME COLUMNS. -* DSBMV T PUT F FOR NO TEST. SAME COLUMNS. 
-* DSPMV T PUT F FOR NO TEST. SAME COLUMNS. -* DTRMV T PUT F FOR NO TEST. SAME COLUMNS. -* DTBMV T PUT F FOR NO TEST. SAME COLUMNS. -* DTPMV T PUT F FOR NO TEST. SAME COLUMNS. -* DTRSV T PUT F FOR NO TEST. SAME COLUMNS. -* DTBSV T PUT F FOR NO TEST. SAME COLUMNS. -* DTPSV T PUT F FOR NO TEST. SAME COLUMNS. -* DGER T PUT F FOR NO TEST. SAME COLUMNS. -* DSYR T PUT F FOR NO TEST. SAME COLUMNS. -* DSPR T PUT F FOR NO TEST. SAME COLUMNS. -* DSYR2 T PUT F FOR NO TEST. SAME COLUMNS. -* DSPR2 T PUT F FOR NO TEST. SAME COLUMNS. -* -* See: -* -* Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. -* An extended set of Fortran Basic Linear Algebra Subprograms. -* -* Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics -* and Computer Science Division, Argonne National Laboratory, -* 9700 South Cass Avenue, Argonne, Illinois 60439, US. -* -* Or -* -* NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms -* Group Ltd., NAG Central Office, 256 Banbury Road, Oxford -* OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st -* Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +* -- Reference BLAS test routine (version 3.7.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 * -* -* -- Written on 10-August-1987. -* Richard Hanson, Sandia National Labs. -* Jeremy Du Croz, NAG Central Office. +* ===================================================================== * * .. Parameters .. INTEGER NIN PARAMETER ( NIN = 5 ) INTEGER NSUBS PARAMETER ( NSUBS = 16 ) - DOUBLE PRECISION ZERO, HALF, ONE - PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) INTEGER NMAX, INCMAX PARAMETER ( NMAX = 65, INCMAX = 2 ) INTEGER NINMAX, NIDMAX, NKBMAX, NALMAX, NBEMAX @@ -121,7 +167,7 @@ * READ( NIN, FMT = * )SUMMRY READ( NIN, FMT = * )NOUT - OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) NOUTC = NOUT * * Read name and unit number for snapshot output file and open file. @@ -130,7 +176,7 @@ READ( NIN, FMT = * )NTRA TRACE = NTRA.GE.0 IF( TRACE )THEN - OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) END IF * Read the flag that directs rewinding of the snapshot file. READ( NIN, FMT = * )REWI @@ -235,14 +281,7 @@ * * Compute EPS (the machine precision). * - EPS = ONE - 90 CONTINUE - IF( DDIFF( ONE + EPS, ONE ).EQ.ZERO ) - $ GO TO 100 - EPS = HALF*EPS - GO TO 90 - 100 CONTINUE - EPS = EPS + EPS + EPS = EPSILON(ZERO) WRITE( NOUT, FMT = 9998 )EPS * * Check the reliability of DMVCH using exact data. @@ -2982,7 +3021,6 @@ 50 CONTINUE END IF * - 60 CONTINUE LDERES = .TRUE. GO TO 80 70 CONTINUE diff --git a/test/dblat3.f b/test/dblat3.f index 082e03e5e..1ebec4ffa 100644 --- a/test/dblat3.f +++ b/test/dblat3.f @@ -1,55 +1,101 @@ +*> \brief \b DBLAT3 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM DBLAT3 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the DOUBLE PRECISION Level 3 Blas. +*> +*> The program must be driven by a short data file. The first 14 records +*> of the file are read using list-directed input, the last 6 records +*> are read using the format ( A6, L2 ). 
An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 20 lines: +*> 'dblat3.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'DBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 3 NUMBER OF VALUES OF ALPHA +*> 0.0 1.0 0.7 VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> 0.0 1.0 1.3 VALUES OF BETA +*> DGEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYMM T PUT F FOR NO TEST. SAME COLUMNS. +*> DTRMM T PUT F FOR NO TEST. SAME COLUMNS. +*> DTRSM T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYRK T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYR2K T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. +*> A Set of Level 3 Basic Linear Algebra Subprograms. +*> +*> Technical Memorandum No.88 (Revision 1), Mathematics and +*> Computer Science Division, Argonne National Laboratory, 9700 +*> South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> -- Written on 8-February-1989. +*> Jack Dongarra, Argonne National Laboratory. +*> Iain Duff, AERE Harwell. +*> Jeremy Du Croz, Numerical Algorithms Group Ltd. +*> Sven Hammarling, Numerical Algorithms Group Ltd. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup double_blas_testing +* +* ===================================================================== PROGRAM DBLAT3 * -* Test program for the DOUBLE PRECISION Level 3 Blas. -* -* The program must be driven by a short data file. The first 14 records -* of the file are read using list-directed input, the last 6 records -* are read using the format ( A6, L2 ). An annotated example of a data -* file can be obtained by deleting the first 3 characters from the -* following 20 lines: -* 'DBLAT3.SUMM' NAME OF SUMMARY OUTPUT FILE -* 6 UNIT NUMBER OF SUMMARY FILE -* 'DBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE -* -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) -* F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO STOP ON FAILURES. -* T LOGICAL FLAG, T TO TEST ERROR EXITS. -* 16.0 THRESHOLD VALUE OF TEST RATIO -* 6 NUMBER OF VALUES OF N -* 0 1 2 3 5 9 VALUES OF N -* 3 NUMBER OF VALUES OF ALPHA -* 0.0 1.0 0.7 VALUES OF ALPHA -* 3 NUMBER OF VALUES OF BETA -* 0.0 1.0 1.3 VALUES OF BETA -* DGEMM T PUT F FOR NO TEST. SAME COLUMNS. -* DSYMM T PUT F FOR NO TEST. SAME COLUMNS. -* DTRMM T PUT F FOR NO TEST. SAME COLUMNS. -* DTRSM T PUT F FOR NO TEST. SAME COLUMNS. -* DSYRK T PUT F FOR NO TEST. SAME COLUMNS. -* DSYR2K T PUT F FOR NO TEST. SAME COLUMNS. -* -* See: -* -* Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. -* A Set of Level 3 Basic Linear Algebra Subprograms. -* -* Technical Memorandum No.88 (Revision 1), Mathematics and -* Computer Science Division, Argonne National Laboratory, 9700 -* South Cass Avenue, Argonne, Illinois 60439, US. 
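
An aside on the new test/compare_sgemm_sbgemm.c above: it builds its bfloat16 inputs by keeping only the top 16 bits of each float's IEEE-754 pattern (truncation, no rounding), and widens them back by shifting the 7 mantissa bits into the top of the float mantissa. A self-contained sketch of that round trip (helper names are mine, not from the test file):

/* bfloat16 <-> float by bit manipulation: bfloat16 is the upper half of
 * an IEEE-754 single (1 sign, 8 exponent, 7 mantissa bits).  Truncating
 * the low 16 bits gives a relative error below 2^-7 for normal values. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t f32_to_bf16 (float f)   /* truncate, as the test does */
{
  uint32_t u;
  memcpy (&u, &f, sizeof u);            /* alias-safe bit copy */
  return (uint16_t) (u >> 16);
}

static float bf16_to_f32 (uint16_t h)
{
  uint32_t u = (uint32_t) h << 16;      /* low mantissa bits become 0 */
  float f;
  memcpy (&f, &u, sizeof f);
  return f;
}

int main (void)
{
  float x = 1.2345678f;
  float y = bf16_to_f32 (f32_to_bf16 (x));   /* 1.234375 */
  printf ("%.7f -> %.7f (rel. err %.3g)\n", x, y, (x - y) / x);
  return 0;
}
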
+* -- Reference BLAS test routine (version 3.7.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 * -* -- Written on 8-February-1989. -* Jack Dongarra, Argonne National Laboratory. -* Iain Duff, AERE Harwell. -* Jeremy Du Croz, Numerical Algorithms Group Ltd. -* Sven Hammarling, Numerical Algorithms Group Ltd. +* ===================================================================== * * .. Parameters .. INTEGER NIN PARAMETER ( NIN = 5 ) INTEGER NSUBS PARAMETER ( NSUBS = 6 ) - DOUBLE PRECISION ZERO, HALF, ONE - PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) INTEGER NMAX PARAMETER ( NMAX = 65 ) INTEGER NIDMAX, NALMAX, NBEMAX @@ -96,7 +142,7 @@ * READ( NIN, FMT = * )SUMMRY READ( NIN, FMT = * )NOUT - OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) NOUTC = NOUT * * Read name and unit number for snapshot output file and open file. @@ -105,7 +151,7 @@ READ( NIN, FMT = * )NTRA TRACE = NTRA.GE.0 IF( TRACE )THEN - OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) END IF * Read the flag that directs rewinding of the snapshot file. READ( NIN, FMT = * )REWI @@ -182,14 +228,7 @@ * * Compute EPS (the machine precision). * - EPS = ONE - 70 CONTINUE - IF( DDIFF( ONE + EPS, ONE ).EQ.ZERO ) - $ GO TO 80 - EPS = HALF*EPS - GO TO 70 - 80 CONTINUE - EPS = EPS + EPS + EPS = EPSILON(ZERO) WRITE( NOUT, FMT = 9998 )EPS * * Check the reliability of DMMCH using exact data. @@ -1802,7 +1841,7 @@ * * Tests the error exits from the Level 3 Blas. * Requires a special version of the error-handling routine XERBLA. -* ALPHA, BETA, A, B and C should not need to be defined. +* A, B and C should not need to be defined. * * Auxiliary routine for test program for Level 3 Blas. * @@ -1812,12 +1851,18 @@ * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * +* 3-19-92: Initialize ALPHA and BETA (eca) +* 3-19-92: Fix argument 12 in calls to SSYMM with INFOT = 9 (eca) +* * .. Scalar Arguments .. INTEGER ISNUM, NOUT CHARACTER*6 SRNAMT * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK +* .. Parameters .. + DOUBLE PRECISION ONE, TWO + PARAMETER ( ONE = 1.0D0, TWO = 2.0D0 ) * .. Local Scalars .. DOUBLE PRECISION ALPHA, BETA * .. Local Arrays .. @@ -1834,6 +1879,12 @@ * LERR is set to .TRUE. by the special version of XERBLA each time * it is called, and is then tested and re-set by CHKXER. LERR = .FALSE. +* +* Initialize ALPHA and BETA. 
+* + ALPHA = ONE + BETA = TWO +* GO TO ( 10, 20, 30, 40, 50, 60 )ISNUM 10 INFOT = 1 CALL DGEMM( '/', 'N', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) @@ -1963,16 +2014,16 @@ CALL DSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL DSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL DSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL DSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL DSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL DSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL DSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL DSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL DSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 12 CALL DSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) @@ -2660,7 +2711,6 @@ 50 CONTINUE END IF * - 60 CONTINUE LDERES = .TRUE. GO TO 80 70 CONTINUE diff --git a/test/sblat2.f b/test/sblat2.f index a1074be52..56ead8640 100644 --- a/test/sblat2.f +++ b/test/sblat2.f @@ -1,75 +1,121 @@ +*> \brief \b SBLAT2 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM SBLAT2 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the REAL Level 2 Blas. +*> +*> The program must be driven by a short data file. The first 18 records +*> of the file are read using list-directed input, the last 16 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 34 lines: +*> 'sblat2.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'SBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 4 NUMBER OF VALUES OF K +*> 0 1 2 4 VALUES OF K +*> 4 NUMBER OF VALUES OF INCX AND INCY +*> 1 2 -1 -2 VALUES OF INCX AND INCY +*> 3 NUMBER OF VALUES OF ALPHA +*> 0.0 1.0 0.7 VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> 0.0 1.0 0.9 VALUES OF BETA +*> SGEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> SGBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYMV T PUT F FOR NO TEST. SAME COLUMNS. +*> SSBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> SSPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> STRMV T PUT F FOR NO TEST. SAME COLUMNS. +*> STBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> STPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> STRSV T PUT F FOR NO TEST. SAME COLUMNS. +*> STBSV T PUT F FOR NO TEST. SAME COLUMNS. +*> STPSV T PUT F FOR NO TEST. SAME COLUMNS. +*> SGER T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYR T PUT F FOR NO TEST. SAME COLUMNS. +*> SSPR T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> SSPR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. +*> An extended set of Fortran Basic Linear Algebra Subprograms. +*> +*> Technical Memoranda Nos. 
41 (revision 3) and 81, Mathematics +*> and Computer Science Division, Argonne National Laboratory, +*> 9700 South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> Or +*> +*> NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms +*> Group Ltd., NAG Central Office, 256 Banbury Road, Oxford +*> OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st +*> Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +*> +*> +*> -- Written on 10-August-1987. +*> Richard Hanson, Sandia National Labs. +*> Jeremy Du Croz, NAG Central Office. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup single_blas_testing +* +* ===================================================================== PROGRAM SBLAT2 * -* Test program for the REAL Level 2 Blas. -* -* The program must be driven by a short data file. The first 18 records -* of the file are read using list-directed input, the last 16 records -* are read using the format ( A6, L2 ). An annotated example of a data -* file can be obtained by deleting the first 3 characters from the -* following 34 lines: -* 'SBLAT2.SUMM' NAME OF SUMMARY OUTPUT FILE -* 6 UNIT NUMBER OF SUMMARY FILE -* 'SBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE -* -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) -* F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO STOP ON FAILURES. -* T LOGICAL FLAG, T TO TEST ERROR EXITS. -* 16.0 THRESHOLD VALUE OF TEST RATIO -* 6 NUMBER OF VALUES OF N -* 0 1 2 3 5 9 VALUES OF N -* 4 NUMBER OF VALUES OF K -* 0 1 2 4 VALUES OF K -* 4 NUMBER OF VALUES OF INCX AND INCY -* 1 2 -1 -2 VALUES OF INCX AND INCY -* 3 NUMBER OF VALUES OF ALPHA -* 0.0 1.0 0.7 VALUES OF ALPHA -* 3 NUMBER OF VALUES OF BETA -* 0.0 1.0 0.9 VALUES OF BETA -* SGEMV T PUT F FOR NO TEST. SAME COLUMNS. -* SGBMV T PUT F FOR NO TEST. SAME COLUMNS. -* SSYMV T PUT F FOR NO TEST. SAME COLUMNS. -* SSBMV T PUT F FOR NO TEST. SAME COLUMNS. -* SSPMV T PUT F FOR NO TEST. SAME COLUMNS. -* STRMV T PUT F FOR NO TEST. SAME COLUMNS. -* STBMV T PUT F FOR NO TEST. SAME COLUMNS. -* STPMV T PUT F FOR NO TEST. SAME COLUMNS. -* STRSV T PUT F FOR NO TEST. SAME COLUMNS. -* STBSV T PUT F FOR NO TEST. SAME COLUMNS. -* STPSV T PUT F FOR NO TEST. SAME COLUMNS. -* SGER T PUT F FOR NO TEST. SAME COLUMNS. -* SSYR T PUT F FOR NO TEST. SAME COLUMNS. -* SSPR T PUT F FOR NO TEST. SAME COLUMNS. -* SSYR2 T PUT F FOR NO TEST. SAME COLUMNS. -* SSPR2 T PUT F FOR NO TEST. SAME COLUMNS. -* -* See: -* -* Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. -* An extended set of Fortran Basic Linear Algebra Subprograms. -* -* Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics -* and Computer Science Division, Argonne National Laboratory, -* 9700 South Cass Avenue, Argonne, Illinois 60439, US. -* -* Or -* -* NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms -* Group Ltd., NAG Central Office, 256 Banbury Road, Oxford -* OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st -* Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +* -- Reference BLAS test routine (version 3.7.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. 
of Colorado Denver and NAG Ltd..-- +* April 2012 * -* -* -- Written on 10-August-1987. -* Richard Hanson, Sandia National Labs. -* Jeremy Du Croz, NAG Central Office. +* ===================================================================== * * .. Parameters .. INTEGER NIN PARAMETER ( NIN = 5 ) INTEGER NSUBS PARAMETER ( NSUBS = 16 ) - REAL ZERO, HALF, ONE - PARAMETER ( ZERO = 0.0, HALF = 0.5, ONE = 1.0 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) INTEGER NMAX, INCMAX PARAMETER ( NMAX = 65, INCMAX = 2 ) INTEGER NINMAX, NIDMAX, NKBMAX, NALMAX, NBEMAX @@ -121,7 +167,7 @@ * READ( NIN, FMT = * )SUMMRY READ( NIN, FMT = * )NOUT - OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) NOUTC = NOUT * * Read name and unit number for snapshot output file and open file. @@ -130,7 +176,7 @@ READ( NIN, FMT = * )NTRA TRACE = NTRA.GE.0 IF( TRACE )THEN - OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) END IF * Read the flag that directs rewinding of the snapshot file. READ( NIN, FMT = * )REWI @@ -235,14 +281,7 @@ * * Compute EPS (the machine precision). * - EPS = ONE - 90 CONTINUE - IF( SDIFF( ONE + EPS, ONE ).EQ.ZERO ) - $ GO TO 100 - EPS = HALF*EPS - GO TO 90 - 100 CONTINUE - EPS = EPS + EPS + EPS = EPSILON(ZERO) WRITE( NOUT, FMT = 9998 )EPS * * Check the reliability of SMVCH using exact data. @@ -2982,7 +3021,6 @@ 50 CONTINUE END IF * - 60 CONTINUE LSERES = .TRUE. GO TO 80 70 CONTINUE diff --git a/test/sblat3.f b/test/sblat3.f index 325a9eb92..66edac14e 100644 --- a/test/sblat3.f +++ b/test/sblat3.f @@ -1,55 +1,101 @@ +*> \brief \b SBLAT3 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM SBLAT3 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the REAL Level 3 Blas. +*> +*> The program must be driven by a short data file. The first 14 records +*> of the file are read using list-directed input, the last 6 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 20 lines: +*> 'sblat3.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'SBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 3 NUMBER OF VALUES OF ALPHA +*> 0.0 1.0 0.7 VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> 0.0 1.0 1.3 VALUES OF BETA +*> SGEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYMM T PUT F FOR NO TEST. SAME COLUMNS. +*> STRMM T PUT F FOR NO TEST. SAME COLUMNS. +*> STRSM T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYRK T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYR2K T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. +*> A Set of Level 3 Basic Linear Algebra Subprograms. +*> +*> Technical Memorandum No.88 (Revision 1), Mathematics and +*> Computer Science Division, Argonne National Laboratory, 9700 +*> South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> -- Written on 8-February-1989. +*> Jack Dongarra, Argonne National Laboratory. 
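
The INFOT = 9 corrections in dblat3.f above (and the identical sblat3.f ones below) all follow from XERBLA's first-error semantics: a call intended to exercise argument 9 (LDB) must keep every later argument valid, so with M = 2 the leading dimension of C has to be 2, not 1. A compressed sketch of the check order for a SYMM-style routine (symm_check is my own illustration, not BLAS code):

/* First-error semantics of BLAS argument checking for a SYMM-like
 * signature: return the 1-based index of the first invalid argument,
 * or 0 when the call is clean -- the number XERBLA would report. */
#include <stdio.h>

static int max_int (int a, int b) { return a > b ? a : b; }

static int symm_check (char side, char uplo, int m, int n,
                       int lda, int ldb, int ldc)
{
  int ka = (side == 'L') ? m : n;       /* order of A depends on SIDE */
  if (side != 'L' && side != 'R') return 1;
  if (uplo != 'U' && uplo != 'L') return 2;
  if (m < 0) return 3;
  if (n < 0) return 4;
  if (lda < max_int (1, ka)) return 7;
  if (ldb < max_int (1, m)) return 9;
  if (ldc < max_int (1, m)) return 12;
  return 0;
}

int main (void)
{
  /* Old test call: LDB = 1 and LDC = 1 with M = 2 -- argument 9 fires
     first, but the call is invalid for two reasons. */
  printf ("ldc=1: first bad arg = %d\n", symm_check ('L', 'U', 2, 0, 2, 1, 1));
  /* Fixed call: LDC = 2 keeps argument 12 valid, so the error exit is
     now triggered by exactly the argument under test. */
  printf ("ldc=2: first bad arg = %d\n", symm_check ('L', 'U', 2, 0, 2, 1, 2));
  return 0;
}
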
+*> Iain Duff, AERE Harwell. +*> Jeremy Du Croz, Numerical Algorithms Group Ltd. +*> Sven Hammarling, Numerical Algorithms Group Ltd. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup single_blas_testing +* +* ===================================================================== PROGRAM SBLAT3 * -* Test program for the REAL Level 3 Blas. -* -* The program must be driven by a short data file. The first 14 records -* of the file are read using list-directed input, the last 6 records -* are read using the format ( A6, L2 ). An annotated example of a data -* file can be obtained by deleting the first 3 characters from the -* following 20 lines: -* 'SBLAT3.SUMM' NAME OF SUMMARY OUTPUT FILE -* 6 UNIT NUMBER OF SUMMARY FILE -* 'SBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE -* -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) -* F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO STOP ON FAILURES. -* T LOGICAL FLAG, T TO TEST ERROR EXITS. -* 16.0 THRESHOLD VALUE OF TEST RATIO -* 6 NUMBER OF VALUES OF N -* 0 1 2 3 5 9 VALUES OF N -* 3 NUMBER OF VALUES OF ALPHA -* 0.0 1.0 0.7 VALUES OF ALPHA -* 3 NUMBER OF VALUES OF BETA -* 0.0 1.0 1.3 VALUES OF BETA -* SGEMM T PUT F FOR NO TEST. SAME COLUMNS. -* SSYMM T PUT F FOR NO TEST. SAME COLUMNS. -* STRMM T PUT F FOR NO TEST. SAME COLUMNS. -* STRSM T PUT F FOR NO TEST. SAME COLUMNS. -* SSYRK T PUT F FOR NO TEST. SAME COLUMNS. -* SSYR2K T PUT F FOR NO TEST. SAME COLUMNS. -* -* See: -* -* Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. -* A Set of Level 3 Basic Linear Algebra Subprograms. -* -* Technical Memorandum No.88 (Revision 1), Mathematics and -* Computer Science Division, Argonne National Laboratory, 9700 -* South Cass Avenue, Argonne, Illinois 60439, US. +* -- Reference BLAS test routine (version 3.7.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 * -* -- Written on 8-February-1989. -* Jack Dongarra, Argonne National Laboratory. -* Iain Duff, AERE Harwell. -* Jeremy Du Croz, Numerical Algorithms Group Ltd. -* Sven Hammarling, Numerical Algorithms Group Ltd. +* ===================================================================== * * .. Parameters .. INTEGER NIN PARAMETER ( NIN = 5 ) INTEGER NSUBS PARAMETER ( NSUBS = 6 ) - REAL ZERO, HALF, ONE - PARAMETER ( ZERO = 0.0, HALF = 0.5, ONE = 1.0 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) INTEGER NMAX PARAMETER ( NMAX = 65 ) INTEGER NIDMAX, NALMAX, NBEMAX @@ -96,7 +142,7 @@ * READ( NIN, FMT = * )SUMMRY READ( NIN, FMT = * )NOUT - OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) + OPEN( NOUT, FILE = SUMMRY ) NOUTC = NOUT * * Read name and unit number for snapshot output file and open file. @@ -105,7 +151,7 @@ READ( NIN, FMT = * )NTRA TRACE = NTRA.GE.0 IF( TRACE )THEN - OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) + OPEN( NTRA, FILE = SNAPS ) END IF * Read the flag that directs rewinding of the snapshot file. READ( NIN, FMT = * )REWI @@ -182,14 +228,7 @@ * * Compute EPS (the machine precision). 
* - EPS = ONE - 70 CONTINUE - IF( SDIFF( ONE + EPS, ONE ).EQ.ZERO ) - $ GO TO 80 - EPS = HALF*EPS - GO TO 70 - 80 CONTINUE - EPS = EPS + EPS + EPS = EPSILON(ZERO) WRITE( NOUT, FMT = 9998 )EPS * * Check the reliability of SMMCH using exact data. @@ -1802,7 +1841,7 @@ * * Tests the error exits from the Level 3 Blas. * Requires a special version of the error-handling routine XERBLA. -* ALPHA, BETA, A, B and C should not need to be defined. +* A, B and C should not need to be defined. * * Auxiliary routine for test program for Level 3 Blas. * @@ -1812,12 +1851,18 @@ * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * +* 3-19-92: Initialize ALPHA and BETA (eca) +* 3-19-92: Fix argument 12 in calls to SSYMM with INFOT = 9 (eca) +* * .. Scalar Arguments .. INTEGER ISNUM, NOUT CHARACTER*6 SRNAMT * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK +* .. Parameters .. + REAL ONE, TWO + PARAMETER ( ONE = 1.0E0, TWO = 2.0E0 ) * .. Local Scalars .. REAL ALPHA, BETA * .. Local Arrays .. @@ -1834,6 +1879,12 @@ * LERR is set to .TRUE. by the special version of XERBLA each time * it is called, and is then tested and re-set by CHKXER. LERR = .FALSE. +* +* Initialize ALPHA and BETA. +* + ALPHA = ONE + BETA = TWO +* GO TO ( 10, 20, 30, 40, 50, 60 )ISNUM 10 INFOT = 1 CALL SGEMM( '/', 'N', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) @@ -1963,16 +2014,16 @@ CALL SSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL SSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL SSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL SSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL SSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL SSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL SSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 - CALL SSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL SSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 12 CALL SSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) @@ -2660,7 +2711,6 @@ 50 CONTINUE END IF * - 60 CONTINUE LSERES = .TRUE. GO TO 80 70 CONTINUE diff --git a/test/zblat1.f b/test/zblat1.f index 8b4b8d21e..2d7b88490 100644 --- a/test/zblat1.f +++ b/test/zblat1.f @@ -1,7 +1,49 @@ +*> \brief \b ZBLAT1 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM ZBLAT1 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX*16 Level 1 BLAS. +*> +*> Based upon the original BLAS test routine together with: +*> F06GAF Example Program Text +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex16_blas_testing +* +* ===================================================================== PROGRAM ZBLAT1 -* Test program for the COMPLEX*16 Level 1 BLAS. -* Based upon the original BLAS test routine together with: -* F06GAF Example Program Text +* +* -- Reference BLAS test routine (version 3.7.0) -- +* -- Reference BLAS is a software package provided by Univ. 
of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* * .. Parameters .. INTEGER NOUT PARAMETER (NOUT=6) @@ -114,8 +156,8 @@ + (5.0D0,6.0D0), (5.0D0,6.0D0), (0.1D0,0.1D0), + (-0.6D0,0.1D0), (0.1D0,-0.3D0), (7.0D0,8.0D0), + (7.0D0,8.0D0), (7.0D0,8.0D0), (7.0D0,8.0D0), - + (7.0D0,8.0D0), (0.3D0,0.1D0), (0.1D0,0.4D0), - + (0.4D0,0.1D0), (0.1D0,0.2D0), (2.0D0,3.0D0), + + (7.0D0,8.0D0), (0.3D0,0.1D0), (0.5D0,0.0D0), + + (0.0D0,0.5D0), (0.0D0,0.2D0), (2.0D0,3.0D0), + (2.0D0,3.0D0), (2.0D0,3.0D0), (2.0D0,3.0D0)/ DATA ((CV(I,J,2),I=1,8),J=1,5)/(0.1D0,0.1D0), + (4.0D0,5.0D0), (4.0D0,5.0D0), (4.0D0,5.0D0), @@ -129,10 +171,10 @@ + (3.0D0,6.0D0), (-0.6D0,0.1D0), (4.0D0,7.0D0), + (0.1D0,-0.3D0), (7.0D0,2.0D0), (7.0D0,2.0D0), + (7.0D0,2.0D0), (0.3D0,0.1D0), (5.0D0,8.0D0), - + (0.1D0,0.4D0), (6.0D0,9.0D0), (0.4D0,0.1D0), - + (8.0D0,3.0D0), (0.1D0,0.2D0), (9.0D0,4.0D0)/ - DATA STRUE2/0.0D0, 0.5D0, 0.6D0, 0.7D0, 0.7D0/ - DATA STRUE4/0.0D0, 0.7D0, 1.0D0, 1.3D0, 1.7D0/ + + (0.5D0,0.0D0), (6.0D0,9.0D0), (0.0D0,0.5D0), + + (8.0D0,3.0D0), (0.0D0,0.2D0), (9.0D0,4.0D0)/ + DATA STRUE2/0.0D0, 0.5D0, 0.6D0, 0.7D0, 0.8D0/ + DATA STRUE4/0.0D0, 0.7D0, 1.0D0, 1.3D0, 1.6D0/ DATA ((CTRUE5(I,J,1),I=1,8),J=1,5)/(0.1D0,0.1D0), + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), @@ -145,8 +187,8 @@ + (0.11D0,-0.03D0), (-0.17D0,0.46D0), + (-0.17D0,-0.19D0), (7.0D0,8.0D0), (7.0D0,8.0D0), + (7.0D0,8.0D0), (7.0D0,8.0D0), (7.0D0,8.0D0), - + (0.19D0,-0.17D0), (0.32D0,0.09D0), - + (0.23D0,-0.24D0), (0.18D0,0.01D0), + + (0.19D0,-0.17D0), (0.20D0,-0.35D0), + + (0.35D0,0.20D0), (0.14D0,0.08D0), + (2.0D0,3.0D0), (2.0D0,3.0D0), (2.0D0,3.0D0), + (2.0D0,3.0D0)/ DATA ((CTRUE5(I,J,2),I=1,8),J=1,5)/(0.1D0,0.1D0), @@ -162,9 +204,9 @@ + (-0.17D0,0.46D0), (4.0D0,7.0D0), + (-0.17D0,-0.19D0), (7.0D0,2.0D0), (7.0D0,2.0D0), + (7.0D0,2.0D0), (0.19D0,-0.17D0), (5.0D0,8.0D0), - + (0.32D0,0.09D0), (6.0D0,9.0D0), - + (0.23D0,-0.24D0), (8.0D0,3.0D0), - + (0.18D0,0.01D0), (9.0D0,4.0D0)/ + + (0.20D0,-0.35D0), (6.0D0,9.0D0), + + (0.35D0,0.20D0), (8.0D0,3.0D0), + + (0.14D0,0.08D0), (9.0D0,4.0D0)/ DATA ((CTRUE6(I,J,1),I=1,8),J=1,5)/(0.1D0,0.1D0), + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), @@ -177,8 +219,8 @@ + (0.03D0,0.03D0), (-0.18D0,0.03D0), + (0.03D0,-0.09D0), (7.0D0,8.0D0), (7.0D0,8.0D0), + (7.0D0,8.0D0), (7.0D0,8.0D0), (7.0D0,8.0D0), - + (0.09D0,0.03D0), (0.03D0,0.12D0), - + (0.12D0,0.03D0), (0.03D0,0.06D0), (2.0D0,3.0D0), + + (0.09D0,0.03D0), (0.15D0,0.00D0), + + (0.00D0,0.15D0), (0.00D0,0.06D0), (2.0D0,3.0D0), + (2.0D0,3.0D0), (2.0D0,3.0D0), (2.0D0,3.0D0)/ DATA ((CTRUE6(I,J,2),I=1,8),J=1,5)/(0.1D0,0.1D0), + (4.0D0,5.0D0), (4.0D0,5.0D0), (4.0D0,5.0D0), @@ -193,8 +235,8 @@ + (-0.18D0,0.03D0), (4.0D0,7.0D0), + (0.03D0,-0.09D0), (7.0D0,2.0D0), (7.0D0,2.0D0), + (7.0D0,2.0D0), (0.09D0,0.03D0), (5.0D0,8.0D0), - + (0.03D0,0.12D0), (6.0D0,9.0D0), (0.12D0,0.03D0), - + (8.0D0,3.0D0), (0.03D0,0.06D0), (9.0D0,4.0D0)/ + + (0.15D0,0.00D0), (6.0D0,9.0D0), (0.00D0,0.15D0), + + (8.0D0,3.0D0), (0.00D0,0.06D0), (9.0D0,4.0D0)/ DATA ITRUE3/0, 1, 2, 2, 2/ * .. Executable Statements .. DO 60 INCX = 1, 2 @@ -529,7 +571,8 @@ * * .. Parameters .. INTEGER NOUT - PARAMETER (NOUT=6) + DOUBLE PRECISION ZERO + PARAMETER (NOUT=6, ZERO=0.0D0) * .. Scalar Arguments .. 
diff --git a/test/zblat2.f b/test/zblat2.f
index e65cdcc70..4a20ac567 100644
--- a/test/zblat2.f
+++ b/test/zblat2.f
@@ -1,68 +1,114 @@
+*> \brief \b ZBLAT2
+*
+*  =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+*            http://www.netlib.org/lapack/explore-html/
+*
+*  Definition:
+*  ===========
+*
+*       PROGRAM ZBLAT2
+*
+*
+*> \par Purpose:
+*  =============
+*>
+*> \verbatim
+*>
+*> Test program for the COMPLEX*16 Level 2 Blas.
+*>
+*> The program must be driven by a short data file. The first 18 records
+*> of the file are read using list-directed input, the last 17 records
+*> are read using the format ( A6, L2 ). An annotated example of a data
+*> file can be obtained by deleting the first 3 characters from the
+*> following 35 lines:
+*> 'zblat2.out'      NAME OF SUMMARY OUTPUT FILE
+*> 6                 UNIT NUMBER OF SUMMARY FILE
+*> 'CBLA2T.SNAP'     NAME OF SNAPSHOT OUTPUT FILE
+*> -1                UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0)
+*> F        LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD.
+*> F        LOGICAL FLAG, T TO STOP ON FAILURES.
+*> T        LOGICAL FLAG, T TO TEST ERROR EXITS.
+*> 16.0     THRESHOLD VALUE OF TEST RATIO
+*> 6                 NUMBER OF VALUES OF N
+*> 0 1 2 3 5 9       VALUES OF N
+*> 4                 NUMBER OF VALUES OF K
+*> 0 1 2 4           VALUES OF K
+*> 4                 NUMBER OF VALUES OF INCX AND INCY
+*> 1 2 -1 -2         VALUES OF INCX AND INCY
+*> 3                 NUMBER OF VALUES OF ALPHA
+*> (0.0,0.0) (1.0,0.0) (0.7,-0.9)   VALUES OF ALPHA
+*> 3                 NUMBER OF VALUES OF BETA
+*> (0.0,0.0) (1.0,0.0) (1.3,-1.1)   VALUES OF BETA
+*> ZGEMV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZGBMV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHEMV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHBMV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHPMV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZTRMV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZTBMV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZTPMV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZTRSV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZTBSV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZTPSV  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZGERC  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZGERU  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHER   T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHPR   T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHER2  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHPR2  T PUT F FOR NO TEST. SAME COLUMNS.
+*>
+*> Further Details
+*> ===============
+*>
+*> See:
+*>
+*>    Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J..
+*>    An extended set of Fortran Basic Linear Algebra Subprograms.
+*>
+*>    Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics
+*>    and Computer Science Division, Argonne National Laboratory,
+*>    9700 South Cass Avenue, Argonne, Illinois 60439, US.
+*>
+*> Or
+*>
+*>    NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms
+*>    Group Ltd., NAG Central Office, 256 Banbury Road, Oxford
+*>    OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st
+*>    Street, Suite 100, Downers Grove, Illinois 60515-1263, USA.
+*>
+*>
+*> -- Written on 10-August-1987.
+*>    Richard Hanson, Sandia National Labs.
+*>    Jeremy Du Croz, NAG Central Office.
+*>
+*>    10-9-00:  Change STATUS='NEW' to 'UNKNOWN' so that the testers
+*>              can be run multiple times without deleting generated
+*>              output files (susan)
+*> \endverbatim
+*
+*  Authors:
+*  ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+*
+*> \date April 2012
+*
+*> \ingroup complex16_blas_testing
+*
+*  =====================================================================
       PROGRAM ZBLAT2
 *
-*  Test program for the COMPLEX*16 Level 2 Blas.
-*
-*  The program must be driven by a short data file. The first 18 records
-*  of the file are read using list-directed input, the last 17 records
-*  are read using the format ( A6, L2 ). An annotated example of a data
-*  file can be obtained by deleting the first 3 characters from the
-*  following 35 lines:
-*  'ZBLAT2.SUMM'     NAME OF SUMMARY OUTPUT FILE
-*  6                 UNIT NUMBER OF SUMMARY FILE
-*  'CBLA2T.SNAP'     NAME OF SNAPSHOT OUTPUT FILE
-*  -1                UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0)
-*  F        LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD.
-*  F        LOGICAL FLAG, T TO STOP ON FAILURES.
-*  T        LOGICAL FLAG, T TO TEST ERROR EXITS.
-*  16.0     THRESHOLD VALUE OF TEST RATIO
-*  6                 NUMBER OF VALUES OF N
-*  0 1 2 3 5 9       VALUES OF N
-*  4                 NUMBER OF VALUES OF K
-*  0 1 2 4           VALUES OF K
-*  4                 NUMBER OF VALUES OF INCX AND INCY
-*  1 2 -1 -2         VALUES OF INCX AND INCY
-*  3                 NUMBER OF VALUES OF ALPHA
-*  (0.0,0.0) (1.0,0.0) (0.7,-0.9)   VALUES OF ALPHA
-*  3                 NUMBER OF VALUES OF BETA
-*  (0.0,0.0) (1.0,0.0) (1.3,-1.1)   VALUES OF BETA
-*  ZGEMV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZGBMV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHEMV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHBMV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHPMV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZTRMV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZTBMV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZTPMV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZTRSV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZTBSV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZTPSV  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZGERC  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZGERU  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHER   T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHPR   T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHER2  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHPR2  T PUT F FOR NO TEST. SAME COLUMNS.
-*
-*  See:
-*
-*     Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J..
-*     An extended set of Fortran Basic Linear Algebra Subprograms.
-*
-*     Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics
-*     and Computer Science Division, Argonne National Laboratory,
-*     9700 South Cass Avenue, Argonne, Illinois 60439, US.
-*
-*     Or
-*
-*     NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms
-*     Group Ltd., NAG Central Office, 256 Banbury Road, Oxford
-*     OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st
-*     Street, Suite 100, Downers Grove, Illinois 60515-1263, USA.
+*
+*  -- Reference BLAS test routine (version 3.7.0) --
+*  -- Reference BLAS is a software package provided by Univ. of Tennessee,    --
+*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+*     April 2012
 *
-*
-*  -- Written on 10-August-1987.
-*     Richard Hanson, Sandia National Labs.
-*     Jeremy Du Croz, NAG Central Office.
+*  =====================================================================
 *
 *     .. Parameters ..
       INTEGER            NIN
@@ -72,8 +118,8 @@
       COMPLEX*16         ZERO, ONE
       PARAMETER          ( ZERO = ( 0.0D0, 0.0D0 ),
      $                   ONE = ( 1.0D0, 0.0D0 ) )
-      DOUBLE PRECISION   RZERO, RHALF, RONE
-      PARAMETER          ( RZERO = 0.0D0, RHALF = 0.5D0, RONE = 1.0D0 )
+      DOUBLE PRECISION   RZERO
+      PARAMETER          ( RZERO = 0.0D0 )
       INTEGER            NMAX, INCMAX
       PARAMETER          ( NMAX = 65, INCMAX = 2 )
       INTEGER            NINMAX, NIDMAX, NKBMAX, NALMAX, NBEMAX
@@ -127,7 +173,7 @@
 *
       READ( NIN, FMT = * )SUMMRY
       READ( NIN, FMT = * )NOUT
-      OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' )
+      OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' )
       NOUTC = NOUT
 *
 *     Read name and unit number for snapshot output file and open file.
@@ -136,7 +182,7 @@
       READ( NIN, FMT = * )NTRA
       TRACE = NTRA.GE.0
       IF( TRACE )THEN
-         OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' )
+         OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' )
       END IF
 *     Read the flag that directs rewinding of the snapshot file.
       READ( NIN, FMT = * )REWI
@@ -241,14 +287,7 @@
 *
 *     Compute EPS (the machine precision).
 *
-      EPS = RONE
-   90 CONTINUE
-      IF( DDIFF( RONE + EPS, RONE ).EQ.RZERO )
-     $   GO TO 100
-      EPS = RHALF*EPS
-      GO TO 90
-  100 CONTINUE
-      EPS = EPS + EPS
+      EPS = EPSILON(RZERO)
       WRITE( NOUT, FMT = 9998 )EPS
 *
 *     Check the reliability of ZMVCH using exact data.
@@ -3087,7 +3126,6 @@
    50 CONTINUE
       END IF
 *
-   60 CONTINUE
       LZERES = .TRUE.
       GO TO 80
    70 CONTINUE
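Beyond the documentation rework, zblat2.f carries two behavioral fixes that recur in zblat3.f below: the summary and snapshot files are opened with STATUS='UNKNOWN' instead of STATUS='NEW' (a 'NEW' open fails when the file already exists, so reruns used to require deleting the outputs by hand), and the machine precision again comes from EPSILON. A C analogue of the OPEN change, for illustration only (the exclusive "wx" mode is C11; the filename is the one named in the data file above):

    #include <stdio.h>

    int main(void) {
        /* "wx" fails if the file exists -- the STATUS='NEW' behavior
         * that made reruns abort; plain "w" creates or truncates,
         * which is what STATUS='UNKNOWN' gives in practice. */
        FILE *f = fopen("zblat2.out", "wx");
        if (!f)
            f = fopen("zblat2.out", "w");   /* rerun-friendly */
        if (f) fclose(f);
        return 0;
    }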
diff --git a/test/zblat3.f b/test/zblat3.f
index f03b1a617..0e38334e9 100644
--- a/test/zblat3.f
+++ b/test/zblat3.f
@@ -1,50 +1,97 @@
+*> \brief \b ZBLAT3
+*
+*  =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+*            http://www.netlib.org/lapack/explore-html/
+*
+*  Definition:
+*  ===========
+*
+*       PROGRAM ZBLAT3
+*
+*
+*> \par Purpose:
+*  =============
+*>
+*> \verbatim
+*>
+*> Test program for the COMPLEX*16 Level 3 Blas.
+*>
+*> The program must be driven by a short data file. The first 14 records
+*> of the file are read using list-directed input, the last 9 records
+*> are read using the format ( A6, L2 ). An annotated example of a data
+*> file can be obtained by deleting the first 3 characters from the
+*> following 23 lines:
+*> 'zblat3.out'      NAME OF SUMMARY OUTPUT FILE
+*> 6                 UNIT NUMBER OF SUMMARY FILE
+*> 'ZBLAT3.SNAP'     NAME OF SNAPSHOT OUTPUT FILE
+*> -1                UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0)
+*> F        LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD.
+*> F        LOGICAL FLAG, T TO STOP ON FAILURES.
+*> T        LOGICAL FLAG, T TO TEST ERROR EXITS.
+*> 16.0     THRESHOLD VALUE OF TEST RATIO
+*> 6                 NUMBER OF VALUES OF N
+*> 0 1 2 3 5 9       VALUES OF N
+*> 3                 NUMBER OF VALUES OF ALPHA
+*> (0.0,0.0) (1.0,0.0) (0.7,-0.9)   VALUES OF ALPHA
+*> 3                 NUMBER OF VALUES OF BETA
+*> (0.0,0.0) (1.0,0.0) (1.3,-1.1)   VALUES OF BETA
+*> ZGEMM  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHEMM  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZSYMM  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZTRMM  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZTRSM  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHERK  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZSYRK  T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZHER2K T PUT F FOR NO TEST. SAME COLUMNS.
+*> ZSYR2K T PUT F FOR NO TEST. SAME COLUMNS.
+*>
+*>
+*> Further Details
+*> ===============
+*>
+*> See:
+*>
+*>    Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S.
+*>    A Set of Level 3 Basic Linear Algebra Subprograms.
+*>
+*>    Technical Memorandum No.88 (Revision 1), Mathematics and
+*>    Computer Science Division, Argonne National Laboratory, 9700
+*>    South Cass Avenue, Argonne, Illinois 60439, US.
+*>
+*> -- Written on 8-February-1989.
+*>    Jack Dongarra, Argonne National Laboratory.
+*>    Iain Duff, AERE Harwell.
+*>    Jeremy Du Croz, Numerical Algorithms Group Ltd.
+*>    Sven Hammarling, Numerical Algorithms Group Ltd.
+*>
+*>    10-9-00:  Change STATUS='NEW' to 'UNKNOWN' so that the testers
+*>              can be run multiple times without deleting generated
+*>              output files (susan)
+*> \endverbatim
+*
+*  Authors:
+*  ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+*
+*> \date April 2012
+*
+*> \ingroup complex16_blas_testing
+*
+*  =====================================================================
       PROGRAM ZBLAT3
 *
-*  Test program for the COMPLEX*16 Level 3 Blas.
-*
-*  The program must be driven by a short data file. The first 14 records
-*  of the file are read using list-directed input, the last 9 records
-*  are read using the format ( A6, L2 ). An annotated example of a data
-*  file can be obtained by deleting the first 3 characters from the
-*  following 23 lines:
-*  'ZBLAT3.SUMM'     NAME OF SUMMARY OUTPUT FILE
-*  6                 UNIT NUMBER OF SUMMARY FILE
-*  'ZBLAT3.SNAP'     NAME OF SNAPSHOT OUTPUT FILE
-*  -1                UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0)
-*  F        LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD.
-*  F        LOGICAL FLAG, T TO STOP ON FAILURES.
-*  T        LOGICAL FLAG, T TO TEST ERROR EXITS.
-*  16.0     THRESHOLD VALUE OF TEST RATIO
-*  6                 NUMBER OF VALUES OF N
-*  0 1 2 3 5 9       VALUES OF N
-*  3                 NUMBER OF VALUES OF ALPHA
-*  (0.0,0.0) (1.0,0.0) (0.7,-0.9)   VALUES OF ALPHA
-*  3                 NUMBER OF VALUES OF BETA
-*  (0.0,0.0) (1.0,0.0) (1.3,-1.1)   VALUES OF BETA
-*  ZGEMM  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHEMM  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZSYMM  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZTRMM  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZTRSM  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHERK  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZSYRK  T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZHER2K T PUT F FOR NO TEST. SAME COLUMNS.
-*  ZSYR2K T PUT F FOR NO TEST. SAME COLUMNS.
-*
-*  See:
-*
-*     Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S.
-*     A Set of Level 3 Basic Linear Algebra Subprograms.
-*
-*     Technical Memorandum No.88 (Revision 1), Mathematics and
-*     Computer Science Division, Argonne National Laboratory, 9700
-*     South Cass Avenue, Argonne, Illinois 60439, US.
+*
+*  -- Reference BLAS test routine (version 3.7.0) --
+*  -- Reference BLAS is a software package provided by Univ. of Tennessee,    --
+*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+*     April 2012
 *
-*  -- Written on 8-February-1989.
-*     Jack Dongarra, Argonne National Laboratory.
-*     Iain Duff, AERE Harwell.
-*     Jeremy Du Croz, Numerical Algorithms Group Ltd.
-*     Sven Hammarling, Numerical Algorithms Group Ltd.
+*  =====================================================================
 *
 *     .. Parameters ..
       INTEGER            NIN
@@ -54,8 +101,8 @@
       COMPLEX*16         ZERO, ONE
       PARAMETER          ( ZERO = ( 0.0D0, 0.0D0 ),
      $                   ONE = ( 1.0D0, 0.0D0 ) )
-      DOUBLE PRECISION   RZERO, RHALF, RONE
-      PARAMETER          ( RZERO = 0.0D0, RHALF = 0.5D0, RONE = 1.0D0 )
+      DOUBLE PRECISION   RZERO
+      PARAMETER          ( RZERO = 0.0D0 )
       INTEGER            NMAX
       PARAMETER          ( NMAX = 65 )
       INTEGER            NIDMAX, NALMAX, NBEMAX
@@ -104,7 +151,7 @@
 *
       READ( NIN, FMT = * )SUMMRY
       READ( NIN, FMT = * )NOUT
-      OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' )
+      OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' )
       NOUTC = NOUT
 *
 *     Read name and unit number for snapshot output file and open file.
@@ -113,7 +160,7 @@
       READ( NIN, FMT = * )NTRA
       TRACE = NTRA.GE.0
       IF( TRACE )THEN
-         OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' )
+         OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' )
       END IF
 *     Read the flag that directs rewinding of the snapshot file.
       READ( NIN, FMT = * )REWI
@@ -190,14 +237,7 @@
 *
 *     Compute EPS (the machine precision).
 *
-      EPS = RONE
-   70 CONTINUE
-      IF( DDIFF( RONE + EPS, RONE ).EQ.RZERO )
-     $   GO TO 80
-      EPS = RHALF*EPS
-      GO TO 70
-   80 CONTINUE
-      EPS = EPS + EPS
+      EPS = EPSILON(RZERO)
       WRITE( NOUT, FMT = 9998 )EPS
 *
 *     Check the reliability of ZMMCH using exact data.
@@ -1303,8 +1343,6 @@
       NC = 0
       RESET = .TRUE.
       ERRMAX = RZERO
-      RALS = RONE
-      RBETS = RONE
 *
       DO 100 IN = 1, NIDIM
          N = IDIM( IN )
@@ -1951,7 +1989,7 @@
 *
 *  Tests the error exits from the Level 3 Blas.
 *  Requires a special version of the error-handling routine XERBLA.
-*  ALPHA, RALPHA, BETA, RBETA, A, B and C should not need to be defined.
+*  A, B and C should not need to be defined.
 *
 *  Auxiliary routine for test program for Level 3 Blas.
 *
@@ -1961,12 +1999,20 @@
 *  Jeremy Du Croz, Numerical Algorithms Group Ltd.
 *  Sven Hammarling, Numerical Algorithms Group Ltd.
 *
+*  3-19-92:  Initialize ALPHA, BETA, RALPHA, and RBETA  (eca)
+*  3-19-92:  Fix argument 12 in calls to ZSYMM and ZHEMM
+*            with INFOT = 9  (eca)
+*  10-9-00:  Declared INTRINSIC DCMPLX (susan)
+*
 *     .. Scalar Arguments ..
       INTEGER            ISNUM, NOUT
       CHARACTER*6        SRNAMT
 *     .. Scalars in Common ..
       INTEGER            INFOT, NOUTC
       LOGICAL            LERR, OK
+*     .. Parameters ..
+      REAL               ONE, TWO
+      PARAMETER          ( ONE = 1.0D0, TWO = 2.0D0 )
 *     .. Local Scalars ..
       COMPLEX*16         ALPHA, BETA
       DOUBLE PRECISION   RALPHA, RBETA
@@ -1975,6 +2021,8 @@
 *     .. External Subroutines ..
       EXTERNAL           ZGEMM, ZHEMM, ZHER2K, ZHERK, CHKXER, ZSYMM,
      $                   ZSYR2K, ZSYRK, ZTRMM, ZTRSM
+*     .. Intrinsic Functions ..
+      INTRINSIC          DCMPLX
 *     .. Common blocks ..
       COMMON             /INFOC/INFOT, NOUTC, OK, LERR
 *     .. Executable Statements ..
@@ -1984,6 +2032,14 @@
 *     LERR is set to .TRUE. by the special version of XERBLA each time
 *     it is called, and is then tested and re-set by CHKXER.
       LERR = .FALSE.
+*
+*     Initialize ALPHA, BETA, RALPHA, and RBETA.
+*
+      ALPHA = DCMPLX( ONE, -ONE )
+      BETA = DCMPLX( TWO, -TWO )
+      RALPHA = ONE
+      RBETA = TWO
+*
       GO TO ( 10, 20, 30, 40, 50, 60, 70, 80,
      $        90 )ISNUM
    10 INFOT = 1
@@ -2210,16 +2266,16 @@
       CALL ZHEMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 9
-      CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 )
+      CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 9
-      CALL ZHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
+      CALL ZHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 9
-      CALL ZHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 )
+      CALL ZHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 9
-      CALL ZHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
+      CALL ZHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 12
       CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 )
@@ -2277,16 +2333,16 @@
       CALL ZSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 9
-      CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 )
+      CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 9
-      CALL ZSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
+      CALL ZSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 9
-      CALL ZSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 1 )
+      CALL ZSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 9
-      CALL ZSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
+      CALL ZSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
       CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
       INFOT = 12
       CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 )
@@ -3276,7 +3332,6 @@
    50 CONTINUE
       END IF
 *
-   60 CONTINUE
       LZERES = .TRUE.
       GO TO 80
    70 CONTINUE
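The ZCHKE/SCHKE changes above tighten the error-exit protocol in two ways: the scalars ALPHA, BETA (and the real RALPHA, RBETA) are now initialized before the deliberately bad calls, and the INFOT = 9 cases pass a valid LDC of 2 so that argument 9 (LDB) is the only invalid one. The protocol itself hinges on a replacement XERBLA that records rather than aborts; a condensed C sketch of the mechanism, with illustrative names that are not the testers' actual interfaces:

    #include <stdio.h>

    static int infot;        /* argument number the next call must reject */
    static int lerr, ok = 1; /* XERBLA fired / suite still passing        */

    /* Stand-in for the special XERBLA linked into the testers:
     * remember what was reported instead of stopping the program. */
    static void xerbla_stub(const char *srname, int info) {
        lerr = 1;
        if (info != infot) ok = 0;
    }

    /* Stand-in for CHKXER: the bad call must have reached XERBLA,
     * and only through the argument position under test. */
    static void chkxer(const char *srname) {
        if (!lerr) {
            printf("%s: invalid argument %d not reported\n", srname, infot);
            ok = 0;
        }
        lerr = 0;
    }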
diff --git a/utest/CMakeLists.txt b/utest/CMakeLists.txt
index dc306501f..0c99e0d12 100644
--- a/utest/CMakeLists.txt
+++ b/utest/CMakeLists.txt
@@ -6,7 +6,9 @@ if (MSVC AND "${CMAKE_C_COMPILER_ID}" MATCHES Clang)
 else ()
 set(OpenBLAS_utest_src
   utest_main.c
+  test_min.c
   test_amax.c
+  test_ismin.c
   test_rotmg.c
   test_rot.c
   test_axpy.c
@@ -25,13 +27,17 @@ endif ()
 # known to hang with the native Windows and Android threads
 # FIXME needs checking if this works on any of the other platforms
-if (NOT USE_OPENMP)
 if (OS_CYGWIN_NT OR OS_LINUX)
+if (NOT USE_OPENMP)
 set(OpenBLAS_utest_src
   ${OpenBLAS_utest_src}
   test_fork.c
   )
 endif()
+set(OpenBLAS_utest_src
+  ${OpenBLAS_utest_src}
+  test_post_fork.c
+  )
 endif()
 
 if (NOT NO_LAPACK)
@@ -39,6 +45,12 @@ set(OpenBLAS_utest_src
   ${OpenBLAS_utest_src}
   test_potrs.c
   )
+if (NOT NO_CBLAS AND NOT NO_LAPACKE)
+set(OpenBLAS_utest_src
+  ${OpenBLAS_utest_src}
+  test_kernel_regress.c
+  )
+endif()
 endif()
 
 set(OpenBLAS_utest_bin openblas_utest)
@@ -46,7 +58,7 @@ add_executable(${OpenBLAS_utest_bin} ${OpenBLAS_utest_src})
 
 target_link_libraries(${OpenBLAS_utest_bin} ${OpenBLAS_LIBNAME})
 
-if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+if(${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
 target_link_libraries(${OpenBLAS_utest_bin} m)
 endif()
 
diff --git a/utest/Makefile b/utest/Makefile
index 550a65569..fad3607de 100644
--- a/utest/Makefile
+++ b/utest/Makefile
@@ -1,6 +1,9 @@
 UTEST_CHECK = 1
 TOPDIR = ..
 
+override TARGET_ARCH=
+override TARGET_MACH=
+
 UTESTBIN=openblas_utest
 
 .PHONY : all
@@ -8,19 +11,35 @@ UTESTBIN=openblas_utest
 
 include $(TOPDIR)/Makefile.system
 
-OBJS=utest_main.o test_amax.o test_rotmg.o test_axpy.o test_dotu.o test_dsdot.o test_swap.o test_rot.o
+OBJS=utest_main.o test_min.o test_amax.o test_ismin.o test_rotmg.o test_axpy.o test_dotu.o test_dsdot.o test_swap.o test_rot.o
 #test_rot.o test_swap.o test_axpy.o test_dotu.o test_dsdot.o test_fork.o
 
 ifneq ($(NO_LAPACK), 1)
 OBJS += test_potrs.o
+ifneq ($(NO_CBLAS), 1)
+ifneq ($(NO_LAPACKE), 1)
+OBJS += test_kernel_regress.o
+endif
+endif
 endif
 
 #this does not work with OpenMP nor with native Windows or Android threads
 # FIXME TBD if this works on OSX, SunOS, POWER and zarch
-ifndef USE_OPENMP
 ifeq ($(OSNAME), $(filter $(OSNAME),Linux CYGWIN_NT))
+ifneq ($(USE_OPENMP), 1)
 OBJS += test_fork.o
 endif
+OBJS += test_post_fork.o
+endif
+
+ifeq ($(C_COMPILER), PGI)
+OBJS = utest_main2.o
+endif
+ifeq ($(C_COMPILER), SUN)
+OBJS = utest_main2.o
+endif
+ifeq ($(OSNAME), AIX)
+OBJS = utest_main2.o
 endif
 
 all : run_test
 
@@ -29,7 +48,7 @@ $(UTESTBIN): $(OBJS)
 	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ ../$(LIBNAME) $(EXTRALIB) $(FEXTRALIB)
 
 run_test: $(UTESTBIN)
-ifndef CROSS
+ifneq ($(CROSS), 1)
 	./$(UTESTBIN)
 endif
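The utest source changes that follow all apply one compile-time pattern: each case is wrapped in the BUILD_SINGLE / BUILD_DOUBLE / BUILD_COMPLEX / BUILD_COMPLEX16 guard matching the precision it exercises, so the suite still builds and links when OpenBLAS is configured with only a subset of precisions. The pattern in miniature (a sketch assuming the BUILD_* macros the build system defines):

    #include "openblas_utest.h"   /* CTEST harness and BLASFUNC */

    #ifdef BUILD_SINGLE
    /* Compiled and registered only when the single-precision
     * kernels are part of the library. */
    CTEST(example, single_only) {
        ASSERT_DBL_NEAR_TOL(1.0, 1.0, SINGLE_EPS);
    }
    #endif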
diff --git a/utest/test_amax.c b/utest/test_amax.c
index 411598410..a9e5a1c85 100644
--- a/utest/test_amax.c
+++ b/utest/test_amax.c
@@ -33,6 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "openblas_utest.h"
 
+#ifdef BUILD_SINGLE
 CTEST(amax, samax){
   blasint N=3, inc=1;
   float te_max=0.0, tr_max=0.0;
@@ -40,6 +41,20 @@ CTEST(amax, samax){
   te_max=BLASFUNC(samax)(&N, x, &inc);
   tr_max=3.3;
 
-
+
   ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), SINGLE_EPS);
 }
+#endif
+#ifdef BUILD_DOUBLE
+CTEST(amax, damax){
+  blasint N=3, inc=1;
+  double te_max=0.0, tr_max=0.0;
+  double x[]={-1.1, 2.2, -3.3};
+
+  te_max=BLASFUNC(damax)(&N, x, &inc);
+  tr_max=3.3;
+
+  ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), DOUBLE_EPS);
+}
+#endif
+
diff --git a/utest/test_axpy.c b/utest/test_axpy.c
index 603043073..5fd7c1b04 100644
--- a/utest/test_axpy.c
+++ b/utest/test_axpy.c
@@ -33,6 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "openblas_utest.h"
 
+#ifdef BUILD_DOUBLE
 CTEST(axpy,daxpy_inc_0)
 {
 	blasint i;
@@ -52,7 +53,9 @@ CTEST(axpy,daxpy_inc_0)
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_COMPLEX16
 CTEST(axpy,zaxpy_inc_0)
 {
 	blasint i;
@@ -71,7 +74,9 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_SINGLE
 CTEST(axpy,saxpy_inc_0)
 {
 	blasint i;
@@ -90,7 +95,9 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_COMPLEX
 CTEST(axpy,caxpy_inc_0)
 {
 	blasint i;
@@ -109,3 +116,5 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS);
 	}
 }
+#endif
+
diff --git a/utest/test_dotu.c b/utest/test_dotu.c
index 918541848..542286403 100644
--- a/utest/test_dotu.c
+++ b/utest/test_dotu.c
@@ -33,6 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "openblas_utest.h"
 
+#ifdef BUILD_COMPLEX16
 CTEST( zdotu,zdotu_n_1)
 {
 	blasint N=1,incX=1,incY=1;
@@ -80,3 +81,5 @@ CTEST(zdotu, zdotu_offset_1)
 #endif
 }
+#endif
+
diff --git a/utest/test_dsdot.c b/utest/test_dsdot.c
index d58b398a8..adef4e91c 100644
--- a/utest/test_dsdot.c
+++ b/utest/test_dsdot.c
@@ -32,7 +32,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 **********************************************************************************/
 
 #include "openblas_utest.h"
-
+#if defined(BUILD_SINGLE) && defined(BUILD_DOUBLE)
 CTEST(dsdot,dsdot_n_1)
 {
 	float x= 0.172555164F;
@@ -47,3 +47,4 @@ CTEST(dsdot,dsdot_n_1)
 
 	ASSERT_DBL_NEAR_TOL(res2, res1, DOUBLE_EPS);
 }
+#endif
diff --git a/utest/test_fork.c b/utest/test_fork.c
index 0b90407b1..bd531e7fb 100644
--- a/utest/test_fork.c
+++ b/utest/test_fork.c
@@ -36,7 +36,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <sys/wait.h>
 #include "openblas_utest.h"
 
-void* xmalloc(size_t n)
+static void* xmalloc(size_t n)
 {
     void* tmp;
     tmp = malloc(n);
@@ -48,7 +48,8 @@ void* xmalloc(size_t n)
     }
 }
 
-void check_dgemm(double *a, double *b, double *result, double *expected, blasint n)
+#ifdef BUILD_DOUBLE
+static void check_dgemm(double *a, double *b, double *result, double *expected, blasint n)
 {
     char trans1 = 'T';
     char trans2 = 'N';
@@ -59,9 +60,13 @@ void check_dgemm(double *a, double *b, double *result, double *expected, blasint
         ASSERT_DBL_NEAR_TOL(expected[i], result[i], DOUBLE_EPS);
     }
 }
+#endif
 
 CTEST(fork, safety)
 {
+#ifndef BUILD_DOUBLE
+exit(0);
+#else
     blasint n = 1000;
     int i;
 
@@ -124,4 +129,5 @@ CTEST(fork, safety)
     ASSERT_EQUAL(wait_pid, fork_pid);
     ASSERT_EQUAL(0, WEXITSTATUS (child_status));
     }
+#endif
 }
diff --git a/utest/test_ismin.c b/utest/test_ismin.c
new file mode 100644
index 000000000..af597807f
--- /dev/null
+++ b/utest/test_ismin.c
@@ -0,0 +1,91 @@
+/*****************************************************************************
+Copyright (c) 2020, The OpenBLAS Project
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+   3. Neither the name of the OpenBLAS project nor the names of
+      its contributors may be used to endorse or promote products
+      derived from this software without specific prior written
+      permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+**********************************************************************************/
+
+#include "openblas_utest.h"
+
+#define ELEMENTS 50
+#define INCREMENT 2
+
+#ifdef BUILD_SINGLE
+CTEST(ismin, positive_step_2){
+    blasint i;
+    blasint N = ELEMENTS, inc = INCREMENT;
+    float x[ELEMENTS * INCREMENT];
+    for (i = 0; i < N * inc; i ++) {
+        x[i] = i + 1000;
+    }
+
+    x[8 * inc] = 0;
+    blasint index = BLASFUNC(ismin)(&N, x, &inc);
+    ASSERT_EQUAL(9, index);
+}
+
+CTEST(ismin, negative_step_2){
+    blasint i;
+    blasint N = ELEMENTS, inc = INCREMENT;
+    float x[ELEMENTS * INCREMENT];
+    for (i = 0; i < N * inc; i ++) {
+        x[i] = - i - 1000;
+    }
+
+    x[8 * inc] = -123456.0f;
+    blasint index = BLASFUNC(ismin)(&N, x, &inc);
+    ASSERT_EQUAL(9, index);
+}
+
+CTEST(ismax, positive_step_2){
+    blasint i;
+    blasint N = ELEMENTS, inc = INCREMENT;
+    float x[ELEMENTS * INCREMENT];
+    for (i = 0; i < N * inc; i ++) {
+        x[i] = i + 1000;
+    }
+
+    x[8 * inc] = 123456.0f;
+    blasint index = BLASFUNC(ismax)(&N, x, &inc);
+    ASSERT_EQUAL(9, index);
+}
+
+CTEST(ismax, negative_step_2){
+    blasint i;
+    blasint N = ELEMENTS, inc = INCREMENT;
+    float x[ELEMENTS * INCREMENT];
+    for (i = 0; i < N * inc; i ++) {
+        x[i] = - i - 1000;
+    }
+
+    x[8 * inc] = 0;
+    blasint index = BLASFUNC(ismax)(&N, x, &inc);
+    ASSERT_EQUAL(9, index);
+}
+#endif
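The new ismin/ismax cases pin down two conventions: the result is a 1-based index (Fortran heritage), and it counts vector elements rather than array slots, which is why planting the extremum at x[8 * inc] must yield index 9 for any stride. A reference loop in C with the same semantics (illustrative only; the library kernels are the authority, and ties go to the first occurrence):

    /* 1-based index of the smallest element of x with stride inc;
     * returns 0 for an empty or invalid vector, following the usual
     * BLAS index-routine convention. */
    static long ismin_ref(long n, const float *x, long inc) {
        long i, best = 0;
        if (n < 1 || inc <= 0) return 0;
        for (i = 1; i < n; i++)
            if (x[i * inc] < x[best * inc])
                best = i;
        return best + 1;
    }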
diff --git a/utest/test_kernel_regress.c b/utest/test_kernel_regress.c
new file mode 100644
index 000000000..5b131bb2c
--- /dev/null
+++ b/utest/test_kernel_regress.c
@@ -0,0 +1,52 @@
+#include "openblas_utest.h"
+#include <stdlib.h>
+#include <math.h>
+#include <cblas.h>
+
+#define LAPACK_ROW_MAJOR 101
+blasint LAPACKE_dgesvd( blasint matrix_layout, char jobu, char jobvt,
+                        blasint m, blasint n, double* a,
+                        blasint lda, double* s, double* u, blasint ldu,
+                        double* vt, blasint ldvt, double* superb );
+
+
+#define DATASIZE 100
+
+double s[DATASIZE];
+double u[DATASIZE*DATASIZE];
+double vt[DATASIZE*DATASIZE];
+double X[DATASIZE*DATASIZE];
+double superb[DATASIZE];
+double tmp[DATASIZE*DATASIZE];
+double m[DATASIZE*DATASIZE];
+
+CTEST(kernel_regress,skx_avx)
+{
+#ifdef BUILD_DOUBLE
+    double norm;
+    int i, j, info;
+    srand(0);
+    for (i = 0; i < DATASIZE*DATASIZE; i++) {
+        m[i] = (rand()+0.0)/RAND_MAX * 10;
+        tmp[i] = m[i];
+    }
+
+    info = LAPACKE_dgesvd( LAPACK_ROW_MAJOR, 'A', 'A', DATASIZE, DATASIZE, m, DATASIZE,
+                           s, u, DATASIZE, vt, DATASIZE, superb);
+
+    for (i = 0; i < DATASIZE; i++) {
+        for (j = 0; j < DATASIZE; j++) {
+            u[i*DATASIZE+j] = u[i*DATASIZE+j]*s[j];
+        }
+    }
+    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
+                DATASIZE, DATASIZE, DATASIZE, 1, u, DATASIZE, vt, DATASIZE, 0, X, DATASIZE);
+
+    for (i = 0; i < DATASIZE*DATASIZE; i++) {
+        X[i] = X[i] - tmp[i];
+    }
+
+    norm = cblas_dnrm2(DATASIZE*DATASIZE, X, 1);
+    ASSERT_DBL_NEAR_TOL(0.0, norm, 1e-10);
+#endif
+}
diff --git a/utest/test_min.c b/utest/test_min.c
new file mode 100644
index 000000000..a627674ae
--- /dev/null
+++ b/utest/test_min.c
@@ -0,0 +1,109 @@
+/*****************************************************************************
+Copyright (c) 2011-2016, The OpenBLAS Project
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+   3. Neither the name of the OpenBLAS project nor the names of
+      its contributors may be used to endorse or promote products
+      derived from this software without specific prior written
+      permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+**********************************************************************************/
+
+#include "openblas_utest.h"
+#ifdef BUILD_SINGLE
+CTEST(min, smin_negative){
+    blasint N=3, inc=1;
+    float te_min=0.0, tr_min=0.0;
+    float x[]={-1.1, -2.2, -3.3};
+
+    te_min=BLASFUNC(smin)(&N, x, &inc);
+    tr_min=-3.3;
+
+    ASSERT_DBL_NEAR_TOL((double)(tr_min), (double)(te_min), SINGLE_EPS);
+}
+#endif
+
+#ifdef BUILD_DOUBLE
+CTEST(min, dmin_positive){
+    blasint N=3, inc=1;
+    double te_min=0.0, tr_min=0.0;
+    double x[]={1.1, 0.0, 3.3};
+
+    te_min=BLASFUNC(dmin)(&N, x, &inc);
+    tr_min=0.0;
+
+    ASSERT_DBL_NEAR_TOL((double)(tr_min), (double)(te_min), DOUBLE_EPS);
+}
+#endif
+
+#ifdef BUILD_SINGLE
+CTEST(min, smin_zero){
+    blasint N=3, inc=1;
+    float te_min=0.0, tr_min=0.0;
+    float x[]={1.1, 2.2, 0.0};
+
+    te_min=BLASFUNC(smin)(&N, x, &inc);
+    tr_min=0.0;
+
+    ASSERT_DBL_NEAR_TOL((double)(tr_min), (double)(te_min), SINGLE_EPS);
+}
+
+CTEST(max, smax_negative){
+    blasint N=3, inc=1;
+    float te_max=0.0, tr_max=0.0;
+    float x[]={-1.1, -2.2, -3.3};
+
+    te_max=BLASFUNC(smax)(&N, x, &inc);
+    tr_max=-1.1;
+
+    ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), SINGLE_EPS);
+}
+#endif
+
+#ifdef BUILD_DOUBLE
+CTEST(max, dmax_positive){
+    blasint N=3, inc=1;
+    double te_max=0.0, tr_max=0.0;
+    double x[]={1.1, 0.0, 3.3};
+
+    te_max=BLASFUNC(dmax)(&N, x, &inc);
+    tr_max=3.3;
+
+    ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), DOUBLE_EPS);
+}
+#endif
+#ifdef BUILD_SINGLE
+CTEST(max, smax_zero){
+    blasint N=3, inc=1;
+    float te_max=0.0, tr_max=0.0;
+    float x[]={-1.1, -2.2, 0.0};
+
+    te_max=BLASFUNC(smax)(&N, x, &inc);
+    tr_max=0.0;
+
+    ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), SINGLE_EPS);
+}
+#endif
+
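test_min.c documents that smin/dmin and smax/dmax — OpenBLAS extensions, unlike the reference BLAS index-of-largest-magnitude family — compare signed values, not magnitudes: smin of {-1.1, -2.2, -3.3} is -3.3 and smax of {-1.1, -2.2, 0.0} is 0.0, exactly what the zero and negative cases assert. Reference semantics in C (a sketch, not the library kernel):

    /* Signed minimum of n floats with stride inc: no fabs() anywhere,
     * in contrast to the amax/amin (magnitude) family tested earlier. */
    static float smin_ref(long n, const float *x, long inc) {
        float m = x[0];
        long i;
        for (i = 1; i < n; i++)
            if (x[i * inc] < m)
                m = x[i * inc];
        return m;
    }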
diff --git a/utest/test_post_fork.c b/utest/test_post_fork.c
new file mode 100644
index 000000000..9370a02ce
--- /dev/null
+++ b/utest/test_post_fork.c
@@ -0,0 +1,131 @@
+/*****************************************************************************
+Copyright (c) 2011-2020, The OpenBLAS Project
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+   3. Neither the name of the OpenBLAS project nor the names of
+      its contributors may be used to endorse or promote products
+      derived from this software without specific prior written
+      permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+**********************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/wait.h>
+#ifdef USE_OPENMP
+#include <omp.h>
+#endif
+#include "openblas_utest.h"
+
+static void* xmalloc(size_t n)
+{
+    void* tmp;
+    tmp = malloc(n);
+    if (tmp == NULL) {
+        fprintf(stderr, "You are about to die\n");
+        exit(1);
+    } else {
+        return tmp;
+    }
+}
+
+#ifdef BUILD_DOUBLE
+static void check_dgemm(double *a, double *b, double *result, double *expected, blasint n)
+{
+    char trans1 = 'T';
+    char trans2 = 'N';
+    double zerod = 0, oned = 1;
+    int i;
+    BLASFUNC(dgemm)(&trans1, &trans2, &n, &n, &n, &oned, a, &n, b, &n, &zerod, result, &n);
+    for(i = 0; i < n * n; ++i) {
+        ASSERT_DBL_NEAR_TOL(expected[i], result[i], DOUBLE_EPS);
+    }
+}
+#endif
+
+CTEST(fork, safety_after_fork_in_parent)
+{
+#ifndef BUILD_DOUBLE
+exit(0);
+#else
+    blasint n = 100;
+    int i, nthreads_omp;
+
+    double *a, *b, *c, *d;
+    size_t n_bytes;
+
+    pid_t fork_pid;
+
+    n_bytes = sizeof(*a) * n * n;
+
+    a = xmalloc(n_bytes);
+    b = xmalloc(n_bytes);
+    c = xmalloc(n_bytes);
+    d = xmalloc(n_bytes);
+
+    // Put ones in a, b and n in c (result)
+    for(i = 0; i < n * n; ++i) {
+        a[i] = 1;
+        b[i] = 1;
+        c[i] = 1 * n;
+    }
+
+    // Test that OpenBLAS works after a fork.
+    // This situation routinely happens with Pythons numpy where a
+    // `sys.platform` calls `uname` in a forked process.
+    // So we simulate this situation here.
+
+    // There was an issue where a different number of OpenBLAS and OpenMP
+    // threads triggered a memory leak. So run this multiple times
+    // with different number of threads set.
+#ifdef USE_OPENMP
+    nthreads_omp = omp_get_max_threads();
+    // Run with half the max OMP threads, the max threads and twice that
+    for(i = (nthreads_omp + 1) / 2; i <= nthreads_omp * 2; i *= 2) {
+        omp_set_num_threads(i);
+#endif
+
+        fork_pid = fork();
+        if (fork_pid == -1) {
+            CTEST_ERR("Failed to fork process.");
+        } else if (fork_pid == 0) {
+            // Just pretend to do something, e.g. call `uname`, then exit
+            exit(0);
+        } else {
+            // Wait for the child to finish and check the exit code.
+            int child_status = 0;
+            pid_t wait_pid = wait(&child_status);
+            ASSERT_EQUAL(wait_pid, fork_pid);
+            ASSERT_EQUAL(0, WEXITSTATUS (child_status));
+
+            // Now OpenBLAS has to work
+            check_dgemm(a, b, d, c, n);
+        }
+#ifdef USE_OPENMP
+    }
+#endif
+
+#endif
+}
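test_post_fork.c repeats the dgemm check across several OpenMP thread counts because the original bug — a memory leak — only showed when OpenBLAS's thread pool and OpenMP disagreed about the thread count after a fork. The sweep for (i = (nthreads_omp + 1) / 2; i <= nthreads_omp * 2; i *= 2) visits roughly half the maximum, the maximum itself, and twice the maximum; shown standalone for illustration (8 is an assumed omp_get_max_threads() result, not anything the test fixes):

    #include <stdio.h>

    int main(void) {
        int nmax = 8;   /* assumed omp_get_max_threads() value */
        int i;
        /* Visits 4, 8 and 16 for nmax == 8. */
        for (i = (nmax + 1) / 2; i <= nmax * 2; i *= 2)
            printf("dgemm check with %d threads\n", i);
        return 0;
    }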
diff --git a/utest/test_potrs.c b/utest/test_potrs.c
index 7afeb4c9d..f39287d6f 100644
--- a/utest/test_potrs.c
+++ b/utest/test_potrs.c
@@ -39,7 +39,6 @@ void BLASFUNC(zpotrs_(char*, BLASINT*, BLASINT*, complex double*, BLASINT*,
 complex double*, BLASINT*, BLASINT*);
 */
-
 //https://github.com/xianyi/OpenBLAS/issues/695
 CTEST(potrf, bug_695){
@@ -151,8 +150,10 @@ CTEST(potrf, bug_695){
     blasint n=10;
     blasint info[1];
+#ifdef BUILD_COMPLEX
     BLASFUNC(cpotrf)(&up, &n, (float*)(A1), &n, info);
     //printf("%g+%g*I\n", creal(A1[91]), cimag(A1[91]));
+#endif
 
     openblas_complex_double A2[100] =
     {
@@ -282,8 +283,9 @@ CTEST(potrf, bug_695){
     };
     char lo = 'L';
     blasint nrhs = 2;
+#ifdef BUILD_COMPLEX16
     BLASFUNC(zpotrs)(&lo, &n, &nrhs, (double*)(A2), &n, (double*)(B), &n, info);
-
+#endif
     // note that this is exactly equal to A1
     openblas_complex_float A3[100] =
     {
@@ -388,14 +390,15 @@ CTEST(potrf, bug_695){
         openblas_make_complex_float(-0.9617417, -1.2486815),
         openblas_make_complex_float(3.4629636, +0.0)
     };
+#ifdef BUILD_COMPLEX
     BLASFUNC(cpotrf)(&up, &n, (float*)(A3), &n, info);
     // printf("%g+%g*I\n", creal(A3[91]), cimag(A3[91]));
     if(isnan(CREAL(A3[91])) || isnan(CIMAG(A3[91]))) {
         CTEST_ERR("%s:%d got NaN", __FILE__, __LINE__);
     }
+#endif
 }
 
-
 // Check potrf factorizes a small problem correctly
 CTEST(potrf, smoketest_trivial){
     float A1s[4] = {2, 0.3, 0.3, 3};
@@ -439,31 +442,43 @@ CTEST(potrf, smoketest_trivial){
         uplo = 'U';
     }
 
+#ifdef BUILD_SINGLE
     BLASFUNC(scopy)(&nv, A1s, &inc, As, &inc);
+#endif
+#ifdef BUILD_DOUBLE
     BLASFUNC(dcopy)(&nv, A1d, &inc, Ad, &inc);
+#endif
+#ifdef BUILD_COMPLEX
     BLASFUNC(ccopy)(&nv, (float *)A1c, &inc, (float *)Ac, &inc);
+#endif
+#ifdef BUILD_COMPLEX16
     BLASFUNC(zcopy)(&nv, (double *)A1z, &inc, (double *)Az, &inc);
+#endif
 
+#ifdef BUILD_SINGLE
     BLASFUNC(spotrf)(&uplo, &n, As, &n, &info);
     if (info != 0) {
         CTEST_ERR("%s:%d info != 0", __FILE__, __LINE__);
     }
-
+#endif
+#ifdef BUILD_DOUBLE
     BLASFUNC(dpotrf)(&uplo, &n, Ad, &n, &info);
     if (info != 0) {
         CTEST_ERR("%s:%d info != 0", __FILE__, __LINE__);
     }
-
+#endif
+#ifdef BUILD_COMPLEX
     BLASFUNC(cpotrf)(&uplo, &n, (float *)Ac, &n, &info);
    if (info != 0) {
         CTEST_ERR("%s:%d info != 0", __FILE__, __LINE__);
     }
-
+#endif
+#ifdef BUILD_COMPLEX16
     BLASFUNC(zpotrf)(&uplo, &n, (double *)Az, &n, &info);
     if (info != 0) {
         CTEST_ERR("%s:%d info != 0", __FILE__, __LINE__);
     }
-
+#endif
     /* Fill the other triangle */
     if (uplo == 'L') {
         for (i = 0; i < n; ++i) {
@@ -495,29 +510,39 @@ CTEST(potrf, smoketest_trivial){
         trans1 = 'C';
         trans2 = 'N';
     }
-
+#ifdef BUILD_SINGLE
     BLASFUNC(sgemm)(&trans1, &trans2, &n, &n, &n, &ones, As, &n, As, &n, &zeros, Bs, &n);
+#endif
+#ifdef BUILD_DOUBLE
     BLASFUNC(dgemm)(&trans1, &trans2, &n, &n, &n, &oned, Ad, &n, Ad, &n, &zerod, Bd, &n);
+#endif
+#ifdef BUILD_COMPLEX
     BLASFUNC(cgemm)(&trans1, &trans2, &n, &n, &n, (float *)&onec, (float *)Ac, &n, (float *)Ac, &n, (float *)&zeroc, (float *)Bc, &n);
+#endif
+#ifdef BUILD_COMPLEX16
     BLASFUNC(zgemm)(&trans1, &trans2, &n, &n, &n, (double *)&onez, (double *)Az, &n, (double *)Az, &n, (double *)&zeroz, (double *)Bz, &n);
-
+#endif
     /* Check result is close to original */
     for (i = 0; i < n; ++i) {
         for (j = 0; j < n; ++j) {
             double err;
+#ifdef BUILD_SINGLE
             err = fabs(A1s[i+n*j] - Bs[i+n*j]);
             if (err > 1e-5) {
                 CTEST_ERR("%s:%d %c s(%d,%d) difference: %g", __FILE__, __LINE__, uplo, i, j, err);
             }
-
+#endif
+#ifdef BUILD_DOUBLE
             err = fabs(A1d[i+n*j] - Bd[i+n*j]);
             if (err > 1e-12) {
                 CTEST_ERR("%s:%d %c d(%d,%d) difference: %g", __FILE__, __LINE__, uplo, i, j, err);
             }
+#endif
+#ifdef BUILD_COMPLEX
 #ifdef OPENBLAS_COMPLEX_C99
             err = cabsf(A1c[i+n*j] - Bc[i+n*j]);
 #else
@@ -527,7 +552,9 @@ CTEST(potrf, smoketest_trivial){
             if (err > 1e-5) {
                 CTEST_ERR("%s:%d %c c(%d,%d) difference: %g", __FILE__, __LINE__, uplo, i, j, err);
             }
+#endif
+#ifdef BUILD_COMPLEX16
 #ifdef OPENBLAS_COMPLEX_C99
             err = cabs(A1z[i+n*j] - Bz[i+n*j]);
 #else
@@ -537,6 +564,7 @@ CTEST(potrf, smoketest_trivial){
             if (err > 1e-12) {
                 CTEST_ERR("%s:%d %c z(%d,%d) difference: %g", __FILE__, __LINE__, uplo, i, j, err);
             }
+#endif
         }
     }
 }
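The potrf smoketest factors tiny fixed matrices and multiplies the factor back, so its expected values can be checked by hand. For the real 2x2 case A = [2 0.3; 0.3 3], the lower Cholesky factor has l11 = sqrt(2), l21 = 0.3/sqrt(2), l22 = sqrt(3 - 0.045); worked in C for illustration only:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        double l11 = sqrt(2.0);              /* 1.41421... */
        double l21 = 0.3 / l11;              /* 0.21213..., l21^2 = 0.045 */
        double l22 = sqrt(3.0 - l21 * l21);  /* sqrt(2.955) = 1.71901... */
        /* L*L^T reproduces A, which is what the test's gemm step
         * verifies in all four precisions: prints 2 0.3 / 0.3 3. */
        printf("%g %g / %g %g\n", l11 * l11, l11 * l21,
               l21 * l11, l21 * l21 + l22 * l22);
        return 0;
    }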
diff --git a/utest/test_rot.c b/utest/test_rot.c
index cf72ad22d..0e74ecbb3 100644
--- a/utest/test_rot.c
+++ b/utest/test_rot.c
@@ -33,6 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "openblas_utest.h"
 
+#ifdef BUILD_DOUBLE
 CTEST(rot,drot_inc_0)
 {
 	blasint i=0;
@@ -52,7 +53,9 @@ CTEST(rot,drot_inc_0)
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_COMPLEX16
 CTEST(rot,zdrot_inc_0)
 {
 	blasint i=0;
@@ -72,7 +75,9 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_SINGLE
 CTEST(rot,srot_inc_0)
 {
 	blasint i=0;
@@ -91,7 +96,9 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], SINGLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_COMPLEX
 CTEST(rot, csrot_inc_0)
 {
 	blasint i=0;
@@ -110,3 +117,5 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], SINGLE_EPS);
 	}
 }
+#endif
+
diff --git a/utest/test_rotmg.c b/utest/test_rotmg.c
index e5ec78983..ad435f6b0 100644
--- a/utest/test_rotmg.c
+++ b/utest/test_rotmg.c
@@ -33,6 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "openblas_utest.h"
 
+#ifdef BUILD_DOUBLE
 CTEST (drotmg,rotmg)
 {
 	double te_d1, tr_d1;
@@ -204,3 +205,4 @@ CTEST(drotmg, drotmg_D1_big_D2_big_flag_zero)
 		ASSERT_DBL_NEAR_TOL(tr_param[i], te_param[i], DOUBLE_EPS);
 	}
 }
+#endif
diff --git a/utest/test_swap.c b/utest/test_swap.c
index 259c83a5c..6d8ae8056 100644
--- a/utest/test_swap.c
+++ b/utest/test_swap.c
@@ -33,6 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "openblas_utest.h"
 
+#ifdef BUILD_DOUBLE
 CTEST(swap,dswap_inc_0)
 {
 	blasint i=0;
@@ -50,7 +51,9 @@ CTEST(swap,dswap_inc_0)
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_COMPLEX16
 CTEST(swap,zswap_inc_0)
 {
 	blasint i=0;
@@ -68,7 +71,9 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_SINGLE
 CTEST(swap,sswap_inc_0)
 {
 	blasint i=0;
@@ -86,7 +91,9 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], SINGLE_EPS);
 	}
 }
+#endif
+#ifdef BUILD_COMPLEX
 CTEST(swap,cswap_inc_0)
 {
 	blasint i=0;
@@ -104,3 +111,5 @@
 		ASSERT_DBL_NEAR_TOL(y2[i], y1[i], SINGLE_EPS);
 	}
 }
+#endif
+
diff --git a/utest/utest_main2.c b/utest/utest_main2.c
index aa95a5a3f..6b252863a 100644
--- a/utest/utest_main2.c
+++ b/utest/utest_main2.c
@@ -50,6 +50,17 @@ CTEST(amax, samax){
   ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), SINGLE_EPS);
 }
 
+CTEST(amax, damax){
+  blasint N=3, inc=1;
+  double te_max=0.0, tr_max=0.0;
+  double x[]={-1.1, 2.2, -3.3};
+
+  te_max=BLASFUNC(damax)(&N, x, &inc);
+  tr_max=3.3;
+
+  ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), DOUBLE_EPS);
+}
+
 CTEST (drotmg,rotmg)
 {
 	double te_d1, tr_d1;
@@ -508,9 +519,82 @@ CTEST(swap,cswap_inc_0)
 	}
 }
 
+CTEST(min, smin_negative){
+  blasint N=3, inc=1;
+  float te_min=0.0, tr_min=0.0;
+  float x[]={-1.1, -2.2, -3.3};
+
+  te_min=BLASFUNC(smin)(&N, x, &inc);
+  tr_min=-3.3;
+
+  ASSERT_DBL_NEAR_TOL((double)(tr_min), (double)(te_min), SINGLE_EPS);
+}
+
+CTEST(min, dmin_positive){
+  blasint N=3, inc=1;
+  double te_min=0.0, tr_min=0.0;
+  double x[]={1.1, 0.0, 3.3};
+
+  te_min=BLASFUNC(dmin)(&N, x, &inc);
+  tr_min=0.0;
+
+  ASSERT_DBL_NEAR_TOL((double)(tr_min), (double)(te_min), DOUBLE_EPS);
+}
+
+CTEST(min, smin_zero){
+  blasint N=3, inc=1;
+  float te_min=0.0, tr_min=0.0;
+  float x[]={1.1, 2.2, 0.0};
+
+  te_min=BLASFUNC(smin)(&N, x, &inc);
+  tr_min=0.0;
+
+  ASSERT_DBL_NEAR_TOL((double)(tr_min), (double)(te_min), SINGLE_EPS);
+}
+
+CTEST(max, smax_negative){
+  blasint N=3, inc=1;
+  float te_max=0.0, tr_max=0.0;
+  float x[]={-1.1, -2.2, -3.3};
+
+  te_max=BLASFUNC(smax)(&N, x, &inc);
+  tr_max=-1.1;
+
+  ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), SINGLE_EPS);
+}
+
+CTEST(max, dmax_positive){
+  blasint N=3, inc=1;
+  double te_max=0.0, tr_max=0.0;
+  double x[]={1.1, 0.0, 3.3};
+
+  te_max=BLASFUNC(dmax)(&N, x, &inc);
+  tr_max=3.3;
+
+  ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), DOUBLE_EPS);
+}
+
+CTEST(max, smax_zero){
+  blasint N=3, inc=1;
+  float te_max=0.0, tr_max=0.0;
+  float x[]={-1.1, -2.2, 0.0};
+
+  te_max=BLASFUNC(smax)(&N, x, &inc);
+  tr_max=0.0;
+
+  ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), SINGLE_EPS);
+}
+
 int main(int argc, const char ** argv){
-  CTEST_ADD(amax, samax);
+  CTEST_ADD (amax, samax);
+  CTEST_ADD (amax, damax);
+  CTEST_ADD (min, smin_negative);
+  CTEST_ADD (min, dmin_positive);
+  CTEST_ADD (min, smin_zero);
+  CTEST_ADD (max, smax_negative);
+  CTEST_ADD (max, dmax_positive);
+  CTEST_ADD (max, smax_zero);
   CTEST_ADD (drotmg,rotmg);
   CTEST_ADD (drotmg,rotmg_issue1452);
   CTEST_ADD (drotmg,rotmg_D1eqD2_X1eqX2);